| code (string, 13–6.09M chars) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, length 1–5) |
|---|---|---|---|
# coding: utf-8

import datetime
import json
import requests
import os
import re
import sys
from todoist.api import TodoistAPI

#SLACK_CHANNEL = os.environ['SLACK_CHANNEL']
#SLACK_POSTURL = os.environ['SLACK_POSTURL']
TDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)
TDIAPI.sync()
name = os.environ['TODOIST_PJT']

def lambda_handler(event, context):
    msg = None
    if event["function"] == 'tasklist':
        msg = tasklist(name)
    if event["function"] == 'activity':
        msg = activity(name)
    return msg
def activity(name):
    actlogs = TDIAPI.activity.get()
    pjts = TDIAPI.state['projects']

    for projects_id in pjts:
        if projects_id['name'] == name:
            tasks_project_id = projects_id['id']
            break
    else:
        print('[INFO] No matching project name')
        return []  # bail out early; tasks_project_id would be undefined below

    event_list = []
    for events in actlogs['events']:
        today = datetime.datetime.now().strftime("%Y-%m-%d")

        '''
        Todoist's event_date is a UTC string, so parse it into a datetime and add 9 hours,
        then reduce it to a date string and check whether it is today in Japan time (JST).
        '''
        todoist_times = datetime.datetime.strptime(events['event_date'], '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours = 9)
        todoist_date = str(todoist_times.strftime("%Y-%m-%d"))

        if events['event_type'] == 'completed' and todoist_date == today and events['parent_project_id'] == tasks_project_id:
            event_list.append(events['extra_data']['content'])

    print(event_list)
    return event_list
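# A worked example of the UTC->JST check above (illustrative timestamp, not from the original):
#   strptime('2020-01-01T16:00:00Z', '%Y-%m-%dT%H:%M:%SZ') + timedelta(hours=9)
#   -> 2020-01-02 01:00:00, so the JST date is '2020-01-02' even though the UTC date is '2020-01-01'.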
def tasklist(name):

    pjts = TDIAPI.state['projects']
    items = TDIAPI.state['items']
    labels = TDIAPI.state['labels']
    sects = TDIAPI.state['sections']

    inbox_list = []
    doing_list = []
    review_list = []
    any_list = []

    for projects_id in pjts:  # was `for projects_id in list:`, which iterated over the builtin type
        if projects_id['name'] == name:
            tasks_project_id = projects_id['id']
            break

    try:
        tasks_project_id
    except NameError:
        print("The project name is incorrect. Please enter a valid project name.")
        return

    #print(labels)  # debug output
    #sys.exit()     # debug exit; left enabled it stopped the function before the loop below ever ran

    for item in items:
        l_content = item['content']
        l_pjt_name = [ pjt['name'] for pjt in pjts if item['project_id'] == pjt['id'] ]
        l_sec_name = [ sect['name'] for sect in sects if item['section_id'] == sect['id']]
        #print('+++')
        #print(l_pjt_id)
        #print(l_content)
        #print(l_sec_name[0])

        if l_sec_name and l_sec_name[0] == 'ToDo':  # the list is empty (never None) when the item has no section
            print(l_sec_name)
            #if item['checked'] == 0 and item['project_id'] == tasks_project_id:

            #taskcontent = '- ' + item['content']
            #slackmessage.append(taskcontent)
            #print(taskcontent)
        #print(slackmessage)
        #message = '\n'.join(slackmessage)
    return
def slack_notify():
    title = "*[Scheduled notification] Task list for project " + name + "*\n"
    slack_message = {
        'channel': SLACK_CHANNEL,
        'icon_emoji': ":todoist:",
        'text': title,
        "attachments": [
            {
                "color": "#36a64f",
                "fields": [
                    {
                        "value": msg,
                    },
                ],
            }
        ]
    }
    #requests.post(SLACK_POSTURL, data=json.dumps(slack_message))
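# Hypothetical wiring sketch (not in the original): once SLACK_CHANNEL/SLACK_POSTURL are set,
# slack_notify() could be called with the handler result joined into `msg`, enabling the line above:
#   requests.post(SLACK_POSTURL, data=json.dumps(slack_message))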
|
normal
|
{
"blob_id": "3c3d45f0844496b8d623286b36a4935a154f410a",
"index": 4133,
"step-1": "<mask token>\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\n<mask token>\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n",
"step-2": "<mask token>\nTDIAPI.sync()\n<mask token>\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n",
"step-3": "<mask token>\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = os.environ['TODOIST_PJT']\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n",
"step-4": "import datetime\nimport json\nimport requests\nimport os\nimport re\nimport sys\nfrom todoist.api import TodoistAPI\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = os.environ['TODOIST_PJT']\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n",
"step-5": "# coding: utf-8\n\nimport datetime\nimport json\nimport requests\nimport os\nimport re\nimport sys\nfrom todoist.api import TodoistAPI\n\n#SLACK_CHANNEL = os.environ['SLACK_CHANNEL']\n#SLACK_POSTURL = os.environ['SLACK_POSTURL']\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = os.environ['TODOIST_PJT']\n\ndef lambda_handler(event, context):\n if event[\"function\"] == 'tasklist':\n msg = tasklist(name)\n if event[\"function\"] == 'activity':\n msg = activity(name)\n return\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n '''\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n '''\n todoist_times = datetime.datetime.strptime(events['event_date'], '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours = 9)\n todoist_date = str(todoist_times.strftime(\"%Y-%m-%d\"))\n\n if events['event_type'] == 'completed' and todoist_date == today and events['parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n\n print(event_list)\n return event_list\n\ndef tasklist(name):\n\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n\n try:\n tasks_project_id\n except NameError:\n print(\"プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。\")\n return\n\n print(labels)\n sys.exit()\n\n for item in items:\n l_content = item['content']\n l_pjt_name = [ pjt['name'] for pjt in pjts if item['project_id'] == pjt['id'] ]\n l_sec_name = [ sect['name'] for sect in sects if item['section_id'] == sect['id']]\n #print('+++')\n #print(l_pjt_id)\n #print(l_content)\n #print(l_sec_name[0])\n\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n #if item['checked'] == 0 and item['project_id'] == tasks_project_id:\n\n #taskcontent = '- ' + item['content']\n #slackmessage.append(taskcontent)\n #print(taskcontent)\n #print(slackmessage)\n #message = '\\n'.join(slackmessage)\n return\n\ndef slack_notify():\n title = \"*[定期通知] プロジェクト \" + name + \" のタスクリスト*\\n\"\n slack_message = {\n 'channel': SLACK_CHANNEL,\n 'icon_emoji': \":todoist:\",\n 'text': title,\n \"attachments\": [\n {\n \"color\": \"#36a64f\",\n \"fields\": [\n {\n \"value\": msg,\n },\n ],\n }\n ]\n }\n #requests.post(SLACK_POSTURL, data=json.dumps(slack_message))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
""" Script to run pilon iteratively to correct genome assemblies """
import os
import argparse
import logging
import subprocess
def parse_arguments():
""" Parse command line arguments """
# Create parser
parser = argparse.ArgumentParser(description='Run pilon many times')
# Add arguments
parser.add_argument('--draft_seq', '-d', required=True,
help='Draft sequence to correct')
parser.add_argument('--forward', '-f', required=True,
help='Reads to use for correction')
parser.add_argument('--reverse', '-r',
help='Reverse read for correction')
parser.add_argument('--output', '-o', required=True,
help='Output directory')
parser.add_argument('--iterations', '-i', required=True,
help='How many times to run pilon')
parser.add_argument('--threads', '-t', required=True,
help='Threads to use')
parser.add_argument('--pilon', '-p', required=True,
help='Path to pilon.jar')
# Parse arguments
args = parser.parse_args()
return args
def run_bwa(reference_genome, forward_read, reverse_read, threads, output, i):
    """ Run bwa to align reads to reference genome """
    # Index ref genome
    print('Align reads with BWA MEM')
    bwa_index_args = ['bwa', 'index', reference_genome]
    process = subprocess.Popen(bwa_index_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()

    # Align reads to reference genome
    bwa_mem_args = ['bwa', 'mem', '-t', threads, '-x', 'ont2d', reference_genome, forward_read]
    if reverse_read:  # skip the empty placeholder when no reverse reads were given
        bwa_mem_args.append(reverse_read)
    process = subprocess.Popen(bwa_mem_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()

    # Write alignment to file (communicate() returns bytes, so open in binary mode)
    sam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sam')
    with open(sam_file, 'wb') as bwa_mem_out:
        bwa_mem_out.write(out)

    return sam_file
def run_samtools(sam_file, threads, output, i):
    """ Sort and convert to BAM using samtools """

    # Convert the SAM-file to a BAM-file
    print('Convert SAM-file to BAM-file')
    bam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.bam')
    samtools_view_args = ['samtools', 'view', '-@', threads, '-bS', '-o',
                          bam_file, sam_file]
    process = subprocess.Popen(samtools_view_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()

    # Sort and return the BAM-file
    print('Sort BAM-file')
    bam_sorted_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sorted.bam')
    samtools_sort_args = ['samtools', 'sort', bam_file, '-o', bam_sorted_file]
    process = subprocess.Popen(samtools_sort_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()

    # Index sorted BAM-file
    samtools_index_args = ['samtools', 'index', bam_sorted_file]
    process = subprocess.Popen(samtools_index_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()

    return bam_sorted_file
def run_pilon(bam_sorted_file, reference_genome, pilon_output, threads, pilon_path):
    """ Run Pilon """
    print('Run Pilon')
    pilon_args = ['java', '-Xmx16G', '-jar', pilon_path, '--genome', reference_genome,
                  '--frags', bam_sorted_file, '--threads', threads, '--output',
                  pilon_output]
    process = subprocess.Popen(pilon_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()
    print(out)
    with open(pilon_output + '.log', 'wb') as pilon_log:  # `out` is bytes
        pilon_log.write(out)
def main():
    """ Main Application """
    # Get arguments
    args = parse_arguments()

    logging.basicConfig(filename='logging.log', level=logging.DEBUG)

    output = args.output
    reference_genome = args.draft_seq
    if args.reverse:
        reverse_read = args.reverse
    else:
        reverse_read = ""
    forward_read = args.forward
    threads = args.threads
    iterations = args.iterations
    pilon_path = args.pilon
    logging.info('OUTPUT DIRECTORY: ' + output)
    logging.info('READS: ' + forward_read + ', ' + reverse_read)
    logging.info('THREADS: ' + threads)
    logging.info('ITERATIONS: ' + iterations)

    # Set pilon output
    pilon_output = os.path.join(output, 'pilon_1')
    os.mkdir(output)

    logging.info('START CORRECTION')
    for i in range(int(iterations)):
        # Log
        logging.info('ITERATION: ' + str(i + 1))
        logging.info('REFERENCE GENOME: ' + reference_genome)
        logging.info('PILON OUTPUT: ' + pilon_output)
        sam_file = run_bwa(reference_genome, forward_read, reverse_read, threads, output, i)
        bam_sorted_file = run_samtools(sam_file, threads, output, i)
        run_pilon(bam_sorted_file, reference_genome, pilon_output, threads, pilon_path)

        # Set pilon output to new reference
        reference_genome = os.path.join(output, 'pilon_' + str(i + 1) + '.fasta')
        pilon_output = os.path.join(output, 'pilon_' + str(i + 2))


if __name__ == '__main__':
    main()
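# A hypothetical invocation sketch (file names are illustrative, not from the original):
#   python run_pilon.py -d draft.fasta -f reads_1.fastq -r reads_2.fastq \
#       -o pilon_out -i 3 -t 8 -p /opt/pilon/pilon.jar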
|
normal
|
{
"blob_id": "fdfb71595bf86fbe1763535814ec9c3cfd312d87",
"index": 2722,
"step-1": "<mask token>\n\n\ndef run_bwa(reference_genome, forward_read, reverse_read, threads, output, i):\n \"\"\" Run bwa to align reads to reference genome \"\"\"\n print('Align reads with BWA MEM')\n bwa_index_args = ['bwa', 'index', reference_genome]\n process = subprocess.Popen(bwa_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n bwa_mem_args = ['bwa', 'mem', '-t', threads, '-x', 'ont2d',\n reference_genome, forward_read, reverse_read]\n process = subprocess.Popen(bwa_mem_args, stdin=subprocess.PIPE, stdout=\n subprocess.PIPE)\n out, err = process.communicate()\n sam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sam')\n with open(sam_file, 'w') as bwa_mem_out:\n bwa_mem_out.write(out)\n return sam_file\n\n\ndef run_samtools(sam_file, threads, output, i):\n \"\"\" Sort and convert to BAM using samtools \"\"\"\n print('Convert SAM-file to BAM-file')\n bam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.bam')\n samtools_view_args = ['samtools', 'view', '-@', threads, '-bS', '-o',\n bam_file, sam_file]\n process = subprocess.Popen(samtools_view_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n print('Sort BAM-file')\n bam_sorted_file = os.path.join(output, 'bwa_mem_' + str(i + 1) +\n '.sorted.bam')\n samtools_sort_args = ['samtools', 'sort', bam_file, '-o', bam_sorted_file]\n process = subprocess.Popen(samtools_sort_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n samtools_index_args = ['samtools', 'index', bam_sorted_file]\n process = subprocess.Popen(samtools_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n return bam_sorted_file\n\n\n<mask token>\n\n\ndef main():\n \"\"\" Main Application \"\"\"\n args = parse_arguments()\n logging.basicConfig(filename='logging.log', level=logging.DEBUG)\n output = args.output\n reference_genome = args.draft_seq\n if args.reverse:\n reverse_read = args.reverse\n else:\n reverse_read = ''\n forward_read = args.forward\n threads = args.threads\n iterations = args.iterations\n pilon_path = args.pilon\n logging.info('OUTPUT DIRECTORY:' + output)\n logging.info('READS: ' + forward_read + ', ' + reverse_read)\n logging.info('THREADS: ' + threads)\n logging.info('ITERATIONS: ' + iterations)\n pilon_output = os.path.join(output, 'pilon_1')\n os.mkdir(output)\n logging.info('START CORRECTION')\n for i in range(int(iterations)):\n logging.info('ITERATION: ' + str(i + 1))\n logging.info('REFERENCE GENOME: ' + reference_genome)\n logging.info('PILON OUTPUT: ' + pilon_output)\n sam_file = run_bwa(reference_genome, forward_read, reverse_read,\n threads, output, i)\n bam_sorted_file = run_samtools(sam_file, threads, output, i)\n run_pilon(bam_sorted_file, reference_genome, pilon_output, threads,\n pilon_path)\n reference_genome = os.path.join(output, 'pilon_' + str(i + 1) +\n '.fasta')\n pilon_output = os.path.join(output, 'pilon_' + str(i + 2))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_bwa(reference_genome, forward_read, reverse_read, threads, output, i):\n \"\"\" Run bwa to align reads to reference genome \"\"\"\n print('Align reads with BWA MEM')\n bwa_index_args = ['bwa', 'index', reference_genome]\n process = subprocess.Popen(bwa_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n bwa_mem_args = ['bwa', 'mem', '-t', threads, '-x', 'ont2d',\n reference_genome, forward_read, reverse_read]\n process = subprocess.Popen(bwa_mem_args, stdin=subprocess.PIPE, stdout=\n subprocess.PIPE)\n out, err = process.communicate()\n sam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sam')\n with open(sam_file, 'w') as bwa_mem_out:\n bwa_mem_out.write(out)\n return sam_file\n\n\ndef run_samtools(sam_file, threads, output, i):\n \"\"\" Sort and convert to BAM using samtools \"\"\"\n print('Convert SAM-file to BAM-file')\n bam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.bam')\n samtools_view_args = ['samtools', 'view', '-@', threads, '-bS', '-o',\n bam_file, sam_file]\n process = subprocess.Popen(samtools_view_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n print('Sort BAM-file')\n bam_sorted_file = os.path.join(output, 'bwa_mem_' + str(i + 1) +\n '.sorted.bam')\n samtools_sort_args = ['samtools', 'sort', bam_file, '-o', bam_sorted_file]\n process = subprocess.Popen(samtools_sort_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n samtools_index_args = ['samtools', 'index', bam_sorted_file]\n process = subprocess.Popen(samtools_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n return bam_sorted_file\n\n\ndef run_pilon(bam_sorted_file, reference_genome, pilon_output, threads,\n pilon_path):\n \"\"\" Run Pilon \"\"\"\n print('Run Pilon')\n pilon_args = ['java', '-Xmx16G', '-jar', pilon_path, '--genome',\n reference_genome, '--frags', bam_sorted_file, '--threads', threads,\n '--output', pilon_output]\n process = subprocess.Popen(pilon_args, stdin=subprocess.PIPE, stdout=\n subprocess.PIPE)\n out, err = process.communicate()\n print(out)\n with open(pilon_output + '.log', 'w') as pilon_log:\n pilon_log.write(out)\n\n\ndef main():\n \"\"\" Main Application \"\"\"\n args = parse_arguments()\n logging.basicConfig(filename='logging.log', level=logging.DEBUG)\n output = args.output\n reference_genome = args.draft_seq\n if args.reverse:\n reverse_read = args.reverse\n else:\n reverse_read = ''\n forward_read = args.forward\n threads = args.threads\n iterations = args.iterations\n pilon_path = args.pilon\n logging.info('OUTPUT DIRECTORY:' + output)\n logging.info('READS: ' + forward_read + ', ' + reverse_read)\n logging.info('THREADS: ' + threads)\n logging.info('ITERATIONS: ' + iterations)\n pilon_output = os.path.join(output, 'pilon_1')\n os.mkdir(output)\n logging.info('START CORRECTION')\n for i in range(int(iterations)):\n logging.info('ITERATION: ' + str(i + 1))\n logging.info('REFERENCE GENOME: ' + reference_genome)\n logging.info('PILON OUTPUT: ' + pilon_output)\n sam_file = run_bwa(reference_genome, forward_read, reverse_read,\n threads, output, i)\n bam_sorted_file = run_samtools(sam_file, threads, output, i)\n run_pilon(bam_sorted_file, reference_genome, pilon_output, threads,\n pilon_path)\n reference_genome = os.path.join(output, 'pilon_' + str(i + 1) +\n '.fasta')\n pilon_output = os.path.join(output, 'pilon_' + str(i + 2))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_arguments():\n \"\"\" Parse command line arguments \"\"\"\n parser = argparse.ArgumentParser(description='Run pilon many times')\n parser.add_argument('--draft_seq', '-d', required=True, help=\n 'Draft sequence to correct')\n parser.add_argument('--forward', '-f', required=True, help=\n 'Reads to use for correction')\n parser.add_argument('--reverse', '-r', help='Reverse read for correction')\n parser.add_argument('--output', '-o', required=True, help=\n 'Output directory')\n parser.add_argument('--iterations', '-i', required=True, help=\n 'How many times to run pilon')\n parser.add_argument('--threads', '-t', required=True, help='Threads to use'\n )\n parser.add_argument('--pilon', '-p', required=True, help=\n 'Path to pilon.jar')\n args = parser.parse_args()\n return args\n\n\ndef run_bwa(reference_genome, forward_read, reverse_read, threads, output, i):\n \"\"\" Run bwa to align reads to reference genome \"\"\"\n print('Align reads with BWA MEM')\n bwa_index_args = ['bwa', 'index', reference_genome]\n process = subprocess.Popen(bwa_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n bwa_mem_args = ['bwa', 'mem', '-t', threads, '-x', 'ont2d',\n reference_genome, forward_read, reverse_read]\n process = subprocess.Popen(bwa_mem_args, stdin=subprocess.PIPE, stdout=\n subprocess.PIPE)\n out, err = process.communicate()\n sam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sam')\n with open(sam_file, 'w') as bwa_mem_out:\n bwa_mem_out.write(out)\n return sam_file\n\n\ndef run_samtools(sam_file, threads, output, i):\n \"\"\" Sort and convert to BAM using samtools \"\"\"\n print('Convert SAM-file to BAM-file')\n bam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.bam')\n samtools_view_args = ['samtools', 'view', '-@', threads, '-bS', '-o',\n bam_file, sam_file]\n process = subprocess.Popen(samtools_view_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n print('Sort BAM-file')\n bam_sorted_file = os.path.join(output, 'bwa_mem_' + str(i + 1) +\n '.sorted.bam')\n samtools_sort_args = ['samtools', 'sort', bam_file, '-o', bam_sorted_file]\n process = subprocess.Popen(samtools_sort_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n samtools_index_args = ['samtools', 'index', bam_sorted_file]\n process = subprocess.Popen(samtools_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n return bam_sorted_file\n\n\ndef run_pilon(bam_sorted_file, reference_genome, pilon_output, threads,\n pilon_path):\n \"\"\" Run Pilon \"\"\"\n print('Run Pilon')\n pilon_args = ['java', '-Xmx16G', '-jar', pilon_path, '--genome',\n reference_genome, '--frags', bam_sorted_file, '--threads', threads,\n '--output', pilon_output]\n process = subprocess.Popen(pilon_args, stdin=subprocess.PIPE, stdout=\n subprocess.PIPE)\n out, err = process.communicate()\n print(out)\n with open(pilon_output + '.log', 'w') as pilon_log:\n pilon_log.write(out)\n\n\ndef main():\n \"\"\" Main Application \"\"\"\n args = parse_arguments()\n logging.basicConfig(filename='logging.log', level=logging.DEBUG)\n output = args.output\n reference_genome = args.draft_seq\n if args.reverse:\n reverse_read = args.reverse\n else:\n reverse_read = ''\n forward_read = args.forward\n threads = args.threads\n iterations = args.iterations\n pilon_path = args.pilon\n logging.info('OUTPUT DIRECTORY:' + output)\n logging.info('READS: ' + 
forward_read + ', ' + reverse_read)\n logging.info('THREADS: ' + threads)\n logging.info('ITERATIONS: ' + iterations)\n pilon_output = os.path.join(output, 'pilon_1')\n os.mkdir(output)\n logging.info('START CORRECTION')\n for i in range(int(iterations)):\n logging.info('ITERATION: ' + str(i + 1))\n logging.info('REFERENCE GENOME: ' + reference_genome)\n logging.info('PILON OUTPUT: ' + pilon_output)\n sam_file = run_bwa(reference_genome, forward_read, reverse_read,\n threads, output, i)\n bam_sorted_file = run_samtools(sam_file, threads, output, i)\n run_pilon(bam_sorted_file, reference_genome, pilon_output, threads,\n pilon_path)\n reference_genome = os.path.join(output, 'pilon_' + str(i + 1) +\n '.fasta')\n pilon_output = os.path.join(output, 'pilon_' + str(i + 2))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport os\nimport argparse\nimport logging\nimport subprocess\n\n\ndef parse_arguments():\n \"\"\" Parse command line arguments \"\"\"\n parser = argparse.ArgumentParser(description='Run pilon many times')\n parser.add_argument('--draft_seq', '-d', required=True, help=\n 'Draft sequence to correct')\n parser.add_argument('--forward', '-f', required=True, help=\n 'Reads to use for correction')\n parser.add_argument('--reverse', '-r', help='Reverse read for correction')\n parser.add_argument('--output', '-o', required=True, help=\n 'Output directory')\n parser.add_argument('--iterations', '-i', required=True, help=\n 'How many times to run pilon')\n parser.add_argument('--threads', '-t', required=True, help='Threads to use'\n )\n parser.add_argument('--pilon', '-p', required=True, help=\n 'Path to pilon.jar')\n args = parser.parse_args()\n return args\n\n\ndef run_bwa(reference_genome, forward_read, reverse_read, threads, output, i):\n \"\"\" Run bwa to align reads to reference genome \"\"\"\n print('Align reads with BWA MEM')\n bwa_index_args = ['bwa', 'index', reference_genome]\n process = subprocess.Popen(bwa_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n bwa_mem_args = ['bwa', 'mem', '-t', threads, '-x', 'ont2d',\n reference_genome, forward_read, reverse_read]\n process = subprocess.Popen(bwa_mem_args, stdin=subprocess.PIPE, stdout=\n subprocess.PIPE)\n out, err = process.communicate()\n sam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sam')\n with open(sam_file, 'w') as bwa_mem_out:\n bwa_mem_out.write(out)\n return sam_file\n\n\ndef run_samtools(sam_file, threads, output, i):\n \"\"\" Sort and convert to BAM using samtools \"\"\"\n print('Convert SAM-file to BAM-file')\n bam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.bam')\n samtools_view_args = ['samtools', 'view', '-@', threads, '-bS', '-o',\n bam_file, sam_file]\n process = subprocess.Popen(samtools_view_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n print('Sort BAM-file')\n bam_sorted_file = os.path.join(output, 'bwa_mem_' + str(i + 1) +\n '.sorted.bam')\n samtools_sort_args = ['samtools', 'sort', bam_file, '-o', bam_sorted_file]\n process = subprocess.Popen(samtools_sort_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n samtools_index_args = ['samtools', 'index', bam_sorted_file]\n process = subprocess.Popen(samtools_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n return bam_sorted_file\n\n\ndef run_pilon(bam_sorted_file, reference_genome, pilon_output, threads,\n pilon_path):\n \"\"\" Run Pilon \"\"\"\n print('Run Pilon')\n pilon_args = ['java', '-Xmx16G', '-jar', pilon_path, '--genome',\n reference_genome, '--frags', bam_sorted_file, '--threads', threads,\n '--output', pilon_output]\n process = subprocess.Popen(pilon_args, stdin=subprocess.PIPE, stdout=\n subprocess.PIPE)\n out, err = process.communicate()\n print(out)\n with open(pilon_output + '.log', 'w') as pilon_log:\n pilon_log.write(out)\n\n\ndef main():\n \"\"\" Main Application \"\"\"\n args = parse_arguments()\n logging.basicConfig(filename='logging.log', level=logging.DEBUG)\n output = args.output\n reference_genome = args.draft_seq\n if args.reverse:\n reverse_read = args.reverse\n else:\n reverse_read = ''\n forward_read = args.forward\n threads = args.threads\n iterations = args.iterations\n pilon_path = args.pilon\n 
logging.info('OUTPUT DIRECTORY:' + output)\n logging.info('READS: ' + forward_read + ', ' + reverse_read)\n logging.info('THREADS: ' + threads)\n logging.info('ITERATIONS: ' + iterations)\n pilon_output = os.path.join(output, 'pilon_1')\n os.mkdir(output)\n logging.info('START CORRECTION')\n for i in range(int(iterations)):\n logging.info('ITERATION: ' + str(i + 1))\n logging.info('REFERENCE GENOME: ' + reference_genome)\n logging.info('PILON OUTPUT: ' + pilon_output)\n sam_file = run_bwa(reference_genome, forward_read, reverse_read,\n threads, output, i)\n bam_sorted_file = run_samtools(sam_file, threads, output, i)\n run_pilon(bam_sorted_file, reference_genome, pilon_output, threads,\n pilon_path)\n reference_genome = os.path.join(output, 'pilon_' + str(i + 1) +\n '.fasta')\n pilon_output = os.path.join(output, 'pilon_' + str(i + 2))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\" Script to run pilon iteratively to correct genome assemblies \"\"\"\nimport os\nimport argparse\nimport logging\nimport subprocess\n\n\ndef parse_arguments():\n \"\"\" Parse command line arguments \"\"\"\n # Create parser\n parser = argparse.ArgumentParser(description='Run pilon many times')\n \n # Add arguments\n parser.add_argument('--draft_seq', '-d', required=True,\n help='Draft sequence to correct')\n parser.add_argument('--forward', '-f', required=True,\n help='Reads to use for correction')\n parser.add_argument('--reverse', '-r',\n help='Reverse read for correction')\n parser.add_argument('--output', '-o', required=True,\n help='Output directory')\n parser.add_argument('--iterations', '-i', required=True,\n help='How many times to run pilon')\n parser.add_argument('--threads', '-t', required=True,\n help='Threads to use')\n parser.add_argument('--pilon', '-p', required=True,\n help='Path to pilon.jar')\n # Parse arguments\n args = parser.parse_args()\n\n return args\n\ndef run_bwa(reference_genome, forward_read, reverse_read, threads, output, i):\n \"\"\" Run bwa to align reads to reference genome \"\"\"\n # Index ref genome\n print('Align reads with BWA MEM')\n bwa_index_args = ['bwa', 'index', reference_genome]\n process = subprocess.Popen(bwa_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n \n # Align reads to reference genome\n bwa_mem_args = ['bwa', 'mem', '-t', threads, '-x', 'ont2d', reference_genome, forward_read, reverse_read] \n process = subprocess.Popen(bwa_mem_args, stdin=subprocess.PIPE, \n stdout=subprocess.PIPE)\n out, err = process.communicate()\n\n # Write alignment to file\n sam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sam')\n with open(sam_file, 'w') as bwa_mem_out:\n bwa_mem_out.write(out)\n \n return sam_file\n\ndef run_samtools(sam_file, threads, output, i):\n \"\"\" Sort and convert to BAM using samtools \"\"\"\n\n # Conver the SAM-file to a BAM-file\n print('Convert SAM-file to BAM-file')\n bam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.bam')\n samtools_view_args = ['samtools', 'view', '-@', threads, '-bS', '-o',\n bam_file, sam_file]\n process = subprocess.Popen(samtools_view_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n \n # Sort and return the BAM-fil\n print('Sort BAM-file')\n bam_sorted_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sorted.bam')\n samtools_sort_args = ['samtools', 'sort', bam_file, '-o', bam_sorted_file]\n process = subprocess.Popen(samtools_sort_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n\n # Index sorted BAM-file\n samtools_index_args = ['samtools', 'index', bam_sorted_file]\n process = subprocess.Popen(samtools_index_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n\n return bam_sorted_file\n\ndef run_pilon(bam_sorted_file, reference_genome, pilon_output, threads, pilon_path):\n \"\"\" Run Pilon \"\"\"\n print('Run Pilon')\n pilon_args = ['java', '-Xmx16G', '-jar', pilon_path, '--genome', reference_genome,\n '--frags', bam_sorted_file, '--threads', threads, '--output',\n pilon_output]\n process = subprocess.Popen(pilon_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out, err = process.communicate()\n print(out)\n with open(pilon_output + '.log', 'w') as pilon_log:\n pilon_log.write(out)\n\ndef main():\n \"\"\" Main Application \"\"\"\n # Get arguments\n args = parse_arguments()\n \n 
logging.basicConfig(filename='logging.log', level=logging.DEBUG)\n \n output = args.output\n reference_genome = args.draft_seq\n if args.reverse:\n reverse_read = args.reverse\n else:\n reverse_read = \"\"\n forward_read = args.forward\n threads = args.threads\n iterations = args.iterations\n pilon_path = args.pilon\n logging.info('OUTPUT DIRECTORY:' + output)\n logging.info('READS: ' + forward_read + ', ' + reverse_read) \n logging.info('THREADS: ' + threads)\n logging.info('ITERATIONS: ' + iterations)\n\n # Set pilon output\n pilon_output = os.path.join(output, 'pilon_1')\n os.mkdir(output)\n\n logging.info('START CORRECTION')\n for i in range(int(iterations)):\n # Log\n logging.info('ITERATION: ' + str(i + 1))\n logging.info('REFERENCE GENOME: ' + reference_genome)\n logging.info('PILON OUTPUT: ' + pilon_output)\n sam_file = run_bwa(reference_genome, forward_read, reverse_read, threads, output, i)\n bam_sorted_file = run_samtools(sam_file, threads, output, i)\n run_pilon(bam_sorted_file, reference_genome, pilon_output, threads, pilon_path)\n\n # Set pilon output to new reference\n reference_genome = os.path.join(output, 'pilon_' + str(i + 1) + '.fasta')\n pilon_output = os.path.join(output, 'pilon_' + str(i + 2))\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
from mcpi.minecraft import Minecraft
from time import sleep
import random

mc = Minecraft.create()
myID = mc.getPlayerEntityId("Baymax1112")
# Ore block IDs: gold ore, iron ore, coal ore, diamond ore, redstone ore, emerald ore, diamond block
mineral = [14, 15, 16, 56, 73, 129, 57]
# Twice a second, fill a 3-wide, 7-tall, 3-deep cuboid around the player with a random mineral
while True:
    sleep(0.5)
    r = random.choice(mineral)
    x, y, z = mc.entity.getTilePos(myID)
    mc.setBlocks(x+1, y+3, z+1, x-1, y-3, z-1, r)
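# Hypothetical cleanup sketch (not in the original): the same setBlocks call with
# block id 0 (air) would clear the cuboid again:
#   mc.setBlocks(x+1, y+3, z+1, x-1, y-3, z-1, 0)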
|
normal
|
{
"blob_id": "b28ae19f31ae746f901dea645dfeaa211a15cd31",
"index": 1879,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n sleep(0.5)\n r = random.choice(mineral)\n x, y, z = mc.entity.getTilePos(myID)\n mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)\n",
"step-3": "<mask token>\nmc = Minecraft.create()\nmyID = mc.getPlayerEntityId('Baymax1112')\nmineral = [14, 15, 16, 56, 73, 129, 57]\nwhile True:\n sleep(0.5)\n r = random.choice(mineral)\n x, y, z = mc.entity.getTilePos(myID)\n mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)\n",
"step-4": "from mcpi.minecraft import Minecraft\nfrom time import sleep\nimport random\nmc = Minecraft.create()\nmyID = mc.getPlayerEntityId('Baymax1112')\nmineral = [14, 15, 16, 56, 73, 129, 57]\nwhile True:\n sleep(0.5)\n r = random.choice(mineral)\n x, y, z = mc.entity.getTilePos(myID)\n mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)\n",
"step-5": "from mcpi.minecraft import Minecraft\nfrom time import sleep\nimport random \nmc = Minecraft.create()\nmyID=mc.getPlayerEntityId(\"Baymax1112\")\nmineral = [14,15,16,56,73,129,57]\nwhile True:\n sleep(0.5)\n r=random.choice(mineral)\n x,y,z = mc.entity.getTilePos(myID)\n mc.setBlocks(x+1,y+3,z+1,x-1,y-3,z-1,r)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Remove duplicates from a sorted array:
Given a sorted array, remove the duplicates in place so that each element appears only once,
and return the new length of the array.
Do not allocate extra space for another array; you must modify the input array in place
using O(1) extra memory.
Example 1:
Given nums = [1,1,2],
the function should return the new length 2, with the first two elements of nums changed to 1, 2.
You do not need to care about the elements beyond the new length.
Example 2:
Given nums = [0,0,1,1,1,2,2,3,3,4],
the function should return the new length 5, with the first five elements of nums changed to 0, 1, 2, 3, 4.
You do not need to care about the elements beyond the new length.
'''
def delete_sort_array(origin_list):
    if len(origin_list) == 0:
        return 0
    # Iterate backwards so pop() never shifts indices that are still to be visited;
    # the original forward scan over a copy skipped elements after each pop and
    # missed runs of three or more duplicates (e.g. [0,0,1,1,1,...]).
    for index in range(len(origin_list) - 1, 0, -1):
        if origin_list[index] == origin_list[index - 1]:
            origin_list.pop(index)
    return len(origin_list)
print(delete_sort_array([1,1,5,5,6,6,13,14]))
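# Sanity checks against the docstring examples (added here; not in the original):
assert delete_sort_array([1, 1, 2]) == 2
assert delete_sort_array([0, 0, 1, 1, 1, 2, 2, 3, 3, 4]) == 5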
|
normal
|
{
"blob_id": "ac0f0fbb9bcb450ac24198069ef8bea8b049ef47",
"index": 5824,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef delete_sort_array(origin_list):\n if len(origin_list) == 0:\n return 0\n elif len(origin_list) == 1:\n return 1\n else:\n for index, item in enumerate(origin_list[:]):\n if index + 1 < len(origin_list):\n if origin_list[index] == origin_list[index + 1]:\n origin_list.pop(index)\n return len(origin_list)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef delete_sort_array(origin_list):\n if len(origin_list) == 0:\n return 0\n elif len(origin_list) == 1:\n return 1\n else:\n for index, item in enumerate(origin_list[:]):\n if index + 1 < len(origin_list):\n if origin_list[index] == origin_list[index + 1]:\n origin_list.pop(index)\n return len(origin_list)\n\n\nprint(delete_sort_array([1, 1, 5, 5, 6, 6, 13, 14]))\n",
"step-4": "'''\n 删除排序数组中的重复项:\n\n给定一个排序数组,你需要在原地删除重复出现的元素,使得每个元素只出现一次,返回移除后数组的新长度。\n\n不要使用额外的数组空间,你必须在原地修改输入数组并在使用 O(1) 额外空间的条件下完成。\n\n示例 1:\n\n给定数组 nums = [1,1,2],\n\n函数应该返回新的长度 2, 并且原数组 nums 的前两个元素被修改为 1, 2。\n\n你不需要考虑数组中超出新长度后面的元素。\n示例 2:\n\n给定 nums = [0,0,1,1,1,2,2,3,3,4],\n\n函数应该返回新的长度 5, 并且原数组 nums 的前五个元素被修改为 0, 1, 2, 3, 4。\n\n你不需要考虑数组中超出新长度后面的元素。\n\n'''\n\ndef delete_sort_array(origin_list):\n if len(origin_list) == 0:\n return 0\n elif len(origin_list) == 1:\n return 1\n else:\n for index,item in enumerate(origin_list[:]):\n if index+1 < len(origin_list):\n if origin_list[index] == origin_list[index+1]:\n origin_list.pop(index)\n return len(origin_list)\nprint(delete_sort_array([1,1,5,5,6,6,13,14]))\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# My solution
with open("sequence.protein.2.fasta", "w") as fw:
    with open("sequence.protein.fasta", "r") as fr:
        for line in fr:
            fw.write(line)

# Instructor's solution (left unfinished in the original)
# fr = open('sequence.protein.fasta', 'r')
# lines = fr.readlines()
# seq_list = list()
# for line in lines:
|
flexible
|
{
"blob_id": "84fb0e364ee3cd846148abfc9326f404f008c510",
"index": 7908,
"step-1": "<mask token>\n",
"step-2": "with open('sequence.protein.2.fasta', 'w') as fw:\n with open('sequence.protein.fasta', 'r') as fr:\n for line in fr:\n fw.write(line)\n",
"step-3": "# 내 풀이\nwith open(\"sequence.protein.2.fasta\", \"w\") as fw:\n with open(\"sequence.protein.fasta\", \"r\") as fr:\n for line in fr:\n fw.write(line)\n\n# 강사님 풀이\n# fr = open('sequence.protein.fasta','r'):\n# lines=fr.readlines()\n# seq_list=list()\n# for line in lines:\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class Model:
    """Base class of no-op hooks; concrete simulation models override what they need."""

    def derivativesDependsOn(self, models):
        return []

    def derivedVariablesDependsOn(self, models):
        return []

    def initializeSimplifiedModel(self, timeHistory, stateHistory, derivedVariablesHistory):
        return False

    def computeSimplifiedState(self, args, time):
        return []

    def computeSimplifiedDerivedVariables(self, args, time):
        return []

    def initializeState(self):
        return []

    def computeDerivatives(self, t, state, derived, models):
        return []

    def computeDerivedVariables(self, t, state):
        return []
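# A hypothetical subclass sketch (names and physics are illustrative, not from the original):
# class FallingBody(Model):
#     def initializeState(self):
#         return [0.0, 0.0]  # position, velocity
#     def computeDerivatives(self, t, state, derived, models):
#         return [state[1], -9.81]  # dx/dt = v, dv/dt = -g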
|
normal
|
{
"blob_id": "b27e89ff799f26b87a61254e1c4a5f782fcbe605",
"index": 2540,
"step-1": "class Model:\n <mask token>\n\n def derivedVariablesDependsOn(self, models):\n return []\n <mask token>\n <mask token>\n\n def computeSimplifiedDerivedVariables(self, args, time):\n return []\n\n def initializeState(self):\n return []\n <mask token>\n <mask token>\n",
"step-2": "class Model:\n <mask token>\n\n def derivedVariablesDependsOn(self, models):\n return []\n\n def initializeSimplifiedModel(self, timeHistory, stateHistory,\n derivedVariablesHistory):\n return False\n <mask token>\n\n def computeSimplifiedDerivedVariables(self, args, time):\n return []\n\n def initializeState(self):\n return []\n <mask token>\n <mask token>\n",
"step-3": "class Model:\n <mask token>\n\n def derivedVariablesDependsOn(self, models):\n return []\n\n def initializeSimplifiedModel(self, timeHistory, stateHistory,\n derivedVariablesHistory):\n return False\n\n def computeSimplifiedState(self, args, time):\n return []\n\n def computeSimplifiedDerivedVariables(self, args, time):\n return []\n\n def initializeState(self):\n return []\n\n def computeDerivatives(self, t, state, derived, models):\n return []\n <mask token>\n",
"step-4": "class Model:\n <mask token>\n\n def derivedVariablesDependsOn(self, models):\n return []\n\n def initializeSimplifiedModel(self, timeHistory, stateHistory,\n derivedVariablesHistory):\n return False\n\n def computeSimplifiedState(self, args, time):\n return []\n\n def computeSimplifiedDerivedVariables(self, args, time):\n return []\n\n def initializeState(self):\n return []\n\n def computeDerivatives(self, t, state, derived, models):\n return []\n\n def computeDerivedVariables(self, t, state):\n return []\n",
"step-5": "class Model:\n def derivativesDependsOn(self, models):\n return []\n\n def derivedVariablesDependsOn(self, models):\n return []\n \n def initializeSimplifiedModel(self, timeHistory, stateHistory, derivedVariablesHistory):\n return False\n\n def computeSimplifiedState(self, args, time):\n return []\n\n def computeSimplifiedDerivedVariables(self, args, time):\n return []\n\n def initializeState(self):\n return []\n\n def computeDerivatives(self, t, state, derived, models):\n return []\n\n def computeDerivedVariables(self, t, state):\n return []\n\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
/home/openerp/production/extra-addons/productivity_analysis/report/productivity_analysis.py
|
normal
|
{
"blob_id": "6531833a4fe57c15c0668cee9015c7d43491427a",
"index": 341,
"step-1": "/home/openerp/production/extra-addons/productivity_analysis/report/productivity_analysis.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class FieldDesigner:
    """
    Designs a field for BattleShips, accepts field height and width
    """

    def __init__(self):
        self.field = []

    def design_field(self, height, width):
        # Builds `width` rows of `height` water cells ('~')
        self.field = [['~' for __ in range(height)] for __ in range(width)]
        return self.field

    def __str__(self):
        return '\n'.join(map(str, self.field))
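# Hypothetical usage (not in the original):
#   designer = FieldDesigner()
#   designer.design_field(10, 10)
#   print(designer)  # one "['~', '~', ...]" row per line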
|
flexible
|
{
"blob_id": "c812419e7e024b0bb1207832b2b4a726ef61b272",
"index": 9137,
"step-1": "class FieldDesigner:\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-2": "class FieldDesigner:\n <mask token>\n\n def __init__(self):\n self.field = []\n <mask token>\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-3": "class FieldDesigner:\n <mask token>\n\n def __init__(self):\n self.field = []\n\n def design_field(self, height, width):\n self.field = [['~' for __ in range(height)] for __ in range(width)]\n return self.field\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-4": "class FieldDesigner:\n \"\"\"\n Designs a field for BattleShips, accepts field height and width\n \"\"\"\n\n def __init__(self):\n self.field = []\n\n def design_field(self, height, width):\n self.field = [['~' for __ in range(height)] for __ in range(width)]\n return self.field\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-5": "class FieldDesigner:\n \"\"\"\n Designs a field for BattleShips, accepts field height and width\n \"\"\"\n def __init__(\n self,\n ):\n self.field = []\n\n def design_field(\n self,\n height,\n width,\n ):\n\n self.field = [[\n '~' for __\n in range(height)]\n for __ in range(width)\n ]\n\n return self.field\n\n def __str__(\n self,\n ):\n return '\\n'.join(map(str, self.field))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import re
import gpxpy


def extract_gpx_data(gpx_file_path, attribute='elevation'):
    """Reads in a GPX file and returns a list of values
    for a specified GPX attribute.

    Parameters
    ----------
    gpx_file_path : str
        File path to the GPX file (.gpx extension).

    attribute: str
        Name of the attribute to extract. Default
        value is 'elevation'. Must match one of the
        entries in the function-defined list.

    Returns
    -------
    data : list
        List containing float values of the extracted
        GPX attributes.
    """
    # Open GPX file in context manager and parse with gpxpy
    with open(gpx_file_path) as gpx_file:
        gpx = gpxpy.parse(gpx_file)

    # Define GPX main attributes
    primary_attributes = [
        "latitude",
        "longitude",
        "elevation",
        "time"
    ]

    # Define GPX extension attributes
    secondary_attributes = [
        "cadence", "distance", "altitude",
        "energy", "speed", "verticalSpeed"
    ]

    # Check if specified attribute is in main
    # GPX attributes (lat/lon/elevation/time)
    if attribute in primary_attributes:

        # Create list of values for attribute
        data = [{
            "latitude": point.latitude,
            "longitude": point.longitude,
            "elevation": point.elevation,
            "time": point.time
        }.get(attribute)
            for track in gpx.tracks
            for segment in track.segments
            for point in segment.points
        ]

        print(f"Extracted {attribute} data.")

    # Check if specified attribute is in
    # GPX extensions (cadence/distance/altitude
    # /energy/speed/verticalSpeed)
    elif attribute in secondary_attributes:

        # Define pattern for attribute to match on
        pattern = re.compile(f"^.*{attribute}.*$")

        # Create list of values for attribute
        data = [
            float(extension.text)
            for track in gpx.tracks
            for segment in track.segments
            for point in segment.points
            for extension in point.extensions
            if pattern.match(extension.tag)
        ]

        print(f"Extracted {attribute} data.")

    else:
        data = []
        print("Invalid attribute. Must be one of the following: "
              "latitude, longitude, elevation, time, cadence, "
              "distance, altitude, energy, speed, verticalSpeed.")

    # List of attribute values
    return data
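# Minimal usage sketch (the file name 'ride.gpx' is illustrative, not from the original):
if __name__ == "__main__":
    elevations = extract_gpx_data("ride.gpx")  # defaults to 'elevation'
    print(f"Parsed {len(elevations)} elevation values.")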
|
normal
|
{
"blob_id": "cc6d18785eff0406ff7f38f18f15476375e31b76",
"index": 9254,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n primary_attributes = ['latitude', 'longitude', 'elevation', 'time']\n secondary_attributes = ['cadence', 'distance', 'altitude', 'energy',\n 'speed', 'verticalSpeed']\n if attribute in primary_attributes:\n data = [{'latitude': point.latitude, 'longitude': point.longitude,\n 'elevation': point.elevation, 'time': point.time}.get(attribute\n ) for track in gpx.tracks for segment in track.segments for\n point in segment.points]\n print(f'Extracted {attribute} data.')\n elif attribute in secondary_attributes:\n pattern = re.compile(f'^.*{attribute}.*$')\n data = [float(extension.text) for track in gpx.tracks for segment in\n track.segments for point in segment.points for extension in\n point.extensions if pattern.match(extension.tag)]\n print(f'Extracted {attribute} data.')\n else:\n data = []\n print(\n 'Invalid attribute. Must be one of the following: latitude, longitude, elevation, time, cadence distance, altitude, energy, speed, verticalSpeed.'\n )\n return data\n",
"step-3": "import re\nimport gpxpy\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n primary_attributes = ['latitude', 'longitude', 'elevation', 'time']\n secondary_attributes = ['cadence', 'distance', 'altitude', 'energy',\n 'speed', 'verticalSpeed']\n if attribute in primary_attributes:\n data = [{'latitude': point.latitude, 'longitude': point.longitude,\n 'elevation': point.elevation, 'time': point.time}.get(attribute\n ) for track in gpx.tracks for segment in track.segments for\n point in segment.points]\n print(f'Extracted {attribute} data.')\n elif attribute in secondary_attributes:\n pattern = re.compile(f'^.*{attribute}.*$')\n data = [float(extension.text) for track in gpx.tracks for segment in\n track.segments for point in segment.points for extension in\n point.extensions if pattern.match(extension.tag)]\n print(f'Extracted {attribute} data.')\n else:\n data = []\n print(\n 'Invalid attribute. Must be one of the following: latitude, longitude, elevation, time, cadence distance, altitude, energy, speed, verticalSpeed.'\n )\n return data\n",
"step-4": "import re\nimport gpxpy\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n # Open GPX file in context manager and parse with gpxpy\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n\n # Define GPX main attributes\n primary_attributes = [\n \"latitude\",\n \"longitude\",\n \"elevation\",\n \"time\"\n ]\n\n # Define GPX extension attributes\n secondary_attributes = [\n \"cadence\", \"distance\", \"altitude\",\n \"energy\", \"speed\", \"verticalSpeed\"\n ]\n\n # Check if specified attribute is in main\n # GPX attributes (lat/lon/elevation/time)\n if attribute in primary_attributes:\n\n # Create list of values for attribute\n data = [{\n \"latitude\": point.latitude,\n \"longitude\": point.longitude,\n \"elevation\": point.elevation,\n \"time\": point.time\n }.get(attribute)\n for track in gpx.tracks\n for segment in track.segments\n for point in segment.points\n ]\n\n print(f\"Extracted {attribute} data.\")\n\n # Check if specified attribute is in\n # GPX extensions (cadence/distance/altitude\n # /energy/speed/verticalSpeed)\n elif attribute in secondary_attributes:\n\n # Define pattern for attribute to match on\n pattern = re.compile(f\"^.*{attribute}.*$\")\n\n # Create list of values for attribute\n data = [\n float(extension.text)\n for track in gpx.tracks\n for segment in track.segments\n for point in segment.points\n for extension in point.extensions\n if pattern.match(extension.tag)\n ]\n\n print(f\"Extracted {attribute} data.\")\n\n else:\n data = []\n print(\"Invalid attribute. Must be one of the following: \"\n \"latitude, longitude, elevation, time, cadence \"\n \"distance, altitude, energy, speed, verticalSpeed.\")\n\n # List of attribute values\n return data\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from a10sdk.common.A10BaseClass import A10BaseClass


class MacAgeTime(A10BaseClass):
    """Class Description::
    Set Aging period for all MAC Interfaces.

    Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    :param aging_time: {"description": "Set aging period in seconds for all MAC interfaces (default 300 seconds)", "format": "number", "default": 300, "optional": true, "maximum": 600, "minimum": 10, "type": "number"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "mac-age-time"
        self.a10_url = "/axapi/v3/mac-age-time"
        self.DeviceProxy = ""
        self.aging_time = ""

        for keys, value in kwargs.items():
            setattr(self, keys, value)
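
# Minimal usage sketch (illustrative; not part of the generated module).
# kwargs are applied via setattr, so resource fields can be set at
# construction time; the schema above bounds aging_time to 10-600 seconds.
# The proxy object below is an assumption -- see common/device_proxy.py for
# how a real DeviceProxy is obtained.
# mat = MacAgeTime(aging_time=120, DeviceProxy=my_device_proxy)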
|
normal
|
{
"blob_id": "f08677430e54822abbce61d0cac5a6fea14d3872",
"index": 6078,
"step-1": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n <mask token>\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-3": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-4": "from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass MacAgeTime(A10BaseClass):\n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-5": "from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass MacAgeTime(A10BaseClass):\n \n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n def __init__(self, **kwargs):\n self.ERROR_MSG = \"\"\n self.required=[]\n self.b_key = \"mac-age-time\"\n self.a10_url=\"/axapi/v3/mac-age-time\"\n self.DeviceProxy = \"\"\n self.aging_time = \"\"\n\n for keys, value in kwargs.items():\n setattr(self,keys, value)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import discord
from discord.ext import commands
from discord.ext.commands import Bot
import asyncio
import random
import requests
import os


# Discord Tech Stuff
BOT_PREFIX = "!"

# Bot is itself a Client subclass, so a single assignment is all that is needed
client = Bot(command_prefix=BOT_PREFIX)


# Functions of the Funny Coin
@client.command()
async def wasitfunny():
    possible_responses = [
        "Per the judgement from the committee of comedy, we have decided that the joke was indeed funny",
        "Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin",
    ]
    await client.say(random.choice(possible_responses))


@client.command()
async def isitfunny(funny_subject):
    responses = [
        "Nah that wasn't really funny",
        "There is no funny present",
        "YOU FORGOT THE FUNNY",
        "There is no comedy present here",
        "hahaaaaa",
        "Funnt",
        "Hey man that's pretty funny thanks for sharing",
        "jajajajajajajajajaja",
    ]
    await client.say("regarding " + str(funny_subject) + ", " + random.choice(responses))


@client.command()
async def isitironic(irony_subjects):
    irony_responses = [
        "one irony point",
        "that's pretty ironic man",
        "ironic",
        "no irony present",
        "minus irony point",
        "where is the irony? I was told there would be irony?",
    ]
    await client.say(random.choice(irony_responses))


# Alex, Me, Chris, Anthony coins. Want to add a system that has coins for
# everyone, with a per-user profile for coins.
afc = 0
mfc = 0
cfc = 0
anfc = 0


@client.command()
async def alexfc(anum):
    global afc
    afc += int(anum)
    await client.say("Alex has " + str(afc) + " funny coins")


@client.command()
async def muhfc(mnum):
    global mfc
    mfc += int(mnum)
    await client.say("Muhammad has " + str(mfc) + " funny coins")


@client.command()
async def chrisfc(cnum):
    global cfc
    cfc += int(cnum)
    await client.say("Chris has " + str(cfc) + " funny coins")


@client.command()
async def antfc(anthnum):
    global anfc
    anfc += int(anthnum)
    await client.say("Anthony has " + str(anfc) + " funny coins")


# The bot token is looked up by its string key in the environment
client.run(str(os.environ.get("TOKEN")))
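
# Compatibility sketch (assumption about target library versions): `client.say`
# exists only in the legacy discord.py 0.16 "async" branch. On discord.py >= 1.0
# each command receives a Context argument and replies with `ctx.send`, e.g.:
#
# @client.command()
# async def wasitfunny(ctx):
#     await ctx.send(random.choice(possible_responses))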
|
flexible
|
{
"blob_id": "f047afeb6462ab01a8fea1f3c8693608335eb960",
"index": 3488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@client.command()\nasync def wasitfunny():\n possible_responses = [\n 'Per the judgement from the committee of comedy, we have decided that the joke was indeed funny'\n ,\n 'Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin'\n ]\n await client.say(random.choice(possible_responses))\n\n\n@client.command()\nasync def isitfunny(funny_subject):\n responses = [\"Nah that wasn't really funny\",\n 'There is no funny present', 'YOU FORGOT THE FUNNY',\n 'There is no comedy present here', 'hahaaaaa', 'Funnt',\n \"Hey man that's pretty funny thanks for sharing\",\n 'jajajajajajajajajaja']\n await client.say('regarding ' + str(funny_subject) + ', ' + random.\n choice(responses))\n\n\n@client.command()\nasync def isitironic(irony_subjects):\n irony_responses = ['one irony point', \"that's pretty ironic man\",\n 'ironic', 'no irony present', 'minus irony point',\n 'where is the irony? I was told there would be irony?']\n await client.say(random.choice(irony_responses))\n\n\n<mask token>\n\n\n@client.command()\nasync def alexfc(anum):\n global afc\n afc += int(anum)\n await client.say('Alex has ' + str(afc) + ' funny coins')\n\n\n@client.command()\nasync def muhfc(mnum):\n global mfc\n mfc += int(mnum)\n await client.say('Muhammad has ' + str(mfc) + ' funny coins')\n\n\n@client.command()\nasync def chrisfc(cnum):\n global cfc\n cfc += int(cnum)\n await client.say('Chris has ' + str(cfc) + ' funny coins')\n\n\n@client.command()\nasync def antfc(anthnum):\n global anfc\n anfc += int(anthnum)\n await client.say('Anthony has ' + str(anfc) + ' funny coins')\n\n\nclient.run(str(os.environ.get(TOKEN)))\n",
"step-3": "<mask token>\nBOT_PREFIX = '!'\nclient = discord.Client()\nclient = Bot(command_prefix=BOT_PREFIX)\n\n\n@client.command()\nasync def wasitfunny():\n possible_responses = [\n 'Per the judgement from the committee of comedy, we have decided that the joke was indeed funny'\n ,\n 'Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin'\n ]\n await client.say(random.choice(possible_responses))\n\n\n@client.command()\nasync def isitfunny(funny_subject):\n responses = [\"Nah that wasn't really funny\",\n 'There is no funny present', 'YOU FORGOT THE FUNNY',\n 'There is no comedy present here', 'hahaaaaa', 'Funnt',\n \"Hey man that's pretty funny thanks for sharing\",\n 'jajajajajajajajajaja']\n await client.say('regarding ' + str(funny_subject) + ', ' + random.\n choice(responses))\n\n\n@client.command()\nasync def isitironic(irony_subjects):\n irony_responses = ['one irony point', \"that's pretty ironic man\",\n 'ironic', 'no irony present', 'minus irony point',\n 'where is the irony? I was told there would be irony?']\n await client.say(random.choice(irony_responses))\n\n\nafc = 0\nmfc = 0\ncfc = 0\nanfc = 0\n\n\n@client.command()\nasync def alexfc(anum):\n global afc\n afc += int(anum)\n await client.say('Alex has ' + str(afc) + ' funny coins')\n\n\n@client.command()\nasync def muhfc(mnum):\n global mfc\n mfc += int(mnum)\n await client.say('Muhammad has ' + str(mfc) + ' funny coins')\n\n\n@client.command()\nasync def chrisfc(cnum):\n global cfc\n cfc += int(cnum)\n await client.say('Chris has ' + str(cfc) + ' funny coins')\n\n\n@client.command()\nasync def antfc(anthnum):\n global anfc\n anfc += int(anthnum)\n await client.say('Anthony has ' + str(anfc) + ' funny coins')\n\n\nclient.run(str(os.environ.get(TOKEN)))\n",
"step-4": "import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nimport asyncio\nimport random\nimport requests\nimport os\nBOT_PREFIX = '!'\nclient = discord.Client()\nclient = Bot(command_prefix=BOT_PREFIX)\n\n\n@client.command()\nasync def wasitfunny():\n possible_responses = [\n 'Per the judgement from the committee of comedy, we have decided that the joke was indeed funny'\n ,\n 'Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin'\n ]\n await client.say(random.choice(possible_responses))\n\n\n@client.command()\nasync def isitfunny(funny_subject):\n responses = [\"Nah that wasn't really funny\",\n 'There is no funny present', 'YOU FORGOT THE FUNNY',\n 'There is no comedy present here', 'hahaaaaa', 'Funnt',\n \"Hey man that's pretty funny thanks for sharing\",\n 'jajajajajajajajajaja']\n await client.say('regarding ' + str(funny_subject) + ', ' + random.\n choice(responses))\n\n\n@client.command()\nasync def isitironic(irony_subjects):\n irony_responses = ['one irony point', \"that's pretty ironic man\",\n 'ironic', 'no irony present', 'minus irony point',\n 'where is the irony? I was told there would be irony?']\n await client.say(random.choice(irony_responses))\n\n\nafc = 0\nmfc = 0\ncfc = 0\nanfc = 0\n\n\n@client.command()\nasync def alexfc(anum):\n global afc\n afc += int(anum)\n await client.say('Alex has ' + str(afc) + ' funny coins')\n\n\n@client.command()\nasync def muhfc(mnum):\n global mfc\n mfc += int(mnum)\n await client.say('Muhammad has ' + str(mfc) + ' funny coins')\n\n\n@client.command()\nasync def chrisfc(cnum):\n global cfc\n cfc += int(cnum)\n await client.say('Chris has ' + str(cfc) + ' funny coins')\n\n\n@client.command()\nasync def antfc(anthnum):\n global anfc\n anfc += int(anthnum)\n await client.say('Anthony has ' + str(anfc) + ' funny coins')\n\n\nclient.run(str(os.environ.get(TOKEN)))\n",
"step-5": "import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nimport asyncio\nimport random\nimport requests\nimport os\n\n\n#Discord Tech Stuff\nBOT_PREFIX = (\"!\")\n\n\nclient = discord.Client()\nclient = Bot(command_prefix=BOT_PREFIX)\n\n#Functions of the Funny Coin\n@client.command()\nasync def wasitfunny():\n possible_responses = [\n \"Per the judgement from the committee of comedy, we have decided that the joke was indeed funny\",\n \"Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin\",\n ]\n await client.say(random.choice(possible_responses))\n\n@client.command()\nasync def isitfunny(funny_subject):\n responses = [\n \"Nah that wasn't really funny\",\n \"There is no funny present\",\n \"YOU FORGOT THE FUNNY\",\n \"There is no comedy present here\",\n \"hahaaaaa\",\n \"Funnt\",\n \"Hey man that's pretty funny thanks for sharing\",\n \"jajajajajajajajajaja\",\n ]\n await client.say(\"regarding \" + str(funny_subject) + \", \" + random.choice(responses))\n\n\n@client.command()\nasync def isitironic(irony_subjects):\n irony_responses = [\n \"one irony point\",\n \"that's pretty ironic man\",\n \"ironic\",\n \"no irony present\",\n \"minus irony point\",\n \"where is the irony? I was told there would be irony?\",\n ]\n await client.say(random.choice(irony_responses))\n\n#Alex, Me, Chris, Anthony Coins, Want to add system that has coins for everyone and you can make a like profile for coins\nafc = 0\nmfc = 0\ncfc = 0\nanfc = 0\n\n\n@client.command()\nasync def alexfc(anum):\n global afc\n afc += int(anum)\n await client.say(\"Alex has \" + str(afc) + \" funny coins\")\n\n@client.command()\nasync def muhfc(mnum):\n global mfc\n mfc += int(mnum)\n await client.say(\"Muhammad has \" + str(mfc) + \" funny coins\")\n\n@client.command()\nasync def chrisfc(cnum):\n global cfc\n cfc += int(cnum)\n await client.say(\"Chris has \" + str(cfc) + \" funny coins\")\n\n@client.command()\nasync def antfc(anthnum):\n global anfc\n anfc += int(anthnum)\n await client.say(\"Anthony has \" + str(anfc) + \" funny coins\")\n\n \n\n\nclient.run(str(os.environ.get(TOKEN)))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Contest scoring
# [Problem] In a programming contest, 10 judges each score a contestant
# between 0 and 100. The contestant's final score is the average of the
# remaining 8 scores after dropping one highest and one lowest score.
# Write a program that implements this.
sc_lst = []
i = 1
while len(sc_lst) < 10:
    try:
        # prompt: "Judge %d, please enter a score:"
        sc = int(input('请第%d位评委打分:' % i))
        if sc > 0 and sc < 101:
            sc_lst.append(sc)
            i += 1
        else:
            # "Out of range, input is invalid"
            print('超出范围,输入无效')
    except ValueError:
        # "Please enter a number from 1 to 100"
        print('请输入1-100以内的数字')
max_sc = max(sc_lst)
min_sc = min(sc_lst)
sc_lst.remove(max_sc)
sc_lst.remove(min_sc)
ave_sc = sum(sc_lst) / len(sc_lst)
# "Dropped highest score %d and lowest score %d; the average is %d"
print('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))
print('end')
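
# A non-mutating alternative for the trimming step (minimal sketch): sort a
# copy and slice off both extremes instead of calling remove() twice.
# trimmed = sorted(sc_lst)[1:-1]
# ave_sc = sum(trimmed) / len(trimmed)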
|
flexible
|
{
"blob_id": "a17abd3947a946daf2c453c120f2e79d2ba60778",
"index": 901,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\n<mask token>\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\n<mask token>\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-3": "sc_lst = []\ni = 1\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\nmax_sc = max(sc_lst)\nmin_sc = min(sc_lst)\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\nave_sc = sum(sc_lst) / len(sc_lst)\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-4": "# 赛场统分\n# 【问题】在编程竞赛中,有10个评委为参赛的选手打分,分数为0 ~ 100分。\n# 选手最后得分为:去掉一个最高分和一个最低分后其余8个分数的平均值。请编写一个程序实现。\n\nsc_lst = []\ni = 1\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\n\nmax_sc = max(sc_lst)\nmin_sc = min(sc_lst)\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\nave_sc = sum(sc_lst) / len(sc_lst)\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models


# Create your models here.
class Tutorial(models.Model):
    web_title = models.CharField(max_length=200)
    web_content = models.TextField()
    web_published = models.DateTimeField("date published")

    def __str__(self):
        return self.web_title
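
# After changing the model, sync the database schema with the standard
# Django commands:
#   python manage.py makemigrations
#   python manage.py migrate
# The "date published" argument above is the field's verbose_name, shown in
# the admin and in forms.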
|
flexible
|
{
"blob_id": "32499688db51f701173ec0ea212c483bf902c109",
"index": 3048,
"step-1": "<mask token>\n\n\nclass Tutorial(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Tutorial(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.web_title\n",
"step-3": "<mask token>\n\n\nclass Tutorial(models.Model):\n web_title = models.CharField(max_length=200)\n web_content = models.TextField()\n web_published = models.DateTimeField('date published')\n\n def __str__(self):\n return self.web_title\n",
"step-4": "from django.db import models\n\n\nclass Tutorial(models.Model):\n web_title = models.CharField(max_length=200)\n web_content = models.TextField()\n web_published = models.DateTimeField('date published')\n\n def __str__(self):\n return self.web_title\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Tutorial(models.Model):\n\tweb_title = models.CharField(max_length=200)\n\tweb_content = models.TextField()\n\tweb_published = models.DateTimeField(\"date published\")\n\n\tdef __str__(self):\n\t\treturn self.web_title\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'meet.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName("Dialog")
        Dialog.resize(607, 723)
        self.start = QtWidgets.QLabel(Dialog)
        self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))
        self.start.setObjectName("start")
        self.startDate = QtWidgets.QDateEdit(Dialog)
        self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))
        self.startDate.setDate(QtCore.QDate(2017, 1, 1))
        self.startDate.setObjectName("startDate")
        self.end = QtWidgets.QLabel(Dialog)
        self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))
        self.end.setObjectName("end")
        self.endDate = QtWidgets.QDateEdit(Dialog)
        self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))
        self.endDate.setDate(QtCore.QDate(2017, 1, 1))
        self.endDate.setObjectName("endDate")
        self.name = QtWidgets.QLabel(Dialog)
        self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))
        self.name.setObjectName("name")
        self.nameEdit = QtWidgets.QLineEdit(Dialog)
        self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))
        self.nameEdit.setObjectName("nameEdit")
        self.athletes = QtWidgets.QLabel(Dialog)
        self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))
        self.athletes.setObjectName("athletes")
        self.addButton = QtWidgets.QPushButton(Dialog)
        self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))
        self.addButton.setObjectName("addButton")
        self.removeButton = QtWidgets.QPushButton(Dialog)
        self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))
        self.removeButton.setObjectName("removeButton")
        self.members = QtWidgets.QLabel(Dialog)
        self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))
        self.members.setObjectName("members")
        self.meetCount = QtWidgets.QLabel(Dialog)
        self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))
        self.meetCount.setObjectName("meetCount")
        self.meetCountEdit = QtWidgets.QLineEdit(Dialog)
        self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))
        self.meetCountEdit.setObjectName("meetCountEdit")
        self.sortitionButton = QtWidgets.QPushButton(Dialog)
        self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))
        self.sortitionButton.setObjectName("sortitionButton")
        self.cancel = QtWidgets.QPushButton(Dialog)
        self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))
        self.cancel.setObjectName("cancel")
        self.athletesList = QtWidgets.QListWidget(Dialog)
        self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))
        self.athletesList.setObjectName("athletesList")
        self.membersList = QtWidgets.QListWidget(Dialog)
        self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))
        self.membersList.setObjectName("membersList")
        self.city = QtWidgets.QLabel(Dialog)
        self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))
        self.city.setObjectName("city")
        self.cityEdit = QtWidgets.QLineEdit(Dialog)
        self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))
        self.cityEdit.setObjectName("cityEdit")
        self.main_referee = QtWidgets.QLabel(Dialog)
        self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))
        self.main_referee.setObjectName("main_referee")
        self.main_clerk = QtWidgets.QLabel(Dialog)
        self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))
        self.main_clerk.setObjectName("main_clerk")
        self.mainrefCBox = QtWidgets.QComboBox(Dialog)
        self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))
        self.mainrefCBox.setObjectName("mainrefCBox")
        self.mainclerkCBox = QtWidgets.QComboBox(Dialog)
        self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))
        self.mainclerkCBox.setObjectName("mainclerkCBox")
        self.refList = QtWidgets.QListWidget(Dialog)
        self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))
        self.refList.setObjectName("refList")
        self.refereeList = QtWidgets.QLabel(Dialog)
        self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))
        self.refereeList.setObjectName("refereeList")
        self.refColList = QtWidgets.QListWidget(Dialog)
        self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))
        self.refColList.setObjectName("refColList")
        self.refereeCol = QtWidgets.QLabel(Dialog)
        self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))
        self.refereeCol.setObjectName("refereeCol")
        self.raddButton = QtWidgets.QPushButton(Dialog)
        self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))
        self.raddButton.setObjectName("raddButton")
        self.rremoveButton = QtWidgets.QPushButton(Dialog)
        self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))
        self.rremoveButton.setObjectName("rremoveButton")
        self.wsortitionButton = QtWidgets.QPushButton(Dialog)
        self.wsortitionButton.setEnabled(True)
        self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))
        self.wsortitionButton.setAutoDefault(True)
        self.wsortitionButton.setDefault(False)
        self.wsortitionButton.setFlat(False)
        self.wsortitionButton.setObjectName("wsortitionButton")
        self.divrings = QtWidgets.QCheckBox(Dialog)
        self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))
        self.divrings.setObjectName("divrings")
        self.weightcatCBox = QtWidgets.QComboBox(Dialog)
        self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))
        self.weightcatCBox.setObjectName("weightcatCBox")
        self.weigthcat = QtWidgets.QLabel(Dialog)
        self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))
        self.weigthcat.setObjectName("weigthcat")
        self.round = QtWidgets.QLabel(Dialog)
        self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))
        self.round.setObjectName("round")
        self.stage = QtWidgets.QLabel(Dialog)
        self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))
        self.stage.setObjectName("stage")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        Dialog.setTabOrder(self.nameEdit, self.cityEdit)
        Dialog.setTabOrder(self.cityEdit, self.startDate)
        Dialog.setTabOrder(self.startDate, self.endDate)
        Dialog.setTabOrder(self.endDate, self.meetCountEdit)
        Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)
        Dialog.setTabOrder(self.weightcatCBox, self.divrings)
        Dialog.setTabOrder(self.divrings, self.athletesList)
        Dialog.setTabOrder(self.athletesList, self.addButton)
        Dialog.setTabOrder(self.addButton, self.removeButton)
        Dialog.setTabOrder(self.removeButton, self.membersList)
        Dialog.setTabOrder(self.membersList, self.sortitionButton)
        Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)
        Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)
        Dialog.setTabOrder(self.mainclerkCBox, self.refList)
        Dialog.setTabOrder(self.refList, self.raddButton)
        Dialog.setTabOrder(self.raddButton, self.rremoveButton)
        Dialog.setTabOrder(self.rremoveButton, self.refColList)
        Dialog.setTabOrder(self.refColList, self.wsortitionButton)
        Dialog.setTabOrder(self.wsortitionButton, self.cancel)

    def retranslateUi(self, Dialog):
        # The UI is localized in Russian; rough English equivalents:
        # "Создание соревнования" = "Create competition", "Начало" = "Start",
        # "Окончание" = "End", "Название" = "Name",
        # "Список спортсменов" = "Athlete list",
        # "Список участников" = "Participant list",
        # "Число боев в день" = "Bouts per day", "Жеребьевка" = "Draw",
        # "Отмена" = "Cancel", "Место проведения" = "Venue",
        # "Главный судья" = "Chief referee",
        # "Главный секретарь" = "Chief clerk", "Список судей" = "Referee list",
        # "Судейская коллегия" = "Referee panel",
        # "Без жеребьевки" = "Without a draw",
        # "Разбивать по рингам" = "Split by rings",
        # "Весовая категория" = "Weight class", "раунд" = "round",
        # "стадия" = "stage".
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Создание соревнования"))
        self.start.setText(_translate("Dialog", "Начало"))
        self.startDate.setDisplayFormat(_translate("Dialog", "dd.MM.yyyy"))
        self.end.setText(_translate("Dialog", "Окончание"))
        self.endDate.setDisplayFormat(_translate("Dialog", "dd.MM.yyyy"))
        self.name.setText(_translate("Dialog", "Название"))
        self.athletes.setText(_translate("Dialog", "Список спортсменов"))
        self.addButton.setText(_translate("Dialog", ">>"))
        self.removeButton.setText(_translate("Dialog", "<<"))
        self.members.setText(_translate("Dialog", "Список участников"))
        self.meetCount.setText(_translate("Dialog", "Число боев в день"))
        self.sortitionButton.setText(_translate("Dialog", "Жеребьевка"))
        self.cancel.setText(_translate("Dialog", "Отмена"))
        self.city.setText(_translate("Dialog", "Место проведения"))
        self.main_referee.setText(_translate("Dialog", "Главный судья"))
        self.main_clerk.setText(_translate("Dialog", "Главный секретарь"))
        self.refereeList.setText(_translate("Dialog", "Список судей"))
        self.refereeCol.setText(_translate("Dialog", "Судейская коллегия"))
        self.raddButton.setText(_translate("Dialog", ">>"))
        self.rremoveButton.setText(_translate("Dialog", "<<"))
        self.wsortitionButton.setText(_translate("Dialog", "Без жеребьевки"))
        self.divrings.setText(_translate("Dialog", "Разбивать по рингам"))
        self.weigthcat.setText(_translate("Dialog", "Весовая категория"))
        self.round.setText(_translate("Dialog", "раунд"))
        self.stage.setText(_translate("Dialog", "стадия"))


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
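
# This file is pyuic5 output; edit meet.ui in Qt Designer and regenerate
# instead of editing by hand (output filename here is illustrative):
#   pyuic5 meet.ui -o meet_dialog.py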
|
normal
|
{
"blob_id": "c076aed1bfff51f8edf5ab4ef029b7fa7ca2422c",
"index": 9479,
"step-1": "<mask token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName('start')\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName('startDate')\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName('end')\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName('endDate')\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName('name')\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName('nameEdit')\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName('athletes')\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName('addButton')\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName('removeButton')\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName('members')\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName('meetCount')\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName('meetCountEdit')\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName('sortitionButton')\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName('cancel')\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName('athletesList')\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName('membersList')\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName('city')\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName('cityEdit')\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName('main_referee')\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName('main_clerk')\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName('mainrefCBox')\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n 
self.mainclerkCBox.setObjectName('mainclerkCBox')\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName('refList')\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName('refereeList')\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName('refColList')\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName('refereeCol')\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName('raddButton')\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName('rremoveButton')\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName('wsortitionButton')\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName('divrings')\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName('weightcatCBox')\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName('weigthcat')\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName('round')\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName('stage')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName('start')\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName('startDate')\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName('end')\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName('endDate')\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName('name')\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName('nameEdit')\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName('athletes')\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName('addButton')\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName('removeButton')\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName('members')\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName('meetCount')\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName('meetCountEdit')\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName('sortitionButton')\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName('cancel')\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName('athletesList')\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName('membersList')\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName('city')\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName('cityEdit')\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName('main_referee')\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName('main_clerk')\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName('mainrefCBox')\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n 
self.mainclerkCBox.setObjectName('mainclerkCBox')\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName('refList')\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName('refereeList')\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName('refColList')\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName('refereeCol')\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName('raddButton')\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName('rremoveButton')\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName('wsortitionButton')\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName('divrings')\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName('weightcatCBox')\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName('weigthcat')\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName('round')\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName('stage')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate('Dialog', 'Создание соревнования'))\n self.start.setText(_translate('Dialog', 'Начало'))\n self.startDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.end.setText(_translate('Dialog', 
'Окончание'))\n self.endDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.name.setText(_translate('Dialog', 'Название'))\n self.athletes.setText(_translate('Dialog', 'Список спортсменов'))\n self.addButton.setText(_translate('Dialog', '>>'))\n self.removeButton.setText(_translate('Dialog', '<<'))\n self.members.setText(_translate('Dialog', 'Список участников'))\n self.meetCount.setText(_translate('Dialog', 'Число боев в день'))\n self.sortitionButton.setText(_translate('Dialog', 'Жеребьевка'))\n self.cancel.setText(_translate('Dialog', 'Отмена'))\n self.city.setText(_translate('Dialog', 'Место проведения'))\n self.main_referee.setText(_translate('Dialog', 'Главный судья'))\n self.main_clerk.setText(_translate('Dialog', 'Главный секретарь'))\n self.refereeList.setText(_translate('Dialog', 'Список судей'))\n self.refereeCol.setText(_translate('Dialog', 'Судейская коллегия'))\n self.raddButton.setText(_translate('Dialog', '>>'))\n self.rremoveButton.setText(_translate('Dialog', '<<'))\n self.wsortitionButton.setText(_translate('Dialog', 'Без жеребьевки'))\n self.divrings.setText(_translate('Dialog', 'Разбивать по рингам'))\n self.weigthcat.setText(_translate('Dialog', 'Весовая категория'))\n self.round.setText(_translate('Dialog', 'раунд'))\n self.stage.setText(_translate('Dialog', 'стадия'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName('start')\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName('startDate')\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName('end')\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName('endDate')\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName('name')\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName('nameEdit')\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName('athletes')\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName('addButton')\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName('removeButton')\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName('members')\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName('meetCount')\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName('meetCountEdit')\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName('sortitionButton')\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName('cancel')\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName('athletesList')\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName('membersList')\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName('city')\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName('cityEdit')\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName('main_referee')\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName('main_clerk')\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName('mainrefCBox')\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n 
self.mainclerkCBox.setObjectName('mainclerkCBox')\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName('refList')\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName('refereeList')\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName('refColList')\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName('refereeCol')\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName('raddButton')\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName('rremoveButton')\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName('wsortitionButton')\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName('divrings')\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName('weightcatCBox')\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName('weigthcat')\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName('round')\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName('stage')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate('Dialog', 'Создание соревнования'))\n self.start.setText(_translate('Dialog', 'Начало'))\n self.startDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.end.setText(_translate('Dialog', 
'Окончание'))\n self.endDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.name.setText(_translate('Dialog', 'Название'))\n self.athletes.setText(_translate('Dialog', 'Список спортсменов'))\n self.addButton.setText(_translate('Dialog', '>>'))\n self.removeButton.setText(_translate('Dialog', '<<'))\n self.members.setText(_translate('Dialog', 'Список участников'))\n self.meetCount.setText(_translate('Dialog', 'Число боев в день'))\n self.sortitionButton.setText(_translate('Dialog', 'Жеребьевка'))\n self.cancel.setText(_translate('Dialog', 'Отмена'))\n self.city.setText(_translate('Dialog', 'Место проведения'))\n self.main_referee.setText(_translate('Dialog', 'Главный судья'))\n self.main_clerk.setText(_translate('Dialog', 'Главный секретарь'))\n self.refereeList.setText(_translate('Dialog', 'Список судей'))\n self.refereeCol.setText(_translate('Dialog', 'Судейская коллегия'))\n self.raddButton.setText(_translate('Dialog', '>>'))\n self.rremoveButton.setText(_translate('Dialog', '<<'))\n self.wsortitionButton.setText(_translate('Dialog', 'Без жеребьевки'))\n self.divrings.setText(_translate('Dialog', 'Разбивать по рингам'))\n self.weigthcat.setText(_translate('Dialog', 'Весовая категория'))\n self.round.setText(_translate('Dialog', 'раунд'))\n self.stage.setText(_translate('Dialog', 'стадия'))\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog()\n ui.setupUi(Dialog)\n Dialog.show()\n sys.exit(app.exec_())\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName('start')\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName('startDate')\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName('end')\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName('endDate')\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName('name')\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName('nameEdit')\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName('athletes')\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName('addButton')\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName('removeButton')\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName('members')\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName('meetCount')\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName('meetCountEdit')\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName('sortitionButton')\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName('cancel')\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName('athletesList')\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName('membersList')\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName('city')\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName('cityEdit')\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName('main_referee')\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName('main_clerk')\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName('mainrefCBox')\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n 
self.mainclerkCBox.setObjectName('mainclerkCBox')\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName('refList')\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName('refereeList')\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName('refColList')\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName('refereeCol')\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName('raddButton')\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName('rremoveButton')\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName('wsortitionButton')\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName('divrings')\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName('weightcatCBox')\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName('weigthcat')\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName('round')\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName('stage')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate('Dialog', 'Создание соревнования'))\n self.start.setText(_translate('Dialog', 'Начало'))\n self.startDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.end.setText(_translate('Dialog', 
'Окончание'))\n self.endDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.name.setText(_translate('Dialog', 'Название'))\n self.athletes.setText(_translate('Dialog', 'Список спортсменов'))\n self.addButton.setText(_translate('Dialog', '>>'))\n self.removeButton.setText(_translate('Dialog', '<<'))\n self.members.setText(_translate('Dialog', 'Список участников'))\n self.meetCount.setText(_translate('Dialog', 'Число боев в день'))\n self.sortitionButton.setText(_translate('Dialog', 'Жеребьевка'))\n self.cancel.setText(_translate('Dialog', 'Отмена'))\n self.city.setText(_translate('Dialog', 'Место проведения'))\n self.main_referee.setText(_translate('Dialog', 'Главный судья'))\n self.main_clerk.setText(_translate('Dialog', 'Главный секретарь'))\n self.refereeList.setText(_translate('Dialog', 'Список судей'))\n self.refereeCol.setText(_translate('Dialog', 'Судейская коллегия'))\n self.raddButton.setText(_translate('Dialog', '>>'))\n self.rremoveButton.setText(_translate('Dialog', '<<'))\n self.wsortitionButton.setText(_translate('Dialog', 'Без жеребьевки'))\n self.divrings.setText(_translate('Dialog', 'Разбивать по рингам'))\n self.weigthcat.setText(_translate('Dialog', 'Весовая категория'))\n self.round.setText(_translate('Dialog', 'раунд'))\n self.stage.setText(_translate('Dialog', 'стадия'))\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog()\n ui.setupUi(Dialog)\n Dialog.show()\n sys.exit(app.exec_())\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'meet.ui'\n#\n# Created by: PyQt5 UI code generator 5.8.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName(\"start\")\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName(\"startDate\")\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName(\"end\")\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName(\"endDate\")\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName(\"name\")\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName(\"nameEdit\")\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName(\"athletes\")\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName(\"addButton\")\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName(\"removeButton\")\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName(\"members\")\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName(\"meetCount\")\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName(\"meetCountEdit\")\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName(\"sortitionButton\")\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName(\"cancel\")\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName(\"athletesList\")\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName(\"membersList\")\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName(\"city\")\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName(\"cityEdit\")\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName(\"main_referee\")\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName(\"main_clerk\")\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n 
self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName(\"mainrefCBox\")\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n self.mainclerkCBox.setObjectName(\"mainclerkCBox\")\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName(\"refList\")\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName(\"refereeList\")\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName(\"refColList\")\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName(\"refereeCol\")\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName(\"raddButton\")\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName(\"rremoveButton\")\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName(\"wsortitionButton\")\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName(\"divrings\")\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName(\"weightcatCBox\")\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName(\"weigthcat\")\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName(\"round\")\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName(\"stage\")\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n\n def retranslateUi(self, Dialog):\n _translate = 
QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Создание соревнования\"))\n self.start.setText(_translate(\"Dialog\", \"Начало\"))\n self.startDate.setDisplayFormat(_translate(\"Dialog\", \"dd.MM.yyyy\"))\n self.end.setText(_translate(\"Dialog\", \"Окончание\"))\n self.endDate.setDisplayFormat(_translate(\"Dialog\", \"dd.MM.yyyy\"))\n self.name.setText(_translate(\"Dialog\", \"Название\"))\n self.athletes.setText(_translate(\"Dialog\", \"Список спортсменов\"))\n self.addButton.setText(_translate(\"Dialog\", \">>\"))\n self.removeButton.setText(_translate(\"Dialog\", \"<<\"))\n self.members.setText(_translate(\"Dialog\", \"Список участников\"))\n self.meetCount.setText(_translate(\"Dialog\", \"Число боев в день\"))\n self.sortitionButton.setText(_translate(\"Dialog\", \"Жеребьевка\"))\n self.cancel.setText(_translate(\"Dialog\", \"Отмена\"))\n self.city.setText(_translate(\"Dialog\", \"Место проведения\"))\n self.main_referee.setText(_translate(\"Dialog\", \"Главный судья\"))\n self.main_clerk.setText(_translate(\"Dialog\", \"Главный секретарь\"))\n self.refereeList.setText(_translate(\"Dialog\", \"Список судей\"))\n self.refereeCol.setText(_translate(\"Dialog\", \"Судейская коллегия\"))\n self.raddButton.setText(_translate(\"Dialog\", \">>\"))\n self.rremoveButton.setText(_translate(\"Dialog\", \"<<\"))\n self.wsortitionButton.setText(_translate(\"Dialog\", \"Без жеребьевки\"))\n self.divrings.setText(_translate(\"Dialog\", \"Разбивать по рингам\"))\n self.weigthcat.setText(_translate(\"Dialog\", \"Весовая категория\"))\n self.round.setText(_translate(\"Dialog\", \"раунд\"))\n self.stage.setText(_translate(\"Dialog\", \"стадия\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog()\n ui.setupUi(Dialog)\n Dialog.show()\n sys.exit(app.exec_())\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
default_app_config = 'child.apps.ChildConfig'
|
flexible
|
{
"blob_id": "290f96bb210a21183fe1e0e53219ad38ba889625",
"index": 1602,
"step-1": "<mask token>\n",
"step-2": "default_app_config = 'child.apps.ChildConfig'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""
This file contains the ScoreLoop which is used to show
the user up to the 10 highest scores made by the player
"""
import pygame
from score_fetcher import fetch_scores
from entities.sprite_text import TextSprite
class ScoreLoop:
def __init__(self):
self.scores = fetch_scores()
self.sprites = pygame.sprite.Group()
self.get_score_sprites()
self.space_cooldown = True
def get_score_sprites(self):
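        # Build one TextSprite per score, stacked vertically 50px apart by rank.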
rank = 1
for score in self.scores:
self.sprites.add(
TextSprite(str(score), 256, 100+50*rank, True)
)
rank += 1
def increment(self):
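        # Ignore the space key until it has been released once, so the
        # keypress that opened this view does not immediately close it.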
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
if self.space_cooldown:
return None
return "startloop"
self.space_cooldown = False
return None
def get_sprites(self):
"""retruns sprites for the UI"""
return self.sprites
if __name__ == "__main__":
pass
|
normal
|
{
"blob_id": "047b3398a73c9e7d75d43eeeab85f52c05ff90c3",
"index": 4534,
"step-1": "<mask token>\n\n\nclass ScoreLoop:\n\n def __init__(self):\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n self.space_cooldown = True\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ScoreLoop:\n\n def __init__(self):\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n self.space_cooldown = True\n\n def get_score_sprites(self):\n rank = 1\n for score in self.scores:\n self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)\n )\n rank += 1\n\n def increment(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n if self.space_cooldown:\n return None\n return 'startloop'\n self.space_cooldown = False\n return None\n\n def get_sprites(self):\n \"\"\"retruns sprites for the UI\"\"\"\n return self.sprites\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ScoreLoop:\n\n def __init__(self):\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n self.space_cooldown = True\n\n def get_score_sprites(self):\n rank = 1\n for score in self.scores:\n self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)\n )\n rank += 1\n\n def increment(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n if self.space_cooldown:\n return None\n return 'startloop'\n self.space_cooldown = False\n return None\n\n def get_sprites(self):\n \"\"\"retruns sprites for the UI\"\"\"\n return self.sprites\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "<mask token>\nimport pygame\nfrom score_fetcher import fetch_scores\nfrom entities.sprite_text import TextSprite\n\n\nclass ScoreLoop:\n\n def __init__(self):\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n self.space_cooldown = True\n\n def get_score_sprites(self):\n rank = 1\n for score in self.scores:\n self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)\n )\n rank += 1\n\n def increment(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n if self.space_cooldown:\n return None\n return 'startloop'\n self.space_cooldown = False\n return None\n\n def get_sprites(self):\n \"\"\"retruns sprites for the UI\"\"\"\n return self.sprites\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "\"\"\"\nThis file contains the ScoreLoop which is used to show\nthe user thw at most 10 highest scores made by the player\n\"\"\"\nimport pygame\nfrom score_fetcher import fetch_scores\nfrom entities.sprite_text import TextSprite\n\n\nclass ScoreLoop:\n\n def __init__(self):\n\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n\n self.space_cooldown = True\n\n def get_score_sprites(self):\n\n rank = 1\n\n for score in self.scores:\n self.sprites.add(\n TextSprite(str(score), 256, 100+50*rank, True)\n )\n rank += 1\n\n def increment(self):\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_SPACE]:\n if self.space_cooldown:\n return None\n return \"startloop\"\n self.space_cooldown = False\n return None\n\n def get_sprites(self):\n \"\"\"retruns sprites for the UI\"\"\"\n return self.sprites\n\n\nif __name__ == \"__main__\":\n\n pass\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class TestUrls(SimpleTestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestUrls(SimpleTestCase):
def test_profile_resolves(self):
url = reverse('profile')
self.assertEqual(resolve(url).func, profile)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestUrls(SimpleTestCase):
def test_profile_resolves(self):
url = reverse('profile')
self.assertEqual(resolve(url).func, profile)
def test_order_history_resolves(self):
url = reverse('order_history', args='1')
self.assertEqual(resolve(url).func, order_history)
<|reserved_special_token_1|>
from django.test import TestCase, SimpleTestCase
from django.urls import reverse, resolve
from .views import profile, order_history
<|reserved_special_token_0|>
class TestUrls(SimpleTestCase):
def test_profile_resolves(self):
url = reverse('profile')
self.assertEqual(resolve(url).func, profile)
def test_order_history_resolves(self):
url = reverse('order_history', args='1')
self.assertEqual(resolve(url).func, order_history)
<|reserved_special_token_1|>
from django.test import TestCase, SimpleTestCase
from django.urls import reverse, resolve
from .views import profile, order_history
""" Url Testing """
class TestUrls(SimpleTestCase):
def test_profile_resolves(self):
url = reverse('profile')
self.assertEqual(resolve(url).func, profile)
def test_order_history_resolves(self):
url = reverse('order_history', args='1')
self.assertEqual(resolve(url).func, order_history)
|
flexible
|
{
"blob_id": "5dc6b54357df87077d8159192cd52697b2616db8",
"index": 9186,
"step-1": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n",
"step-4": "from django.test import TestCase, SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom .views import profile, order_history\n<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n",
"step-5": "from django.test import TestCase, SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom .views import profile, order_history\n\n\"\"\" Url Testing \"\"\"\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from sbpy.data import Phys
from sbpy import bib
@pytest.mark.remote_data
def test_from_sbdb():
""" test from_horizons method"""
# query one object
data = Phys.from_sbdb('Ceres')
assert len(data.table) == 1
# query several objects
data = Phys.from_sbdb([n+1 for n in range(5)])
assert len(data.table) == 5
|
normal
|
{
"blob_id": "0bfb089556bfa253bf139f03cd3079ced962d858",
"index": 1021,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.remote_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n data = Phys.from_sbdb([(n + 1) for n in range(5)])\n assert len(data.table) == 5\n",
"step-3": "import pytest\nfrom sbpy.data import Phys\nfrom sbpy import bib\n\n\n@pytest.mark.remote_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n data = Phys.from_sbdb([(n + 1) for n in range(5)])\n assert len(data.table) == 5\n",
"step-4": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\n\nfrom sbpy.data import Phys\nfrom sbpy import bib\n\n\n@pytest.mark.remote_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n\n # query one object\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n\n # query several objects\n data = Phys.from_sbdb([n+1 for n in range(5)])\n assert len(data.table) == 5\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Mas(object):
def __init__(self, module):
self.module = module
self.mas_path = self.module.get_bin_path('mas')
self._checked_signin = False
self._installed = None
self._outdated = None
self.count_install = 0
self.count_upgrade = 0
self.count_uninstall = 0
self.result = {'changed': False}
self.check_mas_tool()
def app_command(self, command, id):
""" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' """
if not self.module.check_mode:
if command != 'uninstall':
self.check_signin()
rc, out, err = self.run([command, str(id)])
if rc != 0:
self.module.fail_json(msg=
"Error running command '{0}' on app '{1}': {2}".format(
command, str(id), out.rstrip()))
self.__dict__['count_' + command] += 1
def check_mas_tool(self):
""" Verifies that the `mas` tool is available in a recent version """
if not self.mas_path:
self.module.fail_json(msg='Required `mas` tool is not installed')
rc, out, err = self.run(['version'])
if rc != 0 or not out.strip() or StrictVersion(out.strip()
) < StrictVersion('1.5.0'):
self.module.fail_json(msg=
'`mas` tool in version 1.5.0+ needed, got ' + out.strip())
def check_signin(self):
""" Verifies that the user is signed in to the Mac App Store """
if self._checked_signin:
return
rc, out, err = self.run(['account'])
if out.split('\n', 1)[0].rstrip() == 'Not signed in':
self.module.fail_json(msg=
'You must be signed in to the Mac App Store')
self._checked_signin = True
def exit(self):
""" Exit with the data we have collected over time """
msgs = []
if self.count_install > 0:
msgs.append('Installed {0} app(s)'.format(self.count_install))
if self.count_upgrade > 0:
msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
if self.count_uninstall > 0:
msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
if msgs:
self.result['changed'] = True
self.result['msg'] = ', '.join(msgs)
self.module.exit_json(**self.result)
def get_current_state(self, command):
""" Returns the list of all app IDs; command can either be 'list' or 'outdated' """
rc, raw_apps, err = self.run([command])
rows = raw_apps.split('\n')
if rows[0] == 'No installed apps found':
rows = []
apps = []
for r in rows:
r = r.split(' ', 1)
if len(r) == 2:
apps.append(int(r[0]))
return apps
def installed(self):
""" Returns the list of installed apps """
if self._installed is None:
self._installed = self.get_current_state('list')
return self._installed
def is_installed(self, id):
""" Checks whether the given app is installed """
return int(id) in self.installed()
def is_outdated(self, id):
""" Checks whether the given app is installed, but outdated """
return int(id) in self.outdated()
def outdated(self):
""" Returns the list of installed, but outdated apps """
if self._outdated is None:
self._outdated = self.get_current_state('outdated')
return self._outdated
def run(self, cmd):
""" Runs a command of the `mas` tool """
cmd.insert(0, self.mas_path)
return self.module.run_command(cmd, False)
def upgrade_all(self):
""" Upgrades all installed apps and sets the correct result data """
outdated = self.outdated()
if not self.module.check_mode:
self.check_signin()
rc, out, err = self.run(['upgrade'])
if rc != 0:
self.module.fail_json(msg='Could not upgrade all apps: ' +
out.rstrip())
self.count_upgrade += len(outdated)
def main():
module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements
='int'), state=dict(type='str', default='present', choices=[
'absent', 'latest', 'present']), upgrade_all=dict(type='bool',
default=False, aliases=['upgrade'])), supports_check_mode=True)
mas = Mas(module)
if module.params['id']:
apps = module.params['id']
else:
apps = []
state = module.params['state']
upgrade = module.params['upgrade_all']
for app in sorted(set(apps)):
if state == 'present':
if not mas.is_installed(app):
mas.app_command('install', app)
elif state == 'absent':
if mas.is_installed(app):
if os.getuid() != 0:
module.fail_json(msg=
"Uninstalling apps requires root permissions ('become: yes')"
)
mas.app_command('uninstall', app)
elif state == 'latest':
if not mas.is_installed(app):
mas.app_command('install', app)
elif mas.is_outdated(app):
mas.app_command('upgrade', app)
mas._outdated = None
if upgrade and mas.outdated():
mas.upgrade_all()
mas.exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Mas(object):
def __init__(self, module):
self.module = module
self.mas_path = self.module.get_bin_path('mas')
self._checked_signin = False
self._installed = None
self._outdated = None
self.count_install = 0
self.count_upgrade = 0
self.count_uninstall = 0
self.result = {'changed': False}
self.check_mas_tool()
def app_command(self, command, id):
""" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' """
if not self.module.check_mode:
if command != 'uninstall':
self.check_signin()
rc, out, err = self.run([command, str(id)])
if rc != 0:
self.module.fail_json(msg=
"Error running command '{0}' on app '{1}': {2}".format(
command, str(id), out.rstrip()))
self.__dict__['count_' + command] += 1
def check_mas_tool(self):
""" Verifies that the `mas` tool is available in a recent version """
if not self.mas_path:
self.module.fail_json(msg='Required `mas` tool is not installed')
rc, out, err = self.run(['version'])
if rc != 0 or not out.strip() or StrictVersion(out.strip()
) < StrictVersion('1.5.0'):
self.module.fail_json(msg=
'`mas` tool in version 1.5.0+ needed, got ' + out.strip())
def check_signin(self):
""" Verifies that the user is signed in to the Mac App Store """
if self._checked_signin:
return
rc, out, err = self.run(['account'])
if out.split('\n', 1)[0].rstrip() == 'Not signed in':
self.module.fail_json(msg=
'You must be signed in to the Mac App Store')
self._checked_signin = True
def exit(self):
""" Exit with the data we have collected over time """
msgs = []
if self.count_install > 0:
msgs.append('Installed {0} app(s)'.format(self.count_install))
if self.count_upgrade > 0:
msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
if self.count_uninstall > 0:
msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
if msgs:
self.result['changed'] = True
self.result['msg'] = ', '.join(msgs)
self.module.exit_json(**self.result)
def get_current_state(self, command):
""" Returns the list of all app IDs; command can either be 'list' or 'outdated' """
rc, raw_apps, err = self.run([command])
rows = raw_apps.split('\n')
if rows[0] == 'No installed apps found':
rows = []
apps = []
for r in rows:
r = r.split(' ', 1)
if len(r) == 2:
apps.append(int(r[0]))
return apps
def installed(self):
""" Returns the list of installed apps """
if self._installed is None:
self._installed = self.get_current_state('list')
return self._installed
def is_installed(self, id):
""" Checks whether the given app is installed """
return int(id) in self.installed()
def is_outdated(self, id):
""" Checks whether the given app is installed, but outdated """
return int(id) in self.outdated()
def outdated(self):
""" Returns the list of installed, but outdated apps """
if self._outdated is None:
self._outdated = self.get_current_state('outdated')
return self._outdated
def run(self, cmd):
""" Runs a command of the `mas` tool """
cmd.insert(0, self.mas_path)
return self.module.run_command(cmd, False)
def upgrade_all(self):
""" Upgrades all installed apps and sets the correct result data """
outdated = self.outdated()
if not self.module.check_mode:
self.check_signin()
rc, out, err = self.run(['upgrade'])
if rc != 0:
self.module.fail_json(msg='Could not upgrade all apps: ' +
out.rstrip())
self.count_upgrade += len(outdated)
def main():
module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements
='int'), state=dict(type='str', default='present', choices=[
'absent', 'latest', 'present']), upgrade_all=dict(type='bool',
default=False, aliases=['upgrade'])), supports_check_mode=True)
mas = Mas(module)
if module.params['id']:
apps = module.params['id']
else:
apps = []
state = module.params['state']
upgrade = module.params['upgrade_all']
for app in sorted(set(apps)):
if state == 'present':
if not mas.is_installed(app):
mas.app_command('install', app)
elif state == 'absent':
if mas.is_installed(app):
if os.getuid() != 0:
module.fail_json(msg=
"Uninstalling apps requires root permissions ('become: yes')"
)
mas.app_command('uninstall', app)
elif state == 'latest':
if not mas.is_installed(app):
mas.app_command('install', app)
elif mas.is_outdated(app):
mas.app_command('upgrade', app)
mas._outdated = None
if upgrade and mas.outdated():
mas.upgrade_all()
mas.exit()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__metaclass__ = type
DOCUMENTATION = """
module: mas
short_description: Manage Mac App Store applications with mas-cli
description:
- Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
version_added: '0.2.0'
author:
- Michael Heap (@mheap)
- Lukas Bestle (@lukasbestle)
options:
id:
description:
- The Mac App Store identifier of the app(s) you want to manage.
- This can be found by running C(mas search APP_NAME) on your machine.
type: list
elements: int
state:
description:
- Desired state of the app installation.
- The C(absent) value requires root permissions, also see the examples.
type: str
choices:
- absent
- latest
- present
default: present
upgrade_all:
description:
- Upgrade all installed Mac App Store apps.
type: bool
default: "no"
aliases: ["upgrade"]
requirements:
- macOS 10.11+
- "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
- The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
notes:
- This module supports C(check_mode).
"""
EXAMPLES = """
- name: Install Keynote
community.general.mas:
id: 409183694
state: present
- name: Install Divvy with command mas installed in /usr/local/bin
community.general.mas:
id: 413857545
state: present
environment:
PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
- name: Install a list of apps
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
- name: Ensure the latest Keynote version is installed
community.general.mas:
id: 409183694
state: latest
- name: Upgrade all installed Mac App Store apps
community.general.mas:
upgrade_all: yes
- name: Install specific apps and also upgrade all others
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
upgrade_all: yes
- name: Uninstall Divvy
community.general.mas:
id: 413857545
state: absent
become: yes # Uninstallation requires root permissions
"""
RETURN = ' # '
<|reserved_special_token_0|>
class Mas(object):
def __init__(self, module):
self.module = module
self.mas_path = self.module.get_bin_path('mas')
self._checked_signin = False
self._installed = None
self._outdated = None
self.count_install = 0
self.count_upgrade = 0
self.count_uninstall = 0
self.result = {'changed': False}
self.check_mas_tool()
def app_command(self, command, id):
""" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' """
if not self.module.check_mode:
if command != 'uninstall':
self.check_signin()
rc, out, err = self.run([command, str(id)])
if rc != 0:
self.module.fail_json(msg=
"Error running command '{0}' on app '{1}': {2}".format(
command, str(id), out.rstrip()))
self.__dict__['count_' + command] += 1
def check_mas_tool(self):
""" Verifies that the `mas` tool is available in a recent version """
if not self.mas_path:
self.module.fail_json(msg='Required `mas` tool is not installed')
rc, out, err = self.run(['version'])
if rc != 0 or not out.strip() or StrictVersion(out.strip()
) < StrictVersion('1.5.0'):
self.module.fail_json(msg=
'`mas` tool in version 1.5.0+ needed, got ' + out.strip())
def check_signin(self):
""" Verifies that the user is signed in to the Mac App Store """
if self._checked_signin:
return
rc, out, err = self.run(['account'])
if out.split('\n', 1)[0].rstrip() == 'Not signed in':
self.module.fail_json(msg=
'You must be signed in to the Mac App Store')
self._checked_signin = True
def exit(self):
""" Exit with the data we have collected over time """
msgs = []
if self.count_install > 0:
msgs.append('Installed {0} app(s)'.format(self.count_install))
if self.count_upgrade > 0:
msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
if self.count_uninstall > 0:
msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
if msgs:
self.result['changed'] = True
self.result['msg'] = ', '.join(msgs)
self.module.exit_json(**self.result)
def get_current_state(self, command):
""" Returns the list of all app IDs; command can either be 'list' or 'outdated' """
rc, raw_apps, err = self.run([command])
rows = raw_apps.split('\n')
if rows[0] == 'No installed apps found':
rows = []
apps = []
for r in rows:
r = r.split(' ', 1)
if len(r) == 2:
apps.append(int(r[0]))
return apps
def installed(self):
""" Returns the list of installed apps """
if self._installed is None:
self._installed = self.get_current_state('list')
return self._installed
def is_installed(self, id):
""" Checks whether the given app is installed """
return int(id) in self.installed()
def is_outdated(self, id):
""" Checks whether the given app is installed, but outdated """
return int(id) in self.outdated()
def outdated(self):
""" Returns the list of installed, but outdated apps """
if self._outdated is None:
self._outdated = self.get_current_state('outdated')
return self._outdated
def run(self, cmd):
""" Runs a command of the `mas` tool """
cmd.insert(0, self.mas_path)
return self.module.run_command(cmd, False)
def upgrade_all(self):
""" Upgrades all installed apps and sets the correct result data """
outdated = self.outdated()
if not self.module.check_mode:
self.check_signin()
rc, out, err = self.run(['upgrade'])
if rc != 0:
self.module.fail_json(msg='Could not upgrade all apps: ' +
out.rstrip())
self.count_upgrade += len(outdated)
def main():
module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements
='int'), state=dict(type='str', default='present', choices=[
'absent', 'latest', 'present']), upgrade_all=dict(type='bool',
default=False, aliases=['upgrade'])), supports_check_mode=True)
mas = Mas(module)
if module.params['id']:
apps = module.params['id']
else:
apps = []
state = module.params['state']
upgrade = module.params['upgrade_all']
for app in sorted(set(apps)):
if state == 'present':
if not mas.is_installed(app):
mas.app_command('install', app)
elif state == 'absent':
if mas.is_installed(app):
if os.getuid() != 0:
module.fail_json(msg=
"Uninstalling apps requires root permissions ('become: yes')"
)
mas.app_command('uninstall', app)
elif state == 'latest':
if not mas.is_installed(app):
mas.app_command('install', app)
elif mas.is_outdated(app):
mas.app_command('upgrade', app)
mas._outdated = None
if upgrade and mas.outdated():
mas.upgrade_all()
mas.exit()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: mas
short_description: Manage Mac App Store applications with mas-cli
description:
- Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
version_added: '0.2.0'
author:
- Michael Heap (@mheap)
- Lukas Bestle (@lukasbestle)
options:
id:
description:
- The Mac App Store identifier of the app(s) you want to manage.
- This can be found by running C(mas search APP_NAME) on your machine.
type: list
elements: int
state:
description:
- Desired state of the app installation.
- The C(absent) value requires root permissions, also see the examples.
type: str
choices:
- absent
- latest
- present
default: present
upgrade_all:
description:
- Upgrade all installed Mac App Store apps.
type: bool
default: "no"
aliases: ["upgrade"]
requirements:
- macOS 10.11+
- "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
- The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
notes:
- This module supports C(check_mode).
"""
EXAMPLES = """
- name: Install Keynote
community.general.mas:
id: 409183694
state: present
- name: Install Divvy with command mas installed in /usr/local/bin
community.general.mas:
id: 413857545
state: present
environment:
PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
- name: Install a list of apps
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
- name: Ensure the latest Keynote version is installed
community.general.mas:
id: 409183694
state: latest
- name: Upgrade all installed Mac App Store apps
community.general.mas:
upgrade_all: yes
- name: Install specific apps and also upgrade all others
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
upgrade_all: yes
- name: Uninstall Divvy
community.general.mas:
id: 413857545
state: absent
become: yes # Uninstallation requires root permissions
"""
RETURN = ' # '
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from distutils.version import StrictVersion
import os
class Mas(object):
def __init__(self, module):
self.module = module
self.mas_path = self.module.get_bin_path('mas')
self._checked_signin = False
self._installed = None
self._outdated = None
self.count_install = 0
self.count_upgrade = 0
self.count_uninstall = 0
self.result = {'changed': False}
self.check_mas_tool()
def app_command(self, command, id):
""" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' """
if not self.module.check_mode:
if command != 'uninstall':
self.check_signin()
rc, out, err = self.run([command, str(id)])
if rc != 0:
self.module.fail_json(msg=
"Error running command '{0}' on app '{1}': {2}".format(
command, str(id), out.rstrip()))
self.__dict__['count_' + command] += 1
def check_mas_tool(self):
""" Verifies that the `mas` tool is available in a recent version """
if not self.mas_path:
self.module.fail_json(msg='Required `mas` tool is not installed')
rc, out, err = self.run(['version'])
if rc != 0 or not out.strip() or StrictVersion(out.strip()
) < StrictVersion('1.5.0'):
self.module.fail_json(msg=
'`mas` tool in version 1.5.0+ needed, got ' + out.strip())
def check_signin(self):
""" Verifies that the user is signed in to the Mac App Store """
if self._checked_signin:
return
rc, out, err = self.run(['account'])
if out.split('\n', 1)[0].rstrip() == 'Not signed in':
self.module.fail_json(msg=
'You must be signed in to the Mac App Store')
self._checked_signin = True
def exit(self):
""" Exit with the data we have collected over time """
msgs = []
if self.count_install > 0:
msgs.append('Installed {0} app(s)'.format(self.count_install))
if self.count_upgrade > 0:
msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
if self.count_uninstall > 0:
msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
if msgs:
self.result['changed'] = True
self.result['msg'] = ', '.join(msgs)
self.module.exit_json(**self.result)
def get_current_state(self, command):
""" Returns the list of all app IDs; command can either be 'list' or 'outdated' """
rc, raw_apps, err = self.run([command])
rows = raw_apps.split('\n')
if rows[0] == 'No installed apps found':
rows = []
apps = []
for r in rows:
r = r.split(' ', 1)
if len(r) == 2:
apps.append(int(r[0]))
return apps
def installed(self):
""" Returns the list of installed apps """
if self._installed is None:
self._installed = self.get_current_state('list')
return self._installed
def is_installed(self, id):
""" Checks whether the given app is installed """
return int(id) in self.installed()
def is_outdated(self, id):
""" Checks whether the given app is installed, but outdated """
return int(id) in self.outdated()
def outdated(self):
""" Returns the list of installed, but outdated apps """
if self._outdated is None:
self._outdated = self.get_current_state('outdated')
return self._outdated
def run(self, cmd):
""" Runs a command of the `mas` tool """
cmd.insert(0, self.mas_path)
return self.module.run_command(cmd, False)
def upgrade_all(self):
""" Upgrades all installed apps and sets the correct result data """
outdated = self.outdated()
if not self.module.check_mode:
self.check_signin()
rc, out, err = self.run(['upgrade'])
if rc != 0:
self.module.fail_json(msg='Could not upgrade all apps: ' +
out.rstrip())
self.count_upgrade += len(outdated)
def main():
module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements
='int'), state=dict(type='str', default='present', choices=[
'absent', 'latest', 'present']), upgrade_all=dict(type='bool',
default=False, aliases=['upgrade'])), supports_check_mode=True)
mas = Mas(module)
if module.params['id']:
apps = module.params['id']
else:
apps = []
state = module.params['state']
upgrade = module.params['upgrade_all']
for app in sorted(set(apps)):
if state == 'present':
if not mas.is_installed(app):
mas.app_command('install', app)
elif state == 'absent':
if mas.is_installed(app):
if os.getuid() != 0:
module.fail_json(msg=
"Uninstalling apps requires root permissions ('become: yes')"
)
mas.app_command('uninstall', app)
elif state == 'latest':
if not mas.is_installed(app):
mas.app_command('install', app)
elif mas.is_outdated(app):
mas.app_command('upgrade', app)
mas._outdated = None
if upgrade and mas.outdated():
mas.upgrade_all()
mas.exit()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: mas
short_description: Manage Mac App Store applications with mas-cli
description:
- Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
version_added: '0.2.0'
author:
- Michael Heap (@mheap)
- Lukas Bestle (@lukasbestle)
options:
id:
description:
- The Mac App Store identifier of the app(s) you want to manage.
- This can be found by running C(mas search APP_NAME) on your machine.
type: list
elements: int
state:
description:
- Desired state of the app installation.
- The C(absent) value requires root permissions, also see the examples.
type: str
choices:
- absent
- latest
- present
default: present
upgrade_all:
description:
- Upgrade all installed Mac App Store apps.
type: bool
default: "no"
aliases: ["upgrade"]
requirements:
- macOS 10.11+
- "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
- The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Install Keynote
community.general.mas:
id: 409183694
state: present
- name: Install Divvy with command mas installed in /usr/local/bin
community.general.mas:
id: 413857545
state: present
environment:
PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
- name: Install a list of apps
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
- name: Ensure the latest Keynote version is installed
community.general.mas:
id: 409183694
state: latest
- name: Upgrade all installed Mac App Store apps
community.general.mas:
upgrade_all: yes
- name: Install specific apps and also upgrade all others
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
upgrade_all: yes
- name: Uninstall Divvy
community.general.mas:
id: 413857545
state: absent
become: yes # Uninstallation requires root permissions
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from distutils.version import StrictVersion
import os
class Mas(object):
def __init__(self, module):
self.module = module
# Initialize data properties
self.mas_path = self.module.get_bin_path('mas')
self._checked_signin = False
self._installed = None # Populated only if needed
self._outdated = None # Populated only if needed
self.count_install = 0
self.count_upgrade = 0
self.count_uninstall = 0
self.result = {
'changed': False
}
self.check_mas_tool()
def app_command(self, command, id):
''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
if not self.module.check_mode:
if command != 'uninstall':
self.check_signin()
rc, out, err = self.run([command, str(id)])
if rc != 0:
self.module.fail_json(
msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
)
# No error or dry run
self.__dict__['count_' + command] += 1
def check_mas_tool(self):
''' Verifies that the `mas` tool is available in a recent version '''
# Is the `mas` tool available at all?
if not self.mas_path:
self.module.fail_json(msg='Required `mas` tool is not installed')
# Is the version recent enough?
rc, out, err = self.run(['version'])
if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):
self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
def check_signin(self):
''' Verifies that the user is signed in to the Mac App Store '''
# Only check this once per execution
if self._checked_signin:
return
rc, out, err = self.run(['account'])
if out.split("\n", 1)[0].rstrip() == 'Not signed in':
self.module.fail_json(msg='You must be signed in to the Mac App Store')
self._checked_signin = True
def exit(self):
''' Exit with the data we have collected over time '''
msgs = []
if self.count_install > 0:
msgs.append('Installed {0} app(s)'.format(self.count_install))
if self.count_upgrade > 0:
msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
if self.count_uninstall > 0:
msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
if msgs:
self.result['changed'] = True
self.result['msg'] = ', '.join(msgs)
self.module.exit_json(**self.result)
def get_current_state(self, command):
''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
rc, raw_apps, err = self.run([command])
rows = raw_apps.split("\n")
if rows[0] == "No installed apps found":
rows = []
apps = []
for r in rows:
# Format: "123456789 App Name"
r = r.split(' ', 1)
if len(r) == 2:
apps.append(int(r[0]))
return apps
def installed(self):
''' Returns the list of installed apps '''
# Populate cache if not already done
if self._installed is None:
self._installed = self.get_current_state('list')
return self._installed
def is_installed(self, id):
''' Checks whether the given app is installed '''
return int(id) in self.installed()
def is_outdated(self, id):
''' Checks whether the given app is installed, but outdated '''
return int(id) in self.outdated()
def outdated(self):
''' Returns the list of installed, but outdated apps '''
# Populate cache if not already done
if self._outdated is None:
self._outdated = self.get_current_state('outdated')
return self._outdated
def run(self, cmd):
''' Runs a command of the `mas` tool '''
cmd.insert(0, self.mas_path)
return self.module.run_command(cmd, False)
def upgrade_all(self):
''' Upgrades all installed apps and sets the correct result data '''
outdated = self.outdated()
if not self.module.check_mode:
self.check_signin()
rc, out, err = self.run(['upgrade'])
if rc != 0:
self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
self.count_upgrade += len(outdated)
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(type='list', elements='int'),
state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
),
supports_check_mode=True
)
mas = Mas(module)
if module.params['id']:
apps = module.params['id']
else:
apps = []
state = module.params['state']
upgrade = module.params['upgrade_all']
# Run operations on the given app IDs
for app in sorted(set(apps)):
if state == 'present':
if not mas.is_installed(app):
mas.app_command('install', app)
elif state == 'absent':
if mas.is_installed(app):
# Ensure we are root
if os.getuid() != 0:
module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')")
mas.app_command('uninstall', app)
elif state == 'latest':
if not mas.is_installed(app):
mas.app_command('install', app)
elif mas.is_outdated(app):
mas.app_command('upgrade', app)
# Upgrade all apps if requested
mas._outdated = None # Clear cache
if upgrade and mas.outdated():
mas.upgrade_all()
# Exit with the collected data
mas.exit()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "8b965fd91396735e0153390b4eff540d3aac3aff",
"index": 4916,
"step-1": "<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n 
out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n 
out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n__metaclass__ = type\nDOCUMENTATION = \"\"\"\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n\"\"\"\nEXAMPLES = \"\"\"\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n\"\"\"\nRETURN = ' # '\n<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = 
self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import absolute_import, division, print_function\n__metaclass__ = type\nDOCUMENTATION = \"\"\"\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n\"\"\"\nEXAMPLES = \"\"\"\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n\"\"\"\nRETURN = ' # '\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom distutils.version import StrictVersion\nimport os\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in 
version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>\n# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n'''\n\nEXAMPLES = '''\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n'''\n\nRETURN = r''' # '''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom distutils.version import StrictVersion\nimport os\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n\n # Initialize data properties\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None # Populated only if needed\n self._outdated = None # Populated only if needed\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {\n 'changed': False\n }\n\n self.check_mas_tool()\n\n def app_command(self, command, id):\n ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''\n\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(\n msg=\"Error running command '{0}' on app '{1}': {2}\".format(command, str(id), out.rstrip())\n )\n\n # No error or dry run\n self.__dict__['count_' + 
command] += 1\n\n def check_mas_tool(self):\n ''' Verifies that the `mas` tool is available in a recent version '''\n\n # Is the `mas` tool available at all?\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n\n # Is the version recent enough?\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):\n self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n ''' Verifies that the user is signed in to the Mac App Store '''\n\n # Only check this once per execution\n if self._checked_signin:\n return\n\n rc, out, err = self.run(['account'])\n if out.split(\"\\n\", 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg='You must be signed in to the Mac App Store')\n\n self._checked_signin = True\n\n def exit(self):\n ''' Exit with the data we have collected over time '''\n\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''\n\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split(\"\\n\")\n if rows[0] == \"No installed apps found\":\n rows = []\n apps = []\n for r in rows:\n # Format: \"123456789 App Name\"\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n\n return apps\n\n def installed(self):\n ''' Returns the list of installed apps '''\n\n # Populate cache if not already done\n if self._installed is None:\n self._installed = self.get_current_state('list')\n\n return self._installed\n\n def is_installed(self, id):\n ''' Checks whether the given app is installed '''\n\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n ''' Checks whether the given app is installed, but outdated '''\n\n return int(id) in self.outdated()\n\n def outdated(self):\n ''' Returns the list of installed, but outdated apps '''\n\n # Populate cache if not already done\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n\n return self._outdated\n\n def run(self, cmd):\n ''' Runs a command of the `mas` tool '''\n\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n ''' Upgrades all installed apps and sets the correct result data '''\n\n outdated = self.outdated()\n\n if not self.module.check_mode:\n self.check_signin()\n\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())\n\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n id=dict(type='list', elements='int'),\n state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),\n upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),\n ),\n supports_check_mode=True\n )\n mas = Mas(module)\n\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n\n # Run operations on the given app IDs\n for app in sorted(set(apps)):\n if state 
== 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n\n elif state == 'absent':\n if mas.is_installed(app):\n # Ensure we are root\n if os.getuid() != 0:\n module.fail_json(msg=\"Uninstalling apps requires root permissions ('become: yes')\")\n\n mas.app_command('uninstall', app)\n\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n\n # Upgrade all apps if requested\n mas._outdated = None # Clear cache\n if upgrade and mas.outdated():\n mas.upgrade_all()\n\n # Exit with the collected data\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
14,
15,
16,
17,
18
]
}
|
[
14,
15,
16,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
aws_glue_integration_tests += deployment_patterns
<|reserved_special_token_1|>
<|reserved_special_token_0|>
aws_glue_integration_tests = []
deployment_patterns = [IntegrationTestFixture(name=
'how_to_use_great_expectations_in_aws_glue', user_flow_script=
'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'
, backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.
AWS, BackendDependencies.AWS_GLUE])]
aws_glue_integration_tests += deployment_patterns
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from tests.integration.backend_dependencies import BackendDependencies
from tests.integration.integration_test_fixture import IntegrationTestFixture
aws_glue_integration_tests = []
deployment_patterns = [IntegrationTestFixture(name=
'how_to_use_great_expectations_in_aws_glue', user_flow_script=
'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'
, backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.
AWS, BackendDependencies.AWS_GLUE])]
aws_glue_integration_tests += deployment_patterns
<|reserved_special_token_1|>
"""Note: AWS Glue split from spark since it requires different test dependencies."""
from tests.integration.backend_dependencies import BackendDependencies
from tests.integration.integration_test_fixture import IntegrationTestFixture
aws_glue_integration_tests = []
deployment_patterns = [
# TODO: The AWS_GLUE dependency is only being marked and not run at this time.
IntegrationTestFixture(
name="how_to_use_great_expectations_in_aws_glue",
user_flow_script="tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py",
backend_dependencies=[
BackendDependencies.SPARK,
BackendDependencies.AWS,
BackendDependencies.AWS_GLUE,
],
),
]
aws_glue_integration_tests += deployment_patterns
|
flexible
|
{
"blob_id": "e288403cb310bb7241b25e74d1b5bcc63967128c",
"index": 1031,
"step-1": "<mask token>\n",
"step-2": "<mask token>\naws_glue_integration_tests += deployment_patterns\n",
"step-3": "<mask token>\naws_glue_integration_tests = []\ndeployment_patterns = [IntegrationTestFixture(name=\n 'how_to_use_great_expectations_in_aws_glue', user_flow_script=\n 'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'\n , backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.\n AWS, BackendDependencies.AWS_GLUE])]\naws_glue_integration_tests += deployment_patterns\n",
"step-4": "<mask token>\nfrom tests.integration.backend_dependencies import BackendDependencies\nfrom tests.integration.integration_test_fixture import IntegrationTestFixture\naws_glue_integration_tests = []\ndeployment_patterns = [IntegrationTestFixture(name=\n 'how_to_use_great_expectations_in_aws_glue', user_flow_script=\n 'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'\n , backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.\n AWS, BackendDependencies.AWS_GLUE])]\naws_glue_integration_tests += deployment_patterns\n",
"step-5": "\"\"\"Note: AWS Glue split from spark since it requires different test dependencies.\"\"\"\nfrom tests.integration.backend_dependencies import BackendDependencies\nfrom tests.integration.integration_test_fixture import IntegrationTestFixture\n\naws_glue_integration_tests = []\n\ndeployment_patterns = [\n # TODO: The AWS_GLUE dependency is only being marked and not run at this time.\n IntegrationTestFixture(\n name=\"how_to_use_great_expectations_in_aws_glue\",\n user_flow_script=\"tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py\",\n backend_dependencies=[\n BackendDependencies.SPARK,\n BackendDependencies.AWS,\n BackendDependencies.AWS_GLUE,\n ],\n ),\n]\n\naws_glue_integration_tests += deployment_patterns\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Layer:
def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):
if zeros:
w = np.zeros((m1, m2))
else:
w = np.random.randn(m1, m2) * np.sqrt(2 / m1)
self.w = theano.shared(w)
self.params = [self.w]
self.use_bias = use_bias
if use_bias:
self.b = theano.shared(np.zeros(m2))
self.params += [self.b]
self.f = f
<|reserved_special_token_0|>
class Policy:
def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):
self.ft = ft
self.D = D
self.layer_sizes_mean = layer_sizes_mean
self.layer_sizes_var = layer_sizes_var
self.mean_layers = []
m1 = D
for m2 in layer_sizes_mean:
layer = Layer(m1, m2)
self.mean_layers.append(layer)
m1 = m2
layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)
self.mean_layers.append(layer)
self.var_layers = []
        m1 = D
        for m2 in layer_sizes_var:
layer = Layer(m1, m2)
self.var_layers.append(layer)
m1 = m2
layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)
self.var_layers.append(layer)
params = []
for layer in (self.mean_layers + self.var_layers):
params += layer.params
self.params = params
x = T.matrix('x')
actions = T.vector('actions')
advantages = T.vector('advantages')
def get_output(layers):
z = x
for layer in layers:
z = layer.forward(z)
return z.flatten()
mean = get_output(self.mean_layers)
var = get_output(self.var_layers) + 0.0001
self.predict_ = theano.function(inputs=[x], outputs=[mean, var],
allow_input_downcast=True)
def predict(self, x):
x = np.atleast_2d(x)
x = self.ft.transform(x)
return self.predict_(x)
def sample_action(self, x):
pred = self.predict(x)
mu = pred[0][0]
v = pred[1][0]
a = np.random.randn() * np.sqrt(v) + mu
return min(max(a, -1), 1)
def copy(self):
clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.
            layer_sizes_var)
clone.copy_from(self)
return clone
def copy_from(self, other):
for p, q in zip(self.params, other.params):
v = q.get_value()
p.set_value(v)
def perturb_params(self):
for p in self.params:
v = p.get_value()
noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0
if np.random.random() < 0.1:
p.set_value(noise)
else:
p.set_value(v + noise)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Layer:
def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):
if zeros:
w = np.zeros((m1, m2))
else:
w = np.random.randn(m1, m2) * np.sqrt(2 / m1)
self.w = theano.shared(w)
self.params = [self.w]
self.use_bias = use_bias
if use_bias:
self.b = theano.shared(np.zeros(m2))
self.params += [self.b]
self.f = f
def forward(self, x):
if self.use_bias:
a = x.dot(self.w) + self.b
else:
a = x.dot(self.w)
return self.f(a)
class Policy:
def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):
self.ft = ft
self.D = D
self.layer_sizes_mean = layer_sizes_mean
self.layer_sizes_var = layer_sizes_var
self.mean_layers = []
m1 = D
for m2 in layer_sizes_mean:
layer = Layer(m1, m2)
self.mean_layers.append(layer)
m1 = m2
layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)
self.mean_layers.append(layer)
self.var_layers = []
        m1 = D
        for m2 in layer_sizes_var:
layer = Layer(m1, m2)
self.var_layers.append(layer)
m1 = m2
layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)
self.var_layers.append(layer)
params = []
for layer in (self.mean_layers + self.var_layers):
params += layer.params
self.params = params
x = T.matrix('x')
actions = T.vector('actions')
advantages = T.vector('advantages')
def get_output(layers):
z = x
for layer in layers:
z = layer.forward(z)
return z.flatten()
mean = get_output(self.mean_layers)
var = get_output(self.var_layers) + 0.0001
self.predict_ = theano.function(inputs=[x], outputs=[mean, var],
allow_input_downcast=True)
def predict(self, x):
x = np.atleast_2d(x)
x = self.ft.transform(x)
return self.predict_(x)
def sample_action(self, x):
pred = self.predict(x)
mu = pred[0][0]
v = pred[1][0]
a = np.random.randn() * np.sqrt(v) + mu
return min(max(a, -1), 1)
def copy(self):
clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.
            layer_sizes_var)
clone.copy_from(self)
return clone
def copy_from(self, other):
for p, q in zip(self.params, other.params):
v = q.get_value()
p.set_value(v)
def perturb_params(self):
for p in self.params:
v = p.get_value()
noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0
if np.random.random() < 0.1:
p.set_value(noise)
else:
p.set_value(v + noise)
<|reserved_special_token_0|>
def series(env, T, policy, gamma, print_iters=False):
total_rewards = np.empty(T)
for i in range(T):
total_rewards[i] = episode(env, policy, gamma)
if print_iters:
print(i, 'Average so far:', total_rewards[:i + 1].mean())
avg_totalrewards = total_rewards.mean()
print('Average total rewards:', avg_totalrewards)
return avg_totalrewards
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Layer:
def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):
if zeros:
w = np.zeros((m1, m2))
else:
w = np.random.randn(m1, m2) * np.sqrt(2 / m1)
self.w = theano.shared(w)
self.params = [self.w]
self.use_bias = use_bias
if use_bias:
self.b = theano.shared(np.zeros(m2))
self.params += [self.b]
self.f = f
def forward(self, x):
if self.use_bias:
a = x.dot(self.w) + self.b
else:
a = x.dot(self.w)
return self.f(a)
class Policy:
def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):
self.ft = ft
self.D = D
self.layer_sizes_mean = layer_sizes_mean
self.layer_sizes_var = layer_sizes_var
self.mean_layers = []
m1 = D
for m2 in layer_sizes_mean:
layer = Layer(m1, m2)
self.mean_layers.append(layer)
m1 = m2
layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)
self.mean_layers.append(layer)
self.var_layers = []
        m1 = D
        for m2 in layer_sizes_var:
layer = Layer(m1, m2)
self.var_layers.append(layer)
m1 = m2
layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)
self.var_layers.append(layer)
params = []
for layer in (self.mean_layers + self.var_layers):
params += layer.params
self.params = params
x = T.matrix('x')
actions = T.vector('actions')
advantages = T.vector('advantages')
def get_output(layers):
z = x
for layer in layers:
z = layer.forward(z)
return z.flatten()
mean = get_output(self.mean_layers)
var = get_output(self.var_layers) + 0.0001
self.predict_ = theano.function(inputs=[x], outputs=[mean, var],
allow_input_downcast=True)
def predict(self, x):
x = np.atleast_2d(x)
x = self.ft.transform(x)
return self.predict_(x)
def sample_action(self, x):
pred = self.predict(x)
mu = pred[0][0]
v = pred[1][0]
a = np.random.randn() * np.sqrt(v) + mu
return min(max(a, -1), 1)
def copy(self):
clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.
            layer_sizes_var)
clone.copy_from(self)
return clone
def copy_from(self, other):
for p, q in zip(self.params, other.params):
v = q.get_value()
p.set_value(v)
def perturb_params(self):
for p in self.params:
v = p.get_value()
noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0
if np.random.random() < 0.1:
p.set_value(noise)
else:
p.set_value(v + noise)
<|reserved_special_token_0|>
def series(env, T, policy, gamma, print_iters=False):
total_rewards = np.empty(T)
for i in range(T):
total_rewards[i] = episode(env, policy, gamma)
if print_iters:
print(i, 'Average so far:', total_rewards[:i + 1].mean())
avg_totalrewards = total_rewards.mean()
print('Average total rewards:', avg_totalrewards)
return avg_totalrewards
<|reserved_special_token_0|>
def main():
env = gym.make('MountainCarContinuous-v0')
ft = Transformer(env, n_components=100)
D = ft.dimensions
model = Policy(ft, D, [], [])
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
total_rewards, model = random_search(env, model, gamma)
print('max reward:', np.max(total_rewards))
avg_totalrewards = series(env, 100, model, gamma, print_iters=True)
print('avg reward over 100 episodes with best models:', avg_totalrewards)
plt.plot(total_rewards)
plt.title('Rewards')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Layer:
def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):
if zeros:
w = np.zeros((m1, m2))
else:
w = np.random.randn(m1, m2) * np.sqrt(2 / m1)
self.w = theano.shared(w)
self.params = [self.w]
self.use_bias = use_bias
if use_bias:
self.b = theano.shared(np.zeros(m2))
self.params += [self.b]
self.f = f
def forward(self, x):
if self.use_bias:
a = x.dot(self.w) + self.b
else:
a = x.dot(self.w)
return self.f(a)
class Policy:
def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):
self.ft = ft
self.D = D
self.layer_sizes_mean = layer_sizes_mean
self.layer_sizes_var = layer_sizes_var
self.mean_layers = []
m1 = D
for m2 in layer_sizes_mean:
layer = Layer(m1, m2)
self.mean_layers.append(layer)
m1 = m2
layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)
self.mean_layers.append(layer)
self.var_layers = []
        m1 = D
        for m2 in layer_sizes_var:
layer = Layer(m1, m2)
self.var_layers.append(layer)
m1 = m2
layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)
self.var_layers.append(layer)
params = []
for layer in (self.mean_layers + self.var_layers):
params += layer.params
self.params = params
x = T.matrix('x')
actions = T.vector('actions')
advantages = T.vector('advantages')
def get_output(layers):
z = x
for layer in layers:
z = layer.forward(z)
return z.flatten()
mean = get_output(self.mean_layers)
var = get_output(self.var_layers) + 0.0001
self.predict_ = theano.function(inputs=[x], outputs=[mean, var],
allow_input_downcast=True)
def predict(self, x):
x = np.atleast_2d(x)
x = self.ft.transform(x)
return self.predict_(x)
def sample_action(self, x):
pred = self.predict(x)
mu = pred[0][0]
v = pred[1][0]
a = np.random.randn() * np.sqrt(v) + mu
return min(max(a, -1), 1)
def copy(self):
clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.
            layer_sizes_var)
clone.copy_from(self)
return clone
def copy_from(self, other):
for p, q in zip(self.params, other.params):
v = q.get_value()
p.set_value(v)
def perturb_params(self):
for p in self.params:
v = p.get_value()
noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0
if np.random.random() < 0.1:
p.set_value(noise)
else:
p.set_value(v + noise)
def episode(env, policy, gamma):
observation = env.reset()
done = False
total_reward = 0
iterations = 0
while not done and iterations < 2000:
action = policy.sample_action(observation)
observation, reward, done, info = env.step([action])
total_reward += reward
iterations += 1
return total_reward
def series(env, T, policy, gamma, print_iters=False):
total_rewards = np.empty(T)
for i in range(T):
total_rewards[i] = episode(env, policy, gamma)
if print_iters:
print(i, 'Average so far:', total_rewards[:i + 1].mean())
avg_totalrewards = total_rewards.mean()
print('Average total rewards:', avg_totalrewards)
return avg_totalrewards
<|reserved_special_token_0|>
def main():
env = gym.make('MountainCarContinuous-v0')
ft = Transformer(env, n_components=100)
D = ft.dimensions
model = Policy(ft, D, [], [])
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
total_rewards, model = random_search(env, model, gamma)
print('max reward:', np.max(total_rewards))
avg_totalrewards = series(env, 100, model, gamma, print_iters=True)
print('avg reward over 100 episodes with best models:', avg_totalrewards)
plt.plot(total_rewards)
plt.title('Rewards')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import gym
import os
import sys
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from mountain_car_v1_q_learning import Transformer
# so you can test different architectures
class Layer:
def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):
if zeros:
w = np.zeros((m1, m2))
else:
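            # He-style initialization, scaled for the ReLU activations used by default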
w = np.random.randn(m1, m2) * np.sqrt(2 / m1)
self.w = theano.shared(w)
self.params = [self.w]
self.use_bias = use_bias
if use_bias:
self.b = theano.shared(np.zeros(m2))
self.params += [self.b]
self.f = f
def forward(self, x):
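        # affine transform (with optional bias) followed by the activation f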
if self.use_bias:
a = x.dot(self.w) + self.b
else:
a = x.dot(self.w)
return self.f(a)
# approximates pi(a | s)
class Policy:
def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):
# save inputs for copy
self.ft = ft
self.D = D
self.layer_sizes_mean = layer_sizes_mean
self.layer_sizes_var = layer_sizes_var
##### model the mean #####
self.mean_layers = []
m1 = D
for m2 in layer_sizes_mean:
layer = Layer(m1, m2)
self.mean_layers.append(layer)
m1 = m2
# final layer
layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)
self.mean_layers.append(layer)
##### model the variance #####
self.var_layers = []
        m1 = D
        for m2 in layer_sizes_var:
layer = Layer(m1, m2)
self.var_layers.append(layer)
m1 = m2
# final layer
layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)
self.var_layers.append(layer)
# get all params for gradient
params = []
for layer in (self.mean_layers + self.var_layers):
params += layer.params
self.params = params
        # inputs and targets (actions and advantages are defined but unused in this hill-climbing version)
x = T.matrix('x')
actions = T.vector('actions')
advantages = T.vector('advantages')
# calculate output and cost
def get_output(layers):
z = x
for layer in layers:
z = layer.forward(z)
return z.flatten()
mean = get_output(self.mean_layers)
var = get_output(self.var_layers) + 1e-4 # smoothing
self.predict_ = theano.function(
inputs=[x],
outputs=[mean, var],
allow_input_downcast=True
)
def predict(self, x):
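        # featurize the raw observation before running the compiled theano function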
x = np.atleast_2d(x)
x = self.ft.transform(x)
return self.predict_(x)
def sample_action(self, x):
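        # sample a ~ N(mu, v) from the predicted Gaussian, then clip into the [-1, 1] action range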
pred = self.predict(x)
mu = pred[0][0]
v = pred[1][0]
a = np.random.randn()*np.sqrt(v) + mu
return min(max(a, -1), 1)
def copy(self):
        clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.layer_sizes_var)
clone.copy_from(self)
return clone
def copy_from(self, other):
# self is being copied from other
for p, q in zip(self.params, other.params):
v = q.get_value()
p.set_value(v)
def perturb_params(self):
for p in self.params:
v = p.get_value()
noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0
if np.random.random() < 0.1:
# with probability 0.1 start completely from scratch
p.set_value(noise)
else:
p.set_value(v + noise)
def episode(env, policy, gamma):
observation = env.reset()
done = False
total_reward = 0
iterations = 0
while not done and iterations < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = policy.sample_action(observation)
# oddly, the mountain car environment requires the action to be in
# an object where the actual action is stored in object[0]
observation, reward, done, info = env.step([action])
total_reward += reward
iterations += 1
return total_reward
def series(env, T, policy, gamma, print_iters=False):
total_rewards = np.empty(T)
for i in range(T):
total_rewards[i] = episode(env, policy, gamma)
if print_iters:
print(i, "Average so far:", total_rewards[:(i+1)].mean())
avg_totalrewards = total_rewards.mean()
print("Average total rewards:", avg_totalrewards)
return avg_totalrewards
def random_search(env, policy, gamma):
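    # simple hill climbing: perturb the best policy found so far and keep the
    # perturbed copy only when it improves the average episode return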
total_rewards = []
best_avg_totalreward = float('-inf')
best_policy = policy
num_episodes_per_param_test = 3
for t in range(100):
tmp_model = best_policy.copy()
tmp_model.perturb_params()
avg_totalrewards = series(
env,
num_episodes_per_param_test,
tmp_model,
gamma
)
total_rewards.append(avg_totalrewards)
if avg_totalrewards > best_avg_totalreward:
best_policy = tmp_model
best_avg_totalreward = avg_totalrewards
return total_rewards, best_policy
def main():
env = gym.make('MountainCarContinuous-v0')
ft = Transformer(env, n_components=100)
D = ft.dimensions
model = Policy(ft, D, [], [])
gamma = 0.99
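    # pass 'monitor' on the command line to record videos via the gym Monitor wrapper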
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
total_rewards, model = random_search(env, model, gamma)
print("max reward:", np.max(total_rewards))
# play 100 episodes and check the average
avg_totalrewards = series(env, 100, model, gamma, print_iters=True)
print("avg reward over 100 episodes with best models:", avg_totalrewards)
plt.plot(total_rewards)
plt.title("Rewards")
plt.show()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "63ee25791177ead5389c14990ce6da3e2c11b683",
"index": 6356,
"step-1": "<mask token>\n\n\nclass Layer:\n\n def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):\n if zeros:\n w = np.zeros((m1, m2))\n else:\n w = np.random.randn(m1, m2) * np.sqrt(2 / m1)\n self.w = theano.shared(w)\n self.params = [self.w]\n self.use_bias = use_bias\n if use_bias:\n self.b = theano.shared(np.zeros(m2))\n self.params += [self.b]\n self.f = f\n <mask token>\n\n\nclass Policy:\n\n def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):\n self.ft = ft\n self.D = D\n self.layer_sizes_mean = layer_sizes_mean\n self.layer_sizes_var = layer_sizes_var\n self.mean_layers = []\n m1 = D\n for m2 in layer_sizes_mean:\n layer = Layer(m1, m2)\n self.mean_layers.append(layer)\n m1 = m2\n layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)\n self.mean_layers.append(layer)\n self.var_layers = []\n M1 = D\n for M2 in layer_sizes_var:\n layer = Layer(m1, m2)\n self.var_layers.append(layer)\n m1 = m2\n layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)\n self.var_layers.append(layer)\n params = []\n for layer in (self.mean_layers + self.var_layers):\n params += layer.params\n self.params = params\n x = T.matrix('x')\n actions = T.vector('actions')\n advantages = T.vector('advantages')\n\n def get_output(layers):\n z = x\n for layer in layers:\n z = layer.forward(z)\n return z.flatten()\n mean = get_output(self.mean_layers)\n var = get_output(self.var_layers) + 0.0001\n self.predict_ = theano.function(inputs=[x], outputs=[mean, var],\n allow_input_downcast=True)\n\n def predict(self, x):\n x = np.atleast_2d(x)\n x = self.ft.transform(x)\n return self.predict_(x)\n\n def sample_action(self, x):\n pred = self.predict(x)\n mu = pred[0][0]\n v = pred[1][0]\n a = np.random.randn() * np.sqrt(v) + mu\n return min(max(a, -1), 1)\n\n def copy(self):\n clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.\n layer_sizes_mean)\n clone.copy_from(self)\n return clone\n\n def copy_from(self, other):\n for p, q in zip(self.params, other.params):\n v = q.get_value()\n p.set_value(v)\n\n def perturb_params(self):\n for p in self.params:\n v = p.get_value()\n noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0\n if np.random.random() < 0.1:\n p.set_value(noise)\n else:\n p.set_value(v + noise)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Layer:\n\n def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):\n if zeros:\n w = np.zeros((m1, m2))\n else:\n w = np.random.randn(m1, m2) * np.sqrt(2 / m1)\n self.w = theano.shared(w)\n self.params = [self.w]\n self.use_bias = use_bias\n if use_bias:\n self.b = theano.shared(np.zeros(m2))\n self.params += [self.b]\n self.f = f\n\n def forward(self, x):\n if self.use_bias:\n a = x.dot(self.w) + self.b\n else:\n a = x.dot(self.w)\n return self.f(a)\n\n\nclass Policy:\n\n def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):\n self.ft = ft\n self.D = D\n self.layer_sizes_mean = layer_sizes_mean\n self.layer_sizes_var = layer_sizes_var\n self.mean_layers = []\n m1 = D\n for m2 in layer_sizes_mean:\n layer = Layer(m1, m2)\n self.mean_layers.append(layer)\n m1 = m2\n layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)\n self.mean_layers.append(layer)\n self.var_layers = []\n M1 = D\n for M2 in layer_sizes_var:\n layer = Layer(m1, m2)\n self.var_layers.append(layer)\n m1 = m2\n layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)\n self.var_layers.append(layer)\n params = []\n for layer in (self.mean_layers + self.var_layers):\n params += layer.params\n self.params = params\n x = T.matrix('x')\n actions = T.vector('actions')\n advantages = T.vector('advantages')\n\n def get_output(layers):\n z = x\n for layer in layers:\n z = layer.forward(z)\n return z.flatten()\n mean = get_output(self.mean_layers)\n var = get_output(self.var_layers) + 0.0001\n self.predict_ = theano.function(inputs=[x], outputs=[mean, var],\n allow_input_downcast=True)\n\n def predict(self, x):\n x = np.atleast_2d(x)\n x = self.ft.transform(x)\n return self.predict_(x)\n\n def sample_action(self, x):\n pred = self.predict(x)\n mu = pred[0][0]\n v = pred[1][0]\n a = np.random.randn() * np.sqrt(v) + mu\n return min(max(a, -1), 1)\n\n def copy(self):\n clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.\n layer_sizes_mean)\n clone.copy_from(self)\n return clone\n\n def copy_from(self, other):\n for p, q in zip(self.params, other.params):\n v = q.get_value()\n p.set_value(v)\n\n def perturb_params(self):\n for p in self.params:\n v = p.get_value()\n noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0\n if np.random.random() < 0.1:\n p.set_value(noise)\n else:\n p.set_value(v + noise)\n\n\n<mask token>\n\n\ndef series(env, T, policy, gamma, print_iters=False):\n total_rewards = np.empty(T)\n for i in range(T):\n total_rewards[i] = episode(env, policy, gamma)\n if print_iters:\n print(i, 'Average so far:', total_rewards[:i + 1].mean())\n avg_totalrewards = total_rewards.mean()\n print('Average total rewards:', avg_totalrewards)\n return avg_totalrewards\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Layer:\n\n def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):\n if zeros:\n w = np.zeros((m1, m2))\n else:\n w = np.random.randn(m1, m2) * np.sqrt(2 / m1)\n self.w = theano.shared(w)\n self.params = [self.w]\n self.use_bias = use_bias\n if use_bias:\n self.b = theano.shared(np.zeros(m2))\n self.params += [self.b]\n self.f = f\n\n def forward(self, x):\n if self.use_bias:\n a = x.dot(self.w) + self.b\n else:\n a = x.dot(self.w)\n return self.f(a)\n\n\nclass Policy:\n\n def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):\n self.ft = ft\n self.D = D\n self.layer_sizes_mean = layer_sizes_mean\n self.layer_sizes_var = layer_sizes_var\n self.mean_layers = []\n m1 = D\n for m2 in layer_sizes_mean:\n layer = Layer(m1, m2)\n self.mean_layers.append(layer)\n m1 = m2\n layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)\n self.mean_layers.append(layer)\n self.var_layers = []\n M1 = D\n for M2 in layer_sizes_var:\n layer = Layer(m1, m2)\n self.var_layers.append(layer)\n m1 = m2\n layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)\n self.var_layers.append(layer)\n params = []\n for layer in (self.mean_layers + self.var_layers):\n params += layer.params\n self.params = params\n x = T.matrix('x')\n actions = T.vector('actions')\n advantages = T.vector('advantages')\n\n def get_output(layers):\n z = x\n for layer in layers:\n z = layer.forward(z)\n return z.flatten()\n mean = get_output(self.mean_layers)\n var = get_output(self.var_layers) + 0.0001\n self.predict_ = theano.function(inputs=[x], outputs=[mean, var],\n allow_input_downcast=True)\n\n def predict(self, x):\n x = np.atleast_2d(x)\n x = self.ft.transform(x)\n return self.predict_(x)\n\n def sample_action(self, x):\n pred = self.predict(x)\n mu = pred[0][0]\n v = pred[1][0]\n a = np.random.randn() * np.sqrt(v) + mu\n return min(max(a, -1), 1)\n\n def copy(self):\n clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.\n layer_sizes_mean)\n clone.copy_from(self)\n return clone\n\n def copy_from(self, other):\n for p, q in zip(self.params, other.params):\n v = q.get_value()\n p.set_value(v)\n\n def perturb_params(self):\n for p in self.params:\n v = p.get_value()\n noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0\n if np.random.random() < 0.1:\n p.set_value(noise)\n else:\n p.set_value(v + noise)\n\n\n<mask token>\n\n\ndef series(env, T, policy, gamma, print_iters=False):\n total_rewards = np.empty(T)\n for i in range(T):\n total_rewards[i] = episode(env, policy, gamma)\n if print_iters:\n print(i, 'Average so far:', total_rewards[:i + 1].mean())\n avg_totalrewards = total_rewards.mean()\n print('Average total rewards:', avg_totalrewards)\n return avg_totalrewards\n\n\n<mask token>\n\n\ndef main():\n env = gym.make('MountainCarContinuous-v0')\n ft = Transformer(env, n_components=100)\n D = ft.dimensions\n model = Policy(ft, D, [], [])\n gamma = 0.99\n if 'monitor' in sys.argv:\n filename = os.path.basename(__file__).split('.')[0]\n monitor_dir = './' + filename + '_' + str(datetime.now())\n env = wrappers.Monitor(env, monitor_dir)\n total_rewards, model = random_search(env, model, gamma)\n print('max reward:', np.max(total_rewards))\n avg_totalrewards = series(env, 100, model, gamma, print_iters=True)\n print('avg reward over 100 episodes with best models:', avg_totalrewards)\n plt.plot(total_rewards)\n plt.title('Rewards')\n plt.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Layer:\n\n def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):\n if zeros:\n w = np.zeros((m1, m2))\n else:\n w = np.random.randn(m1, m2) * np.sqrt(2 / m1)\n self.w = theano.shared(w)\n self.params = [self.w]\n self.use_bias = use_bias\n if use_bias:\n self.b = theano.shared(np.zeros(m2))\n self.params += [self.b]\n self.f = f\n\n def forward(self, x):\n if self.use_bias:\n a = x.dot(self.w) + self.b\n else:\n a = x.dot(self.w)\n return self.f(a)\n\n\nclass Policy:\n\n def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):\n self.ft = ft\n self.D = D\n self.layer_sizes_mean = layer_sizes_mean\n self.layer_sizes_var = layer_sizes_var\n self.mean_layers = []\n m1 = D\n for m2 in layer_sizes_mean:\n layer = Layer(m1, m2)\n self.mean_layers.append(layer)\n m1 = m2\n layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)\n self.mean_layers.append(layer)\n self.var_layers = []\n M1 = D\n for M2 in layer_sizes_var:\n layer = Layer(m1, m2)\n self.var_layers.append(layer)\n m1 = m2\n layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)\n self.var_layers.append(layer)\n params = []\n for layer in (self.mean_layers + self.var_layers):\n params += layer.params\n self.params = params\n x = T.matrix('x')\n actions = T.vector('actions')\n advantages = T.vector('advantages')\n\n def get_output(layers):\n z = x\n for layer in layers:\n z = layer.forward(z)\n return z.flatten()\n mean = get_output(self.mean_layers)\n var = get_output(self.var_layers) + 0.0001\n self.predict_ = theano.function(inputs=[x], outputs=[mean, var],\n allow_input_downcast=True)\n\n def predict(self, x):\n x = np.atleast_2d(x)\n x = self.ft.transform(x)\n return self.predict_(x)\n\n def sample_action(self, x):\n pred = self.predict(x)\n mu = pred[0][0]\n v = pred[1][0]\n a = np.random.randn() * np.sqrt(v) + mu\n return min(max(a, -1), 1)\n\n def copy(self):\n clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.\n layer_sizes_mean)\n clone.copy_from(self)\n return clone\n\n def copy_from(self, other):\n for p, q in zip(self.params, other.params):\n v = q.get_value()\n p.set_value(v)\n\n def perturb_params(self):\n for p in self.params:\n v = p.get_value()\n noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0\n if np.random.random() < 0.1:\n p.set_value(noise)\n else:\n p.set_value(v + noise)\n\n\ndef episode(env, policy, gamma):\n observation = env.reset()\n done = False\n total_reward = 0\n iterations = 0\n while not done and iterations < 2000:\n action = policy.sample_action(observation)\n observation, reward, done, info = env.step([action])\n total_reward += reward\n iterations += 1\n return total_reward\n\n\ndef series(env, T, policy, gamma, print_iters=False):\n total_rewards = np.empty(T)\n for i in range(T):\n total_rewards[i] = episode(env, policy, gamma)\n if print_iters:\n print(i, 'Average so far:', total_rewards[:i + 1].mean())\n avg_totalrewards = total_rewards.mean()\n print('Average total rewards:', avg_totalrewards)\n return avg_totalrewards\n\n\n<mask token>\n\n\ndef main():\n env = gym.make('MountainCarContinuous-v0')\n ft = Transformer(env, n_components=100)\n D = ft.dimensions\n model = Policy(ft, D, [], [])\n gamma = 0.99\n if 'monitor' in sys.argv:\n filename = os.path.basename(__file__).split('.')[0]\n monitor_dir = './' + filename + '_' + str(datetime.now())\n env = wrappers.Monitor(env, monitor_dir)\n total_rewards, model = random_search(env, model, gamma)\n print('max reward:', np.max(total_rewards))\n 
avg_totalrewards = series(env, 100, model, gamma, print_iters=True)\n print('avg reward over 100 episodes with best models:', avg_totalrewards)\n plt.plot(total_rewards)\n plt.title('Rewards')\n plt.show()\n\n\n<mask token>\n",
"step-5": "import gym\nimport os\nimport sys\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport matplotlib.pyplot as plt\nfrom gym import wrappers\nfrom datetime import datetime\n\nfrom mountain_car_v1_q_learning import Transformer\n\n\n\n# so you can test different architectures\nclass Layer:\n\n def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):\n\n if zeros:\n w = np.zeros((m1, m2))\n else:\n w = np.random.randn(m1, m2) * np.sqrt(2 / m1)\n\n self.w = theano.shared(w)\n self.params = [self.w]\n self.use_bias = use_bias\n\n if use_bias:\n self.b = theano.shared(np.zeros(m2))\n self.params += [self.b]\n\n self.f = f\n\n def forward(self, x):\n\n if self.use_bias:\n a = x.dot(self.w) + self.b\n else:\n a = x.dot(self.w)\n\n return self.f(a)\n\n\n# approximates pi(a | s)\nclass Policy:\n\n\n def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):\n # save inputs for copy\n self.ft = ft\n self.D = D\n self.layer_sizes_mean = layer_sizes_mean\n self.layer_sizes_var = layer_sizes_var\n\n ##### model the mean #####\n\n self.mean_layers = []\n m1 = D\n for m2 in layer_sizes_mean:\n layer = Layer(m1, m2)\n self.mean_layers.append(layer)\n m1 = m2\n\n # final layer\n layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)\n self.mean_layers.append(layer)\n\n\n ##### model the variance #####\n self.var_layers = []\n M1 = D\n for M2 in layer_sizes_var:\n layer = Layer(m1, m2)\n self.var_layers.append(layer)\n m1 = m2\n\n # final layer\n layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)\n self.var_layers.append(layer)\n\n # get all params for gradient\n params = []\n for layer in (self.mean_layers + self.var_layers):\n params += layer.params\n self.params = params\n\n # inputs and targets\n x = T.matrix('x')\n actions = T.vector('actions')\n advantages = T.vector('advantages')\n\n # calculate output and cost\n def get_output(layers):\n z = x\n for layer in layers:\n z = layer.forward(z)\n return z.flatten()\n\n mean = get_output(self.mean_layers)\n var = get_output(self.var_layers) + 1e-4 # smoothing\n\n\n self.predict_ = theano.function(\n inputs=[x],\n outputs=[mean, var],\n allow_input_downcast=True\n )\n\n def predict(self, x):\n\n x = np.atleast_2d(x)\n x = self.ft.transform(x)\n\n return self.predict_(x)\n\n def sample_action(self, x):\n\n pred = self.predict(x)\n mu = pred[0][0]\n v = pred[1][0]\n a = np.random.randn()*np.sqrt(v) + mu\n\n return min(max(a, -1), 1)\n\n def copy(self):\n\n clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.layer_sizes_mean)\n clone.copy_from(self)\n return clone\n\n def copy_from(self, other):\n # self is being copied from other\n for p, q in zip(self.params, other.params):\n v = q.get_value()\n p.set_value(v)\n\n def perturb_params(self):\n\n for p in self.params:\n v = p.get_value()\n noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0\n if np.random.random() < 0.1:\n # with probability 0.1 start completely from scratch\n p.set_value(noise)\n else:\n p.set_value(v + noise)\n\n\ndef episode(env, policy, gamma):\n\n observation = env.reset()\n done = False\n total_reward = 0\n iterations = 0\n\n while not done and iterations < 2000:\n # if we reach 2000, just quit, don't want this going forever\n # the 200 limit seems a bit early\n action = policy.sample_action(observation)\n # oddly, the mountain car environment requires the action to be in\n # an object where the actual action is stored in object[0]\n observation, reward, done, info = env.step([action])\n\n total_reward 
+= reward\n iterations += 1\n\n return total_reward\n\n\ndef series(env, T, policy, gamma, print_iters=False):\n\n total_rewards = np.empty(T)\n\n for i in range(T):\n total_rewards[i] = episode(env, policy, gamma)\n\n if print_iters:\n print(i, \"Average so far:\", total_rewards[:(i+1)].mean())\n\n avg_totalrewards = total_rewards.mean()\n print(\"Average total rewards:\", avg_totalrewards)\n return avg_totalrewards\n\n\ndef random_search(env, policy, gamma):\n\n total_rewards = []\n best_avg_totalreward = float('-inf')\n best_policy = policy\n num_episodes_per_param_test = 3\n\n for t in range(100):\n tmp_model = best_policy.copy()\n\n tmp_model.perturb_params()\n\n avg_totalrewards = series(\n env,\n num_episodes_per_param_test,\n tmp_model,\n gamma\n )\n total_rewards.append(avg_totalrewards)\n\n if avg_totalrewards > best_avg_totalreward:\n best_policy = tmp_model\n best_avg_totalreward = avg_totalrewards\n\n return total_rewards, best_policy\n\n\ndef main():\n env = gym.make('MountainCarContinuous-v0')\n ft = Transformer(env, n_components=100)\n D = ft.dimensions\n model = Policy(ft, D, [], [])\n gamma = 0.99\n\n if 'monitor' in sys.argv:\n filename = os.path.basename(__file__).split('.')[0]\n monitor_dir = './' + filename + '_' + str(datetime.now())\n env = wrappers.Monitor(env, monitor_dir)\n\n\n total_rewards, model = random_search(env, model, gamma)\n\n print(\"max reward:\", np.max(total_rewards))\n\n # play 100 episodes and check the average\n avg_totalrewards = series(env, 100, model, gamma, print_iters=True)\n print(\"avg reward over 100 episodes with best models:\", avg_totalrewards)\n\n plt.plot(total_rewards)\n plt.title(\"Rewards\")\n plt.show()\n\n\nif __name__ == '__main__':\n main()",
"step-ids": [
9,
11,
12,
13,
17
]
}
|
[
9,
11,
12,
13,
17
] |
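The random_search routine in the record above is plain hill climbing over policy parameters: copy the incumbent, perturb the copy (with a small chance of a full restart, as in perturb_params), score it over a few episodes, and keep it only if it beats the best so far. A minimal self-contained sketch of that loop, with a toy quadratic standing in for the gym episodes (every name and constant below is illustrative, not from the record):
import numpy as np
def perturb(params, scale=0.5, restart_prob=0.1):
    # random local step; occasionally restart from scratch, mirroring perturb_params
    noise = np.random.randn(*params.shape) * scale
    return noise if np.random.random() < restart_prob else params + noise
def evaluate(params, trials=3):
    # toy stand-in for averaging a few episodes; reward peaks at (1, -2)
    target = np.array([1.0, -2.0])
    return np.mean([-np.sum((params - target) ** 2) + 0.01 * np.random.randn()
                    for _ in range(trials)])
best, best_score = np.zeros(2), -np.inf
for t in range(200):
    candidate = perturb(best)
    score = evaluate(candidate)
    if score > best_score:  # greedy: keep only strict improvements
        best, best_score = candidate, score
print('best params:', best, 'score:', best_score)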
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
graph.write_png('mydecisiontree.png')
<|reserved_special_token_0|>
plt.show()
print(X)
print(y)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df = pandas.read_csv('show.csv')
d = {'UK': 0, 'USA': 1, 'N': 2}
df['Nationality'] = df['Nationality'].map(d)
d = {'YES': 1, 'NO': 0}
df['Go'] = df['Go'].map(d)
features = ['Age', 'Experience', 'Rank', 'Nationality']
X = df[features]
y = df['Go']
dtree = DecisionTreeClassifier()
dtree = dtree.fit(X, y)
data = tree.export_graphviz(dtree, out_file=None, feature_names=features)
graph = pydotplus.graph_from_dot_data(data)
graph.write_png('mydecisiontree.png')
img = pltimg.imread('mydecisiontree.png')
imgplot = plt.imshow(img)
plt.show()
print(X)
print(y)
<|reserved_special_token_1|>
import cv2
import pandas
from sklearn import tree
import pydotplus
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.image as pltimg
df = pandas.read_csv('show.csv')
d = {'UK': 0, 'USA': 1, 'N': 2}
df['Nationality'] = df['Nationality'].map(d)
d = {'YES': 1, 'NO': 0}
df['Go'] = df['Go'].map(d)
features = ['Age', 'Experience', 'Rank', 'Nationality']
X = df[features]
y = df['Go']
dtree = DecisionTreeClassifier()
dtree = dtree.fit(X, y)
data = tree.export_graphviz(dtree, out_file=None, feature_names=features)
graph = pydotplus.graph_from_dot_data(data)
graph.write_png('mydecisiontree.png')
img = pltimg.imread('mydecisiontree.png')
imgplot = plt.imshow(img)
plt.show()
print(X)
print(y)
<|reserved_special_token_1|>
import cv2
import pandas
from sklearn import tree
import pydotplus
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.image as pltimg
df = pandas.read_csv("show.csv")
d = {'UK': 0, 'USA': 1, 'N': 2}
df['Nationality'] = df['Nationality'].map(d)
d = {'YES': 1, 'NO': 0}
df['Go'] = df['Go'].map(d)
######
features = ['Age', 'Experience', 'Rank', 'Nationality']
X = df[features]
y = df['Go']
#####
dtree = DecisionTreeClassifier()
dtree = dtree.fit(X, y)
data = tree.export_graphviz(dtree, out_file=None, feature_names=features)
graph = pydotplus.graph_from_dot_data(data)
graph.write_png('mydecisiontree.png')
img=pltimg.imread('mydecisiontree.png')
imgplot = plt.imshow(img)
plt.show()
print(X)
print(y)
|
flexible
|
{
"blob_id": "c9cf65eeec49eba004312491cdd2321200fa6a61",
"index": 469,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngraph.write_png('mydecisiontree.png')\n<mask token>\nplt.show()\nprint(X)\nprint(y)\n",
"step-3": "<mask token>\ndf = pandas.read_csv('show.csv')\nd = {'UK': 0, 'USA': 1, 'N': 2}\ndf['Nationality'] = df['Nationality'].map(d)\nd = {'YES': 1, 'NO': 0}\ndf['Go'] = df['Go'].map(d)\nfeatures = ['Age', 'Experience', 'Rank', 'Nationality']\nX = df[features]\ny = df['Go']\ndtree = DecisionTreeClassifier()\ndtree = dtree.fit(X, y)\ndata = tree.export_graphviz(dtree, out_file=None, feature_names=features)\ngraph = pydotplus.graph_from_dot_data(data)\ngraph.write_png('mydecisiontree.png')\nimg = pltimg.imread('mydecisiontree.png')\nimgplot = plt.imshow(img)\nplt.show()\nprint(X)\nprint(y)\n",
"step-4": "import cv2\nimport pandas\nfrom sklearn import tree\nimport pydotplus\nfrom sklearn.tree import DecisionTreeClassifier\nimport matplotlib.pyplot as plt\nimport matplotlib.image as pltimg\ndf = pandas.read_csv('show.csv')\nd = {'UK': 0, 'USA': 1, 'N': 2}\ndf['Nationality'] = df['Nationality'].map(d)\nd = {'YES': 1, 'NO': 0}\ndf['Go'] = df['Go'].map(d)\nfeatures = ['Age', 'Experience', 'Rank', 'Nationality']\nX = df[features]\ny = df['Go']\ndtree = DecisionTreeClassifier()\ndtree = dtree.fit(X, y)\ndata = tree.export_graphviz(dtree, out_file=None, feature_names=features)\ngraph = pydotplus.graph_from_dot_data(data)\ngraph.write_png('mydecisiontree.png')\nimg = pltimg.imread('mydecisiontree.png')\nimgplot = plt.imshow(img)\nplt.show()\nprint(X)\nprint(y)\n",
"step-5": "import cv2\nimport pandas\nfrom sklearn import tree\nimport pydotplus\nfrom sklearn.tree import DecisionTreeClassifier\nimport matplotlib.pyplot as plt\nimport matplotlib.image as pltimg\n\ndf = pandas.read_csv(\"show.csv\")\nd = {'UK': 0, 'USA': 1, 'N': 2}\ndf['Nationality'] = df['Nationality'].map(d)\nd = {'YES': 1, 'NO': 0}\ndf['Go'] = df['Go'].map(d)\n\n######\nfeatures = ['Age', 'Experience', 'Rank', 'Nationality']\nX = df[features]\ny = df['Go']\n#####\ndtree = DecisionTreeClassifier()\ndtree = dtree.fit(X, y)\ndata = tree.export_graphviz(dtree, out_file=None, feature_names=features)\ngraph = pydotplus.graph_from_dot_data(data)\ngraph.write_png('mydecisiontree.png')\n\nimg=pltimg.imread('mydecisiontree.png')\nimgplot = plt.imshow(img)\nplt.show()\nprint(X)\nprint(y)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
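The pipeline above needs pydotplus plus an installed Graphviz binary just to inspect the fitted tree. sklearn's own export_text renders the same tree as plain text with no extra dependencies; here is a sketch using a made-up stand-in for show.csv (the rows below are illustrative, not the record's data):
import pandas as pd
from sklearn.tree import DecisionTreeClassifier, export_text
df = pd.DataFrame({
    'Age': [36, 42, 23, 52, 43], 'Experience': [10, 12, 4, 4, 21],
    'Rank': [9, 4, 6, 4, 8], 'Nationality': ['UK', 'USA', 'N', 'USA', 'USA'],
    'Go': ['NO', 'NO', 'NO', 'NO', 'YES']})
df['Nationality'] = df['Nationality'].map({'UK': 0, 'USA': 1, 'N': 2})
df['Go'] = df['Go'].map({'YES': 1, 'NO': 0})
features = ['Age', 'Experience', 'Rank', 'Nationality']
dtree = DecisionTreeClassifier().fit(df[features], df['Go'])
print(export_text(dtree, feature_names=features))  # text rendering, no Graphviz needed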
from PIL import Image, ImageFilter
import numpy as np
import glob
from numpy import array
import matplotlib.pyplot as plt
from skimage import morphology
import scipy.misc  # imsave lives in scipy.misc, not scipy.ndimage (removed in SciPy >= 1.2; imageio.imwrite is the modern replacement)
def sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True):
if (display1):
new_list = []
new_list.append(stack)
new_list.append(stack)
new_list.append(stack)
new_list.append(stack)
sample_stack(new_list, 2, 2, 0, 1, False)
else:
fig,ax = plt.subplots(rows,cols,figsize=[12,12])
for i in range((rows*cols)):
ind = start_with + i*show_every
ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)
ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')
ax[int(i/rows),int(i % rows)].axis('off')
plt.show()
"""
datapath = "jpg_images/"
img0 = Image.open("jpg_images/maskedimage" + str(0) + ".jpg")
counter = 0
img1 = []
for f in glob.glob('/Users/paulmccabe/Desktop/jpg images/*.jpg'):
path = "jpg_images/maskedimage" + str(counter) + ".jpg"
img0 = Image.open(path).convert('L')
img1.append(array(img0))
counter += 1
print("Counter: " + str(counter))
imgs_to_process_orig = np.stack([s for s in img1])
"""
id = 2
imgs = np.load("/Users/paulmccabe/Desktop/Segmentation Project/" + "justmask_%d.npy" % (id))
counter = 0
print("Saving as jpg Images...")
for img in imgs:
scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' + '/jpg mask images/justmask{}.jpg'.format(counter), img)
counter += 1
counter = 0
#print("Re-Importing jpg Images...")
#for f in glob.glob('/Users/paulmccabe/Desktop/Segmentation Project/jpg mask images/*.jpg'):
# path = "jpg_images/maskedimage" + str(counter) + ".jpg"
# img0 = Image.open(path).convert('L')
# img1.append(array(img0))
# counter += 1
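# rescale the 0/1 mask to 0/255 so FIND_EDGES has real 8-bit contrast to detect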
imgs[imgs == 1] = 255
list = []
for img in imgs:
PIL_img = Image.fromarray(img.astype('uint8'))
PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)
np_img = array(PIL_edge)
dilation = morphology.dilation(np_img, np.ones([4,4]))
list.append(dilation)
imgs_after_processing = np.stack([s for s in list])
np.save("/Users/paulmccabe/Desktop/Segmentation Project" + "/justedge_%d.npy" % (id), imgs_after_processing[:284])
#sample_stack(np_img)
|
normal
|
{
"blob_id": "371c1c9e3ccf7dae35d435bdb013e0462f3add5d",
"index": 4831,
"step-1": "<mask token>\n\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1,\n display1=True):\n if display1:\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig, ax = plt.subplots(rows, cols, figsize=[12, 12])\n for i in range(rows * cols):\n ind = start_with + i * show_every\n ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)\n ax[int(i / rows), int(i % rows)].imshow(stack[ind], cmap='gray')\n ax[int(i / rows), int(i % rows)].axis('off')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1,\n display1=True):\n if display1:\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig, ax = plt.subplots(rows, cols, figsize=[12, 12])\n for i in range(rows * cols):\n ind = start_with + i * show_every\n ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)\n ax[int(i / rows), int(i % rows)].imshow(stack[ind], cmap='gray')\n ax[int(i / rows), int(i % rows)].axis('off')\n plt.show()\n\n\n<mask token>\nprint('Saving as jpg Images...')\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' +\n '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\n<mask token>\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4, 4]))\n list.append(dilation)\n<mask token>\nnp.save('/Users/paulmccabe/Desktop/Segmentation Project' + \n '/justedge_%d.npy' % id, imgs_after_processing[:284])\n",
"step-3": "<mask token>\n\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1,\n display1=True):\n if display1:\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig, ax = plt.subplots(rows, cols, figsize=[12, 12])\n for i in range(rows * cols):\n ind = start_with + i * show_every\n ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)\n ax[int(i / rows), int(i % rows)].imshow(stack[ind], cmap='gray')\n ax[int(i / rows), int(i % rows)].axis('off')\n plt.show()\n\n\n<mask token>\nid = 2\nimgs = np.load('/Users/paulmccabe/Desktop/Segmentation Project/' + \n 'justmask_%d.npy' % id)\ncounter = 0\nprint('Saving as jpg Images...')\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' +\n '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\ncounter = 0\nimgs[imgs == 1] = 255\nlist = []\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4, 4]))\n list.append(dilation)\nimgs_after_processing = np.stack([s for s in list])\nnp.save('/Users/paulmccabe/Desktop/Segmentation Project' + \n '/justedge_%d.npy' % id, imgs_after_processing[:284])\n",
"step-4": "from PIL import Image, ImageFilter\nimport numpy as np\nimport glob\nfrom numpy import array\nimport matplotlib.pyplot as plt\nfrom skimage import morphology\nimport scipy.ndimage\n\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1,\n display1=True):\n if display1:\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig, ax = plt.subplots(rows, cols, figsize=[12, 12])\n for i in range(rows * cols):\n ind = start_with + i * show_every\n ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)\n ax[int(i / rows), int(i % rows)].imshow(stack[ind], cmap='gray')\n ax[int(i / rows), int(i % rows)].axis('off')\n plt.show()\n\n\n<mask token>\nid = 2\nimgs = np.load('/Users/paulmccabe/Desktop/Segmentation Project/' + \n 'justmask_%d.npy' % id)\ncounter = 0\nprint('Saving as jpg Images...')\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' +\n '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\ncounter = 0\nimgs[imgs == 1] = 255\nlist = []\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4, 4]))\n list.append(dilation)\nimgs_after_processing = np.stack([s for s in list])\nnp.save('/Users/paulmccabe/Desktop/Segmentation Project' + \n '/justedge_%d.npy' % id, imgs_after_processing[:284])\n",
"step-5": "from PIL import Image, ImageFilter\nimport numpy as np\nimport glob\nfrom numpy import array\nimport matplotlib.pyplot as plt\nfrom skimage import morphology\nimport scipy.ndimage\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True):\n if (display1):\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig,ax = plt.subplots(rows,cols,figsize=[12,12])\n for i in range((rows*cols)):\n ind = start_with + i*show_every\n ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)\n ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')\n ax[int(i/rows),int(i % rows)].axis('off')\n plt.show()\n\"\"\"\ndatapath = \"jpg_images/\"\nimg0 = Image.open(\"jpg_images/maskedimage\" + str(0) + \".jpg\")\ncounter = 0\nimg1 = []\nfor f in glob.glob('/Users/paulmccabe/Desktop/jpg images/*.jpg'):\n path = \"jpg_images/maskedimage\" + str(counter) + \".jpg\"\n img0 = Image.open(path).convert('L')\n img1.append(array(img0))\n counter += 1\nprint(\"Counter: \" + str(counter))\nimgs_to_process_orig = np.stack([s for s in img1])\n\"\"\"\nid = 2\n\nimgs = np.load(\"/Users/paulmccabe/Desktop/Segmentation Project/\" + \"justmask_%d.npy\" % (id))\ncounter = 0\nprint(\"Saving as jpg Images...\")\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' + '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\ncounter = 0\n#print(\"Re-Importing jpg Images...\")\n#for f in glob.glob('/Users/paulmccabe/Desktop/Segmentation Project/jpg mask images/*.jpg'):\n# path = \"jpg_images/maskedimage\" + str(counter) + \".jpg\"\n# img0 = Image.open(path).convert('L')\n# img1.append(array(img0))\n# counter += 1\nimgs[imgs == 1] = 255\nlist = []\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4,4]))\n list.append(dilation)\n\nimgs_after_processing = np.stack([s for s in list])\n\nnp.save(\"/Users/paulmccabe/Desktop/Segmentation Project\" + \"/justedge_%d.npy\" % (id), imgs_after_processing[:284])\n\n#sample_stack(np_img)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
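The core transform in the record above is: render a binary mask slice as an 8-bit image, run PIL's FIND_EDGES to get a one-pixel outline, then thicken that outline with a 4x4 morphological dilation. A self-contained sketch on a synthetic square mask (the shape and sizes are illustrative):
import numpy as np
from PIL import Image, ImageFilter
from skimage import morphology
mask = np.zeros((32, 32), dtype=np.uint8)
mask[8:24, 8:24] = 255  # filled square standing in for one mask slice
edges = np.array(Image.fromarray(mask).filter(ImageFilter.FIND_EDGES))
thick = morphology.dilation(edges, np.ones([4, 4]))  # fatten the 1-px outline
print('edge pixels:', int((edges > 0).sum()), '-> dilated:', int((thick > 0).sum()))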
import sqlite3
import sys
import threading
from time import sleep
sq = None
def get_queue(category, parser):
    global sq
    if sq is None:  # lazily create and cache the shared queue instance
        sq = liteQueue(category, parser)
    return sq
"""
SQLite Job Handler class for Links
"""
class liteQueue:
_create = "CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category' TEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs' INTEGER,PRIMARY KEY(url));"
_putList = "INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)"
_iterate = "SELECT * FROM LINK WHERE FETCHED = 0"
_write_lock = "BEGIN IMMEDIATE"
_pop_get_many = "SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT "
_pop_del_many = "UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)"
def __init__(self, category, parser):
self.conn_url = "databases/" + parser + "_" + category + ".db"
self._connection_cache = {}
with self._get_conn() as conn:
conn.execute(self._create)
def _get_conn(self):
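        # sqlite3 connections cannot be shared across threads, so cache one connection per thread id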
id = threading.current_thread().ident
if id not in self._connection_cache:
self._connection_cache[id] = sqlite3.Connection(self.conn_url, timeout=60)
return self._connection_cache[id]
def __iter__(self):
with self._get_conn() as conn:
for result in conn.execute(self._iterate):
yield result
def put_many(self, list_obj):
with self._get_conn() as conn:
try:
conn.cursor().executemany(self._putList, list_obj)
except Exception as e:
print(e)
def pop_many(self, amount, sleep_wait=True):
keep_pooling = True
sql_pop = self._pop_get_many + str(amount)
with self._get_conn() as conn:
result = None
while keep_pooling:
conn.execute(self._write_lock) # lock the database
cursor = conn.execute(sql_pop)
result = cursor.fetchall()
if(len(result) > 0):
keep_pooling = False
id_first = int(result[0][4])
id_last = int(result[-1][4])
conn.execute(self._pop_del_many, (id_first, id_last))
conn.commit() # unlock the database
return result
else:
conn.commit() # unlock the database
return None
|
normal
|
{
"blob_id": "ed6eda4b6dbf3e94d8efb53004b19cd9c49e927e",
"index": 3979,
"step-1": "<mask token>\n\n\nclass liteQueue:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url,\n timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n\n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n\n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock)\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n if len(result) > 0:\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit()\n return result\n else:\n conn.commit()\n return None\n",
"step-2": "<mask token>\n\n\nclass liteQueue:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, category, parser):\n self.conn_url = 'databases/' + parser + '_' + category + '.db'\n self._connection_cache = {}\n with self._get_conn() as conn:\n conn.execute(self._create)\n\n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url,\n timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n\n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n\n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock)\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n if len(result) > 0:\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit()\n return result\n else:\n conn.commit()\n return None\n",
"step-3": "<mask token>\n\n\nclass liteQueue:\n _create = (\n \"CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\\tTEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs'\\tINTEGER,PRIMARY KEY(url));\"\n )\n _putList = 'INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)'\n _iterate = 'SELECT * FROM LINK WHERE FETCHED = 0'\n _write_lock = 'BEGIN IMMEDIATE'\n _pop_get_many = (\n 'SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT '\n )\n _pop_del_many = (\n 'UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)'\n )\n\n def __init__(self, category, parser):\n self.conn_url = 'databases/' + parser + '_' + category + '.db'\n self._connection_cache = {}\n with self._get_conn() as conn:\n conn.execute(self._create)\n\n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url,\n timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n\n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n\n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock)\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n if len(result) > 0:\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit()\n return result\n else:\n conn.commit()\n return None\n",
"step-4": "<mask token>\n\n\ndef get_queue(category, parser):\n if sq == None:\n return liteQueue(category, parser)\n return sq\n\n\n<mask token>\n\n\nclass liteQueue:\n _create = (\n \"CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\\tTEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs'\\tINTEGER,PRIMARY KEY(url));\"\n )\n _putList = 'INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)'\n _iterate = 'SELECT * FROM LINK WHERE FETCHED = 0'\n _write_lock = 'BEGIN IMMEDIATE'\n _pop_get_many = (\n 'SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT '\n )\n _pop_del_many = (\n 'UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)'\n )\n\n def __init__(self, category, parser):\n self.conn_url = 'databases/' + parser + '_' + category + '.db'\n self._connection_cache = {}\n with self._get_conn() as conn:\n conn.execute(self._create)\n\n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url,\n timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n\n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n\n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock)\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n if len(result) > 0:\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit()\n return result\n else:\n conn.commit()\n return None\n",
"step-5": "import sqlite3\nimport sys\nimport threading\nfrom time import sleep\n\nsq = None\n\ndef get_queue(category, parser):\n if sq == None:\n return liteQueue(category, parser)\n return sq\n\n\"\"\"\nSqLite Job Handler class for Links\n\"\"\"\nclass liteQueue:\n _create = \"CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\tTEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs'\tINTEGER,PRIMARY KEY(url));\"\n _putList = \"INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)\"\n _iterate = \"SELECT * FROM LINK WHERE FETCHED = 0\"\n _write_lock = \"BEGIN IMMEDIATE\"\n _pop_get_many = \"SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT \"\n _pop_del_many = \"UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)\"\n\n def __init__(self, category, parser):\n self.conn_url = \"databases/\" + parser + \"_\" + category + \".db\"\n self._connection_cache = {}\n with self._get_conn() as conn:\n conn.execute(self._create)\n \n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url, timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n \n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n \n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock) # lock the database\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n\n if(len(result) > 0):\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit() # unlock the database\n return result\n else:\n conn.commit() # unlock the database\n return None",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
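A usage sketch for the queue above: put_many takes 6-tuples matching the link table's columns (url, category, origin, thumb, fetched, fetched_imgs), and pop_many atomically claims a batch of unfetched rows, returning (url, category, origin, thumb, rowid) tuples, or None when nothing is pending. The names below are made up, and a databases/ directory must already exist for SQLite to create the file:
q = get_queue('cats', 'demo_parser')  # opens databases/demo_parser_cats.db
q.put_many([
    ('http://example.com/a', 'cats', 'http://example.com', 'a.jpg', 0, 0),
    ('http://example.com/b', 'cats', 'http://example.com', 'b.jpg', 0, 0)])
batch = q.pop_many(2)  # marks the claimed rows fetched=1 under a write lock
if batch:
    for url, category, origin, thumb, rowid in batch:
        print('fetch', url)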
import requests
from multiprocessing import Process
from atomic_counter import AtomicCounter
class Downloader:
def __init__(self, src_url, num_threads):
try:
header = requests.head(src_url).headers
self.url = src_url
self.file_size = int(header.get('content-length'))
self.file_name = src_url.split('/')[-1]
self.num_threads = num_threads
self.chunk_size = self.file_size // self.num_threads
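            # preallocate the output file at full size so each worker can seek into its own byte range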
with open(self.file_name, 'wb') as f:
f.write(b'\x00' * self.file_size)
except requests.exceptions.ConnectionError:
print('Connection error, please check your internet connection.')
def _worker(self, download_range: tuple, counter: AtomicCounter):
start, end = download_range
header = {'Range': 'bytes=' + str(start) + '-' + str(end)}
r = requests.get(self.url, headers=header, stream=True, timeout=30)
binary_content = r.content
counter.increment_by_value(end - start + 1)
print(counter.get_value() / self.file_size)
        with open(self.file_name, 'r+b') as f:  # 'r+b' updates in place; 'wb' would truncate and wipe the other workers' chunks
f.seek(start)
f.write(binary_content)
def download(self) -> None:
download_ranges = []
for i in range(self.num_threads):
start = i * self.chunk_size
if i == self.num_threads - 1:
end = self.file_size
else:
end = start + self.chunk_size - 1
download_ranges.append((start, end))
atomic_counter = AtomicCounter()
process_pool = [Process(target=self._worker,
args=(download_ranges[i], atomic_counter))
for i in range(self.num_threads)]
for p in process_pool:
p.start()
for p in process_pool:
p.join()
if __name__ == "__main__":
downloader = Downloader(
'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)
downloader.download()
|
normal
|
{
"blob_id": "3dc3bbd00f9c2d00093bf8669963d96f5019b2da",
"index": 4648,
"step-1": "<mask token>\n\n\nclass Downloader:\n <mask token>\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) ->None:\n download_ranges = []\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n download_ranges.append((start, end))\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker, args=(download_ranges[\n i], atomic_counter)) for i in range(self.num_threads)]\n for p in process_pool:\n p.start()\n for p in process_pool:\n p.join()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Downloader:\n\n def __init__(self, src_url, num_threads):\n try:\n header = requests.head(src_url).headers\n self.url = src_url\n self.file_size = int(header.get('content-length'))\n self.file_name = src_url.split('/')[-1]\n self.num_threads = num_threads\n self.chunk_size = self.file_size // self.num_threads\n with open(self.file_name, 'wb') as f:\n f.write(b'\\x00' * self.file_size)\n except requests.exceptions.ConnectionError:\n print('Connection error, please check your internet connection.')\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) ->None:\n download_ranges = []\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n download_ranges.append((start, end))\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker, args=(download_ranges[\n i], atomic_counter)) for i in range(self.num_threads)]\n for p in process_pool:\n p.start()\n for p in process_pool:\n p.join()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Downloader:\n\n def __init__(self, src_url, num_threads):\n try:\n header = requests.head(src_url).headers\n self.url = src_url\n self.file_size = int(header.get('content-length'))\n self.file_name = src_url.split('/')[-1]\n self.num_threads = num_threads\n self.chunk_size = self.file_size // self.num_threads\n with open(self.file_name, 'wb') as f:\n f.write(b'\\x00' * self.file_size)\n except requests.exceptions.ConnectionError:\n print('Connection error, please check your internet connection.')\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) ->None:\n download_ranges = []\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n download_ranges.append((start, end))\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker, args=(download_ranges[\n i], atomic_counter)) for i in range(self.num_threads)]\n for p in process_pool:\n p.start()\n for p in process_pool:\n p.join()\n\n\nif __name__ == '__main__':\n downloader = Downloader(\n 'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)\n downloader.download()\n",
"step-4": "import requests\nfrom multiprocessing import Process\nfrom atomic_counter import AtomicCounter\n\n\nclass Downloader:\n\n def __init__(self, src_url, num_threads):\n try:\n header = requests.head(src_url).headers\n self.url = src_url\n self.file_size = int(header.get('content-length'))\n self.file_name = src_url.split('/')[-1]\n self.num_threads = num_threads\n self.chunk_size = self.file_size // self.num_threads\n with open(self.file_name, 'wb') as f:\n f.write(b'\\x00' * self.file_size)\n except requests.exceptions.ConnectionError:\n print('Connection error, please check your internet connection.')\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) ->None:\n download_ranges = []\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n download_ranges.append((start, end))\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker, args=(download_ranges[\n i], atomic_counter)) for i in range(self.num_threads)]\n for p in process_pool:\n p.start()\n for p in process_pool:\n p.join()\n\n\nif __name__ == '__main__':\n downloader = Downloader(\n 'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)\n downloader.download()\n",
"step-5": "import requests\nfrom multiprocessing import Process\nfrom atomic_counter import AtomicCounter\n\n\nclass Downloader:\n def __init__(self, src_url, num_threads):\n try:\n header = requests.head(src_url).headers\n self.url = src_url\n self.file_size = int(header.get('content-length'))\n self.file_name = src_url.split('/')[-1]\n self.num_threads = num_threads\n self.chunk_size = self.file_size // self.num_threads\n\n with open(self.file_name, 'wb') as f:\n f.write(b'\\x00' * self.file_size)\n\n except requests.exceptions.ConnectionError:\n print('Connection error, please check your internet connection.')\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) -> None:\n download_ranges = []\n\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n\n download_ranges.append((start, end))\n\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker,\n args=(download_ranges[i], atomic_counter))\n for i in range(self.num_threads)]\n\n for p in process_pool:\n p.start()\n\n for p in process_pool:\n p.join()\n\n\nif __name__ == \"__main__\":\n downloader = Downloader(\n 'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)\n downloader.download()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
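The downloader's correctness rests on a preallocate-then-seek pattern: the file is written once at full size, then each worker opens it for update and writes only its byte range. The update mode matters: 'r+b' seeks without truncating, whereas 'wb' would empty the file on every open and wipe the other workers' chunks. A minimal demonstration of the pattern:
import os, tempfile
path = os.path.join(tempfile.mkdtemp(), 'demo.bin')
with open(path, 'wb') as f:  # preallocate once
    f.write(b'\x00' * 10)
for start, chunk in [(0, b'hello'), (5, b'world')]:
    with open(path, 'r+b') as f:  # open for update in place, no truncation
        f.seek(start)
        f.write(chunk)
with open(path, 'rb') as f:
    print(f.read())  # b'helloworld'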
<|reserved_special_token_0|>
def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]
) ->None:
"""
Does a random walk, setting the cells as it goes, until it cant find a
path.
"""
maze_idx = lambda p: p[1] * width + p[0]
north = lambda p: (p[0], p[1] - 1)
east = lambda p: (p[0] + 1, p[1])
south = lambda p: (p[0], p[1] + 1)
west = lambda p: (p[0] - 1, p[1])
def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:
"""
Returns a list of possible neighbours.
Can pass arg to only count visited neighbours
"""
possible_points = dict()
p_pt = north(pt)
if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):
possible_points[p_pt] = 'N'
p_pt = east(pt)
if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or
visited):
possible_points[p_pt] = 'E'
p_pt = south(pt)
if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or
visited):
possible_points[p_pt] = 'S'
p_pt = west(pt)
if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):
possible_points[p_pt] = 'W'
return possible_points
starting_n = check_neighbours(start, True)
if starting_n:
neigh, dire = random.choice(tuple(starting_n.items()))
maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]
maze[maze_idx(start)] |= DIRS[dire]
step = start
while (possible_n := check_neighbours(step)):
next_step, direction = random.choice(tuple(possible_n.items()))
maze[maze_idx(step)] |= DIRS[direction]
maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]
step = next_step
def gen_maze(width: int, height: int) ->list[int]:
maze = init_maze(width, height)
maze_idx = lambda p: p[1] * width + p[0]
for y in range(height):
for x in range(width):
if not maze[maze_idx((x, y))]:
walk_maze(maze, width, height, (x, y))
return maze
def print_maze(maze: list[int], width: int, height: int) ->None:
"""
Print an ASCII maze!!!! Maybe works??
"""
maze_idx = lambda p: p[1] * width + p[0]
print(' ' + '_' * (2 * width - 1))
for y in range(height):
for x in range(width):
if maze[maze_idx((x, y))] & DIRS['W']:
if maze[maze_idx((x, y))] & DIRS['S']:
print(' ', end='')
else:
print('_', end='')
else:
print('|', end='')
if maze[maze_idx((x, y))] & DIRS['S']:
print(' ', end='')
else:
print('_', end='')
print('|')
def main():
width = height = 10
if len(argv) > 2:
width = int(argv[1])
height = int(argv[2])
print(f'Generating maze size {width}x{height}')
maze = gen_maze(width, height)
print_maze(maze, width, height)
return maze
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init_maze(width: int, height: int) ->list[int]:
"""
Set up a 2D list with 0 as starting value. Basically an empty maze
"""
return [0] * width * height
def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]
) ->None:
"""
Does a random walk, setting the cells as it goes, until it cant find a
path.
"""
maze_idx = lambda p: p[1] * width + p[0]
north = lambda p: (p[0], p[1] - 1)
east = lambda p: (p[0] + 1, p[1])
south = lambda p: (p[0], p[1] + 1)
west = lambda p: (p[0] - 1, p[1])
def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:
"""
Returns a list of possible neighbours.
Can pass arg to only count visited neighbours
"""
possible_points = dict()
p_pt = north(pt)
if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):
possible_points[p_pt] = 'N'
p_pt = east(pt)
if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or
visited):
possible_points[p_pt] = 'E'
p_pt = south(pt)
if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or
visited):
possible_points[p_pt] = 'S'
p_pt = west(pt)
if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):
possible_points[p_pt] = 'W'
return possible_points
starting_n = check_neighbours(start, True)
if starting_n:
neigh, dire = random.choice(tuple(starting_n.items()))
maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]
maze[maze_idx(start)] |= DIRS[dire]
step = start
while (possible_n := check_neighbours(step)):
next_step, direction = random.choice(tuple(possible_n.items()))
maze[maze_idx(step)] |= DIRS[direction]
maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]
step = next_step
def gen_maze(width: int, height: int) ->list[int]:
maze = init_maze(width, height)
maze_idx = lambda p: p[1] * width + p[0]
for y in range(height):
for x in range(width):
if not maze[maze_idx((x, y))]:
walk_maze(maze, width, height, (x, y))
return maze
def print_maze(maze: list[int], width: int, height: int) ->None:
"""
Print an ASCII maze!!!! Maybe works??
"""
maze_idx = lambda p: p[1] * width + p[0]
print(' ' + '_' * (2 * width - 1))
for y in range(height):
for x in range(width):
if maze[maze_idx((x, y))] & DIRS['W']:
if maze[maze_idx((x, y))] & DIRS['S']:
print(' ', end='')
else:
print('_', end='')
else:
print('|', end='')
if maze[maze_idx((x, y))] & DIRS['S']:
print(' ', end='')
else:
print('_', end='')
print('|')
def main():
width = height = 10
if len(argv) > 2:
width = int(argv[1])
height = int(argv[2])
print(f'Generating maze size {width}x{height}')
maze = gen_maze(width, height)
print_maze(maze, width, height)
return maze
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DIRS = {'N': 1 << 0, 'E': 1 << 1, 'S': 1 << 2, 'W': 1 << 3}
O_DIRS = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}
def init_maze(width: int, height: int) ->list[int]:
"""
Set up a 2D list with 0 as starting value. Basically an empty maze
"""
return [0] * width * height
def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]
) ->None:
"""
Does a random walk, setting the cells as it goes, until it cant find a
path.
"""
maze_idx = lambda p: p[1] * width + p[0]
north = lambda p: (p[0], p[1] - 1)
east = lambda p: (p[0] + 1, p[1])
south = lambda p: (p[0], p[1] + 1)
west = lambda p: (p[0] - 1, p[1])
def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:
"""
Returns a list of possible neighbours.
Can pass arg to only count visited neighbours
"""
possible_points = dict()
p_pt = north(pt)
if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):
possible_points[p_pt] = 'N'
p_pt = east(pt)
if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or
visited):
possible_points[p_pt] = 'E'
p_pt = south(pt)
if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or
visited):
possible_points[p_pt] = 'S'
p_pt = west(pt)
if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):
possible_points[p_pt] = 'W'
return possible_points
starting_n = check_neighbours(start, True)
if starting_n:
neigh, dire = random.choice(tuple(starting_n.items()))
maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]
maze[maze_idx(start)] |= DIRS[dire]
step = start
while (possible_n := check_neighbours(step)):
next_step, direction = random.choice(tuple(possible_n.items()))
maze[maze_idx(step)] |= DIRS[direction]
maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]
step = next_step
def gen_maze(width: int, height: int) ->list[int]:
maze = init_maze(width, height)
maze_idx = lambda p: p[1] * width + p[0]
for y in range(height):
for x in range(width):
if not maze[maze_idx((x, y))]:
walk_maze(maze, width, height, (x, y))
return maze
def print_maze(maze: list[int], width: int, height: int) ->None:
"""
Print an ASCII maze!!!! Maybe works??
"""
maze_idx = lambda p: p[1] * width + p[0]
print(' ' + '_' * (2 * width - 1))
for y in range(height):
for x in range(width):
if maze[maze_idx((x, y))] & DIRS['W']:
if maze[maze_idx((x, y))] & DIRS['S']:
print(' ', end='')
else:
print('_', end='')
else:
print('|', end='')
if maze[maze_idx((x, y))] & DIRS['S']:
print(' ', end='')
else:
print('_', end='')
print('|')
def main():
width = height = 10
if len(argv) > 2:
width = int(argv[1])
height = int(argv[2])
print(f'Generating maze size {width}x{height}')
maze = gen_maze(width, height)
print_maze(maze, width, height)
return maze
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from sys import argv
from enum import Enum
import random
DIRS = {'N': 1 << 0, 'E': 1 << 1, 'S': 1 << 2, 'W': 1 << 3}
O_DIRS = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}
def init_maze(width: int, height: int) ->list[int]:
"""
Set up a 2D list with 0 as starting value. Basically an empty maze
"""
return [0] * width * height
def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]
) ->None:
"""
Does a random walk, setting the cells as it goes, until it cant find a
path.
"""
maze_idx = lambda p: p[1] * width + p[0]
north = lambda p: (p[0], p[1] - 1)
east = lambda p: (p[0] + 1, p[1])
south = lambda p: (p[0], p[1] + 1)
west = lambda p: (p[0] - 1, p[1])
def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:
"""
Returns a list of possible neighbours.
Can pass arg to only count visited neighbours
"""
possible_points = dict()
p_pt = north(pt)
if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):
possible_points[p_pt] = 'N'
p_pt = east(pt)
if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or
visited):
possible_points[p_pt] = 'E'
p_pt = south(pt)
if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or
visited):
possible_points[p_pt] = 'S'
p_pt = west(pt)
if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):
possible_points[p_pt] = 'W'
return possible_points
starting_n = check_neighbours(start, True)
if starting_n:
neigh, dire = random.choice(tuple(starting_n.items()))
maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]
maze[maze_idx(start)] |= DIRS[dire]
step = start
while (possible_n := check_neighbours(step)):
next_step, direction = random.choice(tuple(possible_n.items()))
maze[maze_idx(step)] |= DIRS[direction]
maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]
step = next_step
def gen_maze(width: int, height: int) ->list[int]:
maze = init_maze(width, height)
maze_idx = lambda p: p[1] * width + p[0]
for y in range(height):
for x in range(width):
if not maze[maze_idx((x, y))]:
walk_maze(maze, width, height, (x, y))
return maze
def print_maze(maze: list[int], width: int, height: int) ->None:
"""
Print an ASCII maze!!!! Maybe works??
"""
maze_idx = lambda p: p[1] * width + p[0]
print(' ' + '_' * (2 * width - 1))
for y in range(height):
for x in range(width):
if maze[maze_idx((x, y))] & DIRS['W']:
if maze[maze_idx((x, y))] & DIRS['S']:
print(' ', end='')
else:
print('_', end='')
else:
print('|', end='')
if maze[maze_idx((x, y))] & DIRS['S']:
print(' ', end='')
else:
print('_', end='')
print('|')
def main():
width = height = 10
if len(argv) > 2:
width = int(argv[1])
height = int(argv[2])
print(f'Generating maze size {width}x{height}')
maze = gen_maze(width, height)
print_maze(maze, width, height)
return maze
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
##
# hunt_and_kill.py
# 05 Oct 2021
# Generates a maze using the hunt and kill algorithm
# S
from sys import argv
from enum import Enum
import random
# Cardinal directions, can be OR'd and AND'd
DIRS = {
'N': 1 << 0,
'E': 1 << 1,
'S': 1 << 2,
'W': 1 << 3
}
O_DIRS = {
'N': 'S',
'E': 'W',
'S': 'N',
'W': 'E'
}
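# O_DIRS maps each direction to its opposite, so carving a passage can set the matching wall bit in the neighbouring cell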
def init_maze(width: int, height: int) -> list[int]:
"""
Set up a 2D list with 0 as starting value. Basically an empty maze
"""
return [0] * width * height
def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:
"""
Does a random walk, setting the cells as it goes, until it cant find a
path.
"""
# Shortcut for accessing maze
maze_idx = lambda p: p[1] * width + p[0]
# Shortcut funcs for surrounding points
north = lambda p: (p[0] , p[1] -1)
east = lambda p: (p[0] +1, p[1] )
south = lambda p: (p[0] , p[1] +1)
west = lambda p: (p[0] -1, p[1] )
def check_neighbours(pt, visited=False) -> list[tuple[int, int]]:
"""
Returns a list of possible neighbours.
Can pass arg to only count visited neighbours
"""
# Points will be added to this list if they havent been traversed yet
possible_points = dict()
# -- NORTH
p_pt = north(pt)
# This mess of a condition will evaluate to true if the cell is visited and the user is asking for a visited cell. Viceversa.
if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "N"
# -- EAST
p_pt = east(pt)
if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "E"
# -- SOUTH
p_pt = south(pt)
if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "S"
# -- WEST
p_pt = west(pt)
if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "W"
return possible_points
# First, connect to a random neighbour that has been visited.
starting_n = check_neighbours(start, True)
if starting_n:
neigh, dire = random.choice(tuple(starting_n.items()))
maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]
maze[maze_idx(start)] |= DIRS[dire]
step = start
# Walk randomly until out of options
while possible_n := check_neighbours(step):
next_step, direction = random.choice(tuple(possible_n.items()))
# Connect the two cells
maze[maze_idx(step)] |= DIRS[direction]
maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]
# Go to next
step = next_step
def gen_maze(width: int, height: int) -> list[int]:
maze = init_maze(width, height)
maze_idx = lambda p: p[1] * width + p[0]
for y in range(height):
for x in range(width):
if not maze[maze_idx((x, y))]:
walk_maze(maze, width, height, (x, y))
return maze
def print_maze(maze: list[int], width: int, height: int) -> None:
"""
Print an ASCII maze!!!! Maybe works??
"""
maze_idx = lambda p: p[1] * width + p[0]
# top row
print(' ' + '_' * (2 * width - 1))
for y in range(height):
for x in range(width):
# left wall
if maze[maze_idx((x, y))] & DIRS["W"]:
# leave wall open if you can also go down
if maze[maze_idx((x, y))] & DIRS["S"]:
print(' ', end='')
else:
print('_', end='')
else:
print('|', end='')
if maze[maze_idx((x, y))] & DIRS["S"]:
print(' ', end='')
else:
print('_', end='')
# right wall
print('|')
def main():
width = height = 10
if len(argv) > 2:
width = int(argv[1])
height = int(argv[2])
print(f"Generating maze size {width}x{height}")
maze = gen_maze(width, height)
print_maze(maze, width, height)
return maze
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "54002bc7e2a1991d2405acbe1d399e8803ac5582",
"index": 7210,
"step-1": "<mask token>\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nDIRS = {'N': 1 << 0, 'E': 1 << 1, 'S': 1 << 2, 'W': 1 << 3}\nO_DIRS = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from sys import argv\nfrom enum import Enum\nimport random\nDIRS = {'N': 1 << 0, 'E': 1 << 1, 'S': 1 << 2, 'W': 1 << 3}\nO_DIRS = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "##\n# hunt_and_kill.py\n# 05 Oct 2021\n# Generates a maze using the hunt and kill algorithm\n# S\nfrom sys import argv\nfrom enum import Enum\nimport random\n\n# Cardinal directions, can be OR'd and AND'd\nDIRS = {\n 'N': 1 << 0,\n 'E': 1 << 1,\n 'S': 1 << 2,\n 'W': 1 << 3\n}\n\nO_DIRS = {\n 'N': 'S',\n 'E': 'W',\n 'S': 'N',\n 'W': 'E'\n}\n\ndef init_maze(width: int, height: int) -> list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n # Shortcut for accessing maze\n maze_idx = lambda p: p[1] * width + p[0]\n\n # Shortcut funcs for surrounding points\n north = lambda p: (p[0] , p[1] -1)\n east = lambda p: (p[0] +1, p[1] )\n south = lambda p: (p[0] , p[1] +1)\n west = lambda p: (p[0] -1, p[1] )\n\n def check_neighbours(pt, visited=False) -> list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n # Points will be added to this list if they havent been traversed yet\n possible_points = dict()\n\n # -- NORTH\n p_pt = north(pt)\n # This mess of a condition will evaluate to true if the cell is visited and the user is asking for a visited cell. Viceversa.\n if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"N\"\n\n # -- EAST\n p_pt = east(pt)\n if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"E\"\n\n # -- SOUTH\n p_pt = south(pt)\n if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"S\"\n\n # -- WEST\n p_pt = west(pt)\n if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"W\"\n\n return possible_points\n\n # First, connect to a random neighbour that has been visited.\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n\n step = start\n\n # Walk randomly until out of options\n while possible_n := check_neighbours(step):\n next_step, direction = random.choice(tuple(possible_n.items()))\n\n # Connect the two cells\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n\n # Go to next\n step = next_step\n\n\n\ndef gen_maze(width: int, height: int) -> list[int]:\n maze = init_maze(width, height)\n\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n\n return maze\n\ndef print_maze(maze: list[int], width: int, height: int) -> None:\n \"\"\"\n Print an ASCII maze!!!! 
Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n\n # top row\n print(' ' + '_' * (2 * width - 1))\n\n for y in range(height):\n for x in range(width):\n # left wall\n if maze[maze_idx((x, y))] & DIRS[\"W\"]:\n # leave wall open if you can also go down\n if maze[maze_idx((x, y))] & DIRS[\"S\"]:\n print(' ', end='')\n else:\n print('_', end='')\n\n else:\n print('|', end='')\n\n if maze[maze_idx((x, y))] & DIRS[\"S\"]:\n print(' ', end='')\n else:\n print('_', end='')\n # right wall\n print('|')\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n\n print(f\"Generating maze size {width}x{height}\")\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
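A quick driver for the hunt-and-kill generator in the record above — a minimal sketch, assuming the module is saved as hunt_and_kill.py (the filename given in its own header comment) and runs on Python 3.8+ for the walrus operator; the seed and dimensions are arbitrary:

# Sketch: exercising gen_maze/print_maze from the record above.
# "hunt_and_kill" as the module name is an assumption; adjust to the real filename.
import random
import hunt_and_kill
random.seed(7)  # arbitrary seed, only for a reproducible example
maze = hunt_and_kill.gen_maze(8, 5)
hunt_and_kill.print_maze(maze, 8, 5)
# Every cell should end up carved: at least one direction bit set.
assert all(cell != 0 for cell in maze)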
import datetime
import operator
import geopy
from django.db import models
from django.db.models import Q
from django.db.models.query import QuerySet
from django.db.models import permalink
from django.contrib.auth.models import User
geocoder = geopy.geocoders.Google()
class City(models.Model):
name = models.CharField(max_length=30, unique=True)
class Meta:
ordering = ('name',)
verbose_name_plural = "cities"
def __unicode__(self):
return self.name
class Area(models.Model):
name = models.CharField(max_length=50)
city = models.ForeignKey(City)
class Meta:
unique_together = ('name', 'city')
ordering = ('name',)
def __unicode__(self):
return self.name
class ApartmentQuerySet(QuerySet):
def available(self):
return self.filter(
is_available=True,
tenant__is_active=True,
)
class ApartmentManager(models.Manager):
def get_query_set(self):
return ApartmentQuerySet(self.model).select_related(
'area', 'area__city')
def available(self):
return self.get_query_set().available()
def search(self, search):
if search:
search = search.strip()
if not search:
return self.none()
search_fields = (
'area__name__iexact',
'area__city__name__iexact',
'postcode__iexact',
'address__icontains',
)
        criteria = [Q(**{field: search})
                    for field in search_fields]
return self.filter(reduce(operator.or_, criteria))
class Apartment(models.Model):
TYPE_APARTMENT = 1
TYPE_DETACHED = 2
TYPE_SEMIDETACHED = 3
    TYPE_COTTAGE = 4
TYPE_CHOICES = (
(TYPE_APARTMENT, "Apartment"),
(TYPE_DETACHED, "Detached house"),
(TYPE_SEMIDETACHED, "Semi-detached house"),
(TYPE_COTTAGE, "Cottage"),
)
SAUNA_NONE = 1
SAUNA_SHARED = 2
SAUNA_OWN = 3
SAUNA_CHOICES = (
(SAUNA_NONE, "No sauna"),
(SAUNA_SHARED, "Shared/communal sauna"),
(SAUNA_OWN, "Own sauna"),
)
ROOM_CHOICES = (
(1, "1 room"),
(2, "2 rooms"),
(3, "3 rooms"),
(4, "4 rooms"),
)
LANDLORD_TENANT = 1
LANDLORD_PRIVATE = 2
LANDLORD_AGENCY = 3
LANDLORD_CHOICES = (
(LANDLORD_TENANT, "Occupant"),
(LANDLORD_PRIVATE, "Private landlord"),
(LANDLORD_AGENCY, "Rental agency"),
)
area = models.ForeignKey(Area)
tenant = models.ForeignKey(User)
landlord = models.IntegerField(
choices=LANDLORD_CHOICES,
default=LANDLORD_PRIVATE,
)
agency = models.CharField(max_length=100, null=True, blank=True)
agency_website = models.URLField(null=True, blank=True)
address = models.CharField(max_length=100)
postcode = models.CharField(max_length=7)
latitude = models.FloatField(null=True, blank=True)
longitude = models.FloatField(null=True, blank=True)
added_on = models.DateTimeField(auto_now_add=True)
is_available = models.BooleanField(default=True)
available_from = models.DateField(null=True, blank=True)
available_to = models.DateField(null=True, blank=True)
    is_shared = models.BooleanField('Shared accommodation', default=False)
type = models.IntegerField(
choices=TYPE_CHOICES,
default=TYPE_APARTMENT,
)
num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)
floor = models.IntegerField(null=True, blank=True)
lift = models.BooleanField(default=False)
num_floors = models.IntegerField(null=True, blank=True)
sauna = models.IntegerField(
choices=SAUNA_CHOICES,
default=SAUNA_NONE,
)
rent_pcm = models.DecimalField(
decimal_places=2,
max_digits=8,
)
deposit = models.DecimalField(
decimal_places=2,
max_digits=8,
null=True,
blank=True
)
smoking = models.BooleanField(default=False)
pets = models.BooleanField(default=False)
size = models.FloatField('Size (sqm)')
garden_size = models.FloatField(null=True, blank=True)
furnished = models.BooleanField(default=False)
cable = models.BooleanField(default=False)
broadband = models.BooleanField(default=False)
satellite = models.BooleanField(default=False)
balcony = models.BooleanField(default=False)
parking = models.BooleanField(default=False)
garage = models.BooleanField(default=False)
bike_storage = models.BooleanField(default=False)
extra_storage = models.BooleanField(default=False)
gym = models.BooleanField(default=False)
laundry = models.BooleanField(default=False)
description = models.TextField(null=True, blank=True)
kitchen_amenities = models.TextField(null=True, blank=True)
furniture = models.TextField(null=True, blank=True)
heating = models.TextField(null=True, blank=True)
other_amenities = models.TextField(null=True, blank=True)
objects = ApartmentManager()
def __unicode__(self):
return self.get_full_address()
@permalink
def get_absolute_url(self):
return ('apartments:detail', [str(self.id)])
def get_full_address(self):
return "{0}, {1} {2}".format(
self.address,
self.postcode,
self.area.city.name.upper()
)
def is_agency_landlord(self):
return self.landlord == self.LANDLORD_AGENCY
def get_location(self):
searchable = "{0}, {1} {2}, Finland".format(
self.address,
self.postcode,
self.area.city,
)
address, (lat, lng) = geocoder.geocode(
searchable,
exactly_one=True
)
return lat, lng
def save(self, *args, **kwargs):
self.latitude, self.longitude = self.get_location()
super(Apartment, self).save(*args, **kwargs)
|
flexible
|
{
"blob_id": "89ba805e47a9727573e1e25371a70fb887ee170d",
"index": 9141,
"step-1": "<mask token>\n\n\nclass Area(models.Model):\n <mask token>\n <mask token>\n\n\n class Meta:\n unique_together = 'name', 'city'\n ordering = 'name',\n <mask token>\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n return self.filter(is_available=True, tenant__is_active=True)\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related('area',\n 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n if search:\n search = search.strip()\n if not search:\n return self.none()\n search_fields = ('area__name__iexact', 'area__city__name__iexact',\n 'postcode__iexact', 'address__icontains')\n criteria = [Q(**{field: search}) for field in search_fields]\n return self.filter(reduce(operator.or_, criteria))\n\n\nclass Apartment(models.Model):\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE = 4\n TYPE_CHOICES = (TYPE_APARTMENT, 'Apartment'), (TYPE_DETACHED,\n 'Detached house'), (TYPE_SEMIDETACHED, 'Semi-detached house'), (\n TYPE_COTTAGE, 'Cottage')\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n SAUNA_CHOICES = (SAUNA_NONE, 'No sauna'), (SAUNA_SHARED,\n 'Shared/communal sauna'), (SAUNA_OWN, 'Own sauna')\n ROOM_CHOICES = (1, '1 room'), (2, '2 rooms'), (3, '3 rooms'), (4, '4 rooms'\n )\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n LANDLORD_CHOICES = (LANDLORD_TENANT, 'Occupant'), (LANDLORD_PRIVATE,\n 'Private landlord'), (LANDLORD_AGENCY, 'Rental agency')\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n landlord = models.IntegerField(choices=LANDLORD_CHOICES, default=\n LANDLORD_PRIVATE)\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n is_shared = models.BooleanField('Shared accomodation', default=False)\n type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_APARTMENT)\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n sauna = models.IntegerField(choices=SAUNA_CHOICES, default=SAUNA_NONE)\n rent_pcm = models.DecimalField(decimal_places=2, max_digits=8)\n deposit = models.DecimalField(decimal_places=2, max_digits=8, null=True,\n blank=True)\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = models.BooleanField(default=False)\n balcony = models.BooleanField(default=False)\n parking = models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = 
models.BooleanField(default=False)\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n objects = ApartmentManager()\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return 'apartments:detail', [str(self.id)]\n\n def get_full_address(self):\n return '{0}, {1} {2}'.format(self.address, self.postcode, self.area\n .city.name.upper())\n\n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n searchable = '{0}, {1} {2}, Finland'.format(self.address, self.\n postcode, self.area.city)\n address, (lat, lng) = geocoder.geocode(searchable, exactly_one=True)\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n",
"step-2": "<mask token>\n\n\nclass City(models.Model):\n <mask token>\n\n\n class Meta:\n ordering = 'name',\n verbose_name_plural = 'cities'\n\n def __unicode__(self):\n return self.name\n\n\nclass Area(models.Model):\n name = models.CharField(max_length=50)\n city = models.ForeignKey(City)\n\n\n class Meta:\n unique_together = 'name', 'city'\n ordering = 'name',\n\n def __unicode__(self):\n return self.name\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n return self.filter(is_available=True, tenant__is_active=True)\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related('area',\n 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n if search:\n search = search.strip()\n if not search:\n return self.none()\n search_fields = ('area__name__iexact', 'area__city__name__iexact',\n 'postcode__iexact', 'address__icontains')\n criteria = [Q(**{field: search}) for field in search_fields]\n return self.filter(reduce(operator.or_, criteria))\n\n\nclass Apartment(models.Model):\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE = 4\n TYPE_CHOICES = (TYPE_APARTMENT, 'Apartment'), (TYPE_DETACHED,\n 'Detached house'), (TYPE_SEMIDETACHED, 'Semi-detached house'), (\n TYPE_COTTAGE, 'Cottage')\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n SAUNA_CHOICES = (SAUNA_NONE, 'No sauna'), (SAUNA_SHARED,\n 'Shared/communal sauna'), (SAUNA_OWN, 'Own sauna')\n ROOM_CHOICES = (1, '1 room'), (2, '2 rooms'), (3, '3 rooms'), (4, '4 rooms'\n )\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n LANDLORD_CHOICES = (LANDLORD_TENANT, 'Occupant'), (LANDLORD_PRIVATE,\n 'Private landlord'), (LANDLORD_AGENCY, 'Rental agency')\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n landlord = models.IntegerField(choices=LANDLORD_CHOICES, default=\n LANDLORD_PRIVATE)\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n is_shared = models.BooleanField('Shared accomodation', default=False)\n type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_APARTMENT)\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n sauna = models.IntegerField(choices=SAUNA_CHOICES, default=SAUNA_NONE)\n rent_pcm = models.DecimalField(decimal_places=2, max_digits=8)\n deposit = models.DecimalField(decimal_places=2, max_digits=8, null=True,\n blank=True)\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = models.BooleanField(default=False)\n balcony = models.BooleanField(default=False)\n parking = 
models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = models.BooleanField(default=False)\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n objects = ApartmentManager()\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return 'apartments:detail', [str(self.id)]\n\n def get_full_address(self):\n return '{0}, {1} {2}'.format(self.address, self.postcode, self.area\n .city.name.upper())\n\n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n searchable = '{0}, {1} {2}, Finland'.format(self.address, self.\n postcode, self.area.city)\n address, (lat, lng) = geocoder.geocode(searchable, exactly_one=True)\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n",
"step-3": "<mask token>\n\n\nclass City(models.Model):\n name = models.CharField(max_length=30, unique=True)\n\n\n class Meta:\n ordering = 'name',\n verbose_name_plural = 'cities'\n\n def __unicode__(self):\n return self.name\n\n\nclass Area(models.Model):\n name = models.CharField(max_length=50)\n city = models.ForeignKey(City)\n\n\n class Meta:\n unique_together = 'name', 'city'\n ordering = 'name',\n\n def __unicode__(self):\n return self.name\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n return self.filter(is_available=True, tenant__is_active=True)\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related('area',\n 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n if search:\n search = search.strip()\n if not search:\n return self.none()\n search_fields = ('area__name__iexact', 'area__city__name__iexact',\n 'postcode__iexact', 'address__icontains')\n criteria = [Q(**{field: search}) for field in search_fields]\n return self.filter(reduce(operator.or_, criteria))\n\n\nclass Apartment(models.Model):\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE = 4\n TYPE_CHOICES = (TYPE_APARTMENT, 'Apartment'), (TYPE_DETACHED,\n 'Detached house'), (TYPE_SEMIDETACHED, 'Semi-detached house'), (\n TYPE_COTTAGE, 'Cottage')\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n SAUNA_CHOICES = (SAUNA_NONE, 'No sauna'), (SAUNA_SHARED,\n 'Shared/communal sauna'), (SAUNA_OWN, 'Own sauna')\n ROOM_CHOICES = (1, '1 room'), (2, '2 rooms'), (3, '3 rooms'), (4, '4 rooms'\n )\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n LANDLORD_CHOICES = (LANDLORD_TENANT, 'Occupant'), (LANDLORD_PRIVATE,\n 'Private landlord'), (LANDLORD_AGENCY, 'Rental agency')\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n landlord = models.IntegerField(choices=LANDLORD_CHOICES, default=\n LANDLORD_PRIVATE)\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n is_shared = models.BooleanField('Shared accomodation', default=False)\n type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_APARTMENT)\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n sauna = models.IntegerField(choices=SAUNA_CHOICES, default=SAUNA_NONE)\n rent_pcm = models.DecimalField(decimal_places=2, max_digits=8)\n deposit = models.DecimalField(decimal_places=2, max_digits=8, null=True,\n blank=True)\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = models.BooleanField(default=False)\n balcony = 
models.BooleanField(default=False)\n parking = models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = models.BooleanField(default=False)\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n objects = ApartmentManager()\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return 'apartments:detail', [str(self.id)]\n\n def get_full_address(self):\n return '{0}, {1} {2}'.format(self.address, self.postcode, self.area\n .city.name.upper())\n\n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n searchable = '{0}, {1} {2}, Finland'.format(self.address, self.\n postcode, self.area.city)\n address, (lat, lng) = geocoder.geocode(searchable, exactly_one=True)\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n",
"step-4": "<mask token>\ngeocoder = geopy.geocoders.Google()\n\n\nclass City(models.Model):\n name = models.CharField(max_length=30, unique=True)\n\n\n class Meta:\n ordering = 'name',\n verbose_name_plural = 'cities'\n\n def __unicode__(self):\n return self.name\n\n\nclass Area(models.Model):\n name = models.CharField(max_length=50)\n city = models.ForeignKey(City)\n\n\n class Meta:\n unique_together = 'name', 'city'\n ordering = 'name',\n\n def __unicode__(self):\n return self.name\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n return self.filter(is_available=True, tenant__is_active=True)\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related('area',\n 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n if search:\n search = search.strip()\n if not search:\n return self.none()\n search_fields = ('area__name__iexact', 'area__city__name__iexact',\n 'postcode__iexact', 'address__icontains')\n criteria = [Q(**{field: search}) for field in search_fields]\n return self.filter(reduce(operator.or_, criteria))\n\n\nclass Apartment(models.Model):\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE = 4\n TYPE_CHOICES = (TYPE_APARTMENT, 'Apartment'), (TYPE_DETACHED,\n 'Detached house'), (TYPE_SEMIDETACHED, 'Semi-detached house'), (\n TYPE_COTTAGE, 'Cottage')\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n SAUNA_CHOICES = (SAUNA_NONE, 'No sauna'), (SAUNA_SHARED,\n 'Shared/communal sauna'), (SAUNA_OWN, 'Own sauna')\n ROOM_CHOICES = (1, '1 room'), (2, '2 rooms'), (3, '3 rooms'), (4, '4 rooms'\n )\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n LANDLORD_CHOICES = (LANDLORD_TENANT, 'Occupant'), (LANDLORD_PRIVATE,\n 'Private landlord'), (LANDLORD_AGENCY, 'Rental agency')\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n landlord = models.IntegerField(choices=LANDLORD_CHOICES, default=\n LANDLORD_PRIVATE)\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n is_shared = models.BooleanField('Shared accomodation', default=False)\n type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_APARTMENT)\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n sauna = models.IntegerField(choices=SAUNA_CHOICES, default=SAUNA_NONE)\n rent_pcm = models.DecimalField(decimal_places=2, max_digits=8)\n deposit = models.DecimalField(decimal_places=2, max_digits=8, null=True,\n blank=True)\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = 
models.BooleanField(default=False)\n balcony = models.BooleanField(default=False)\n parking = models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = models.BooleanField(default=False)\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n objects = ApartmentManager()\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return 'apartments:detail', [str(self.id)]\n\n def get_full_address(self):\n return '{0}, {1} {2}'.format(self.address, self.postcode, self.area\n .city.name.upper())\n\n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n searchable = '{0}, {1} {2}, Finland'.format(self.address, self.\n postcode, self.area.city)\n address, (lat, lng) = geocoder.geocode(searchable, exactly_one=True)\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n",
"step-5": "import datetime\nimport operator\n\nimport geopy\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.db.models.query import QuerySet\nfrom django.db.models import permalink\nfrom django.contrib.auth.models import User\n\n\ngeocoder = geopy.geocoders.Google()\n\n\nclass City(models.Model):\n\n name = models.CharField(max_length=30, unique=True)\n\n class Meta:\n ordering = ('name',)\n verbose_name_plural = \"cities\"\n\n def __unicode__(self):\n return self.name\n\n\nclass Area(models.Model):\n\n name = models.CharField(max_length=50)\n city = models.ForeignKey(City)\n\n class Meta:\n unique_together = ('name', 'city')\n ordering = ('name',)\n\n def __unicode__(self):\n return self.name\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n\n return self.filter(\n is_available=True,\n tenant__is_active=True,\n )\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related(\n 'area', 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n\n if search:\n search = search.strip()\n\n if not search:\n return self.none()\n\n search_fields = (\n 'area__name__iexact',\n 'area__city__name__iexact',\n 'postcode__iexact',\n 'address__icontains',\n )\n\n criteria = [Q(**{field : search}) \n for field in search_fields]\n\n return self.filter(reduce(operator.or_, criteria))\n \n\nclass Apartment(models.Model):\n\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE =4 \n\n TYPE_CHOICES = (\n (TYPE_APARTMENT, \"Apartment\"),\n (TYPE_DETACHED, \"Detached house\"),\n (TYPE_SEMIDETACHED, \"Semi-detached house\"),\n (TYPE_COTTAGE, \"Cottage\"),\n )\n\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n\n SAUNA_CHOICES = (\n (SAUNA_NONE, \"No sauna\"),\n (SAUNA_SHARED, \"Shared/communal sauna\"),\n (SAUNA_OWN, \"Own sauna\"),\n )\n\n ROOM_CHOICES = (\n (1, \"1 room\"),\n (2, \"2 rooms\"),\n (3, \"3 rooms\"),\n (4, \"4 rooms\"),\n )\n\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n\n LANDLORD_CHOICES = (\n (LANDLORD_TENANT, \"Occupant\"),\n (LANDLORD_PRIVATE, \"Private landlord\"),\n (LANDLORD_AGENCY, \"Rental agency\"),\n )\n\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n \n landlord = models.IntegerField(\n choices=LANDLORD_CHOICES,\n default=LANDLORD_PRIVATE,\n )\n\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n\n is_shared = models.BooleanField('Shared accomodation', default=False)\n\n type = models.IntegerField(\n choices=TYPE_CHOICES,\n default=TYPE_APARTMENT,\n )\n\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n\n sauna = models.IntegerField(\n choices=SAUNA_CHOICES,\n default=SAUNA_NONE,\n )\n\n rent_pcm = models.DecimalField(\n decimal_places=2,\n max_digits=8,\n )\n\n deposit = 
models.DecimalField(\n decimal_places=2,\n max_digits=8,\n null=True,\n blank=True\n )\n\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = models.BooleanField(default=False)\n balcony = models.BooleanField(default=False)\n\n parking = models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = models.BooleanField(default=False)\n\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n\n\n objects = ApartmentManager()\n\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return ('apartments:detail', [str(self.id)])\n\n def get_full_address(self):\n\n return \"{0}, {1} {2}\".format(\n self.address,\n self.postcode,\n self.area.city.name.upper()\n )\n\n \n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n\n searchable = \"{0}, {1} {2}, Finland\".format(\n self.address,\n self.postcode,\n self.area.city,\n )\n\n address, (lat, lng) = geocoder.geocode(\n searchable,\n exactly_one=True\n )\n\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n\n \n",
"step-ids": [
15,
19,
20,
21,
23
]
}
|
[
15,
19,
20,
21,
23
] |
class BucketSort:

    def __init__(self, a):
        self.a = a

    def result(self, bucketCount=10):
        # distribute elements into evenly sized value ranges ("buckets")
        buckets = [[] for i in range(bucketCount + 1)]
        maxElement = max(self.a)
        minElement = min(self.a)
        bucketRange = (maxElement - minElement + 1) / bucketCount
        for i in range(len(self.a)):
            bucketIndex = int((self.a[i] - minElement) / bucketRange)
            buckets[bucketIndex].append(self.a[i])
        # sort each bucket individually, then concatenate them in order
        for i in range(len(buckets)):
            buckets[i] = sorted(buckets[i])
        self.a = []
        for bucket in buckets:
            self.a.extend(bucket)
        return self.a
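
# A minimal usage sketch (not part of the record above); the sample list is
# invented for illustration. Note that result() mutates self.a in place as
# well as returning it:
if __name__ == '__main__':
    data = [29, 3, 14, 7, 21, 1, 18]
    print(BucketSort(data).result())  # -> [1, 3, 7, 14, 18, 21, 29]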
|
normal
|
{
"blob_id": "3b803850418638bf65528088044918e93ecabff6",
"index": 3085,
"step-1": "<mask token>\n",
"step-2": "class BucketSort:\n <mask token>\n <mask token>\n",
"step-3": "class BucketSort:\n <mask token>\n\n def result(self, bucketCount=10):\n buckets = [[] for i in range(bucketCount + 1)]\n maxElement = max(self.a)\n minElement = min(self.a)\n bucketRange = (maxElement - minElement + 1) / bucketCount\n for i in range(len(self.a)):\n bucketIndex = int((self.a[i] - minElement) / bucketRange)\n buckets[bucketIndex].append(self.a[i])\n for i in range(len(buckets)):\n buckets[i] = sorted(buckets[i])\n self.a = []\n for bucket in buckets:\n self.a.extend(bucket)\n return self.a\n",
"step-4": "class BucketSort:\n\n def __init__(self, a):\n self.a = a\n\n def result(self, bucketCount=10):\n buckets = [[] for i in range(bucketCount + 1)]\n maxElement = max(self.a)\n minElement = min(self.a)\n bucketRange = (maxElement - minElement + 1) / bucketCount\n for i in range(len(self.a)):\n bucketIndex = int((self.a[i] - minElement) / bucketRange)\n buckets[bucketIndex].append(self.a[i])\n for i in range(len(buckets)):\n buckets[i] = sorted(buckets[i])\n self.a = []\n for bucket in buckets:\n self.a.extend(bucket)\n return self.a\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
get = requests.get('https://api.github.com/user', auth=(argv[1], argv[2])
).json().get('id')
print(get)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from sys import argv
import requests
if __name__ == '__main__':
get = requests.get('https://api.github.com/user', auth=(argv[1], argv[2])
).json().get('id')
print(get)
<|reserved_special_token_1|>
#!/usr/bin/python3
"""display your id from github.
"""
from sys import argv
import requests
if __name__ == "__main__":
get = requests.get('https://api.github.com/user',
auth=(argv[1], argv[2])).json().get('id')
print(get)
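
# Hedged usage sketch: assuming the script above is saved as 10-my_github.py
# (the filename is an assumption) and that a personal access token is passed
# in place of a password, it would be run as:
#
#   ./10-my_github.py <github_username> <personal_access_token>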
|
flexible
|
{
"blob_id": "8280f321b102cace462761f9ece2aebf9e28a432",
"index": 3941,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n get = requests.get('https://api.github.com/user', auth=(argv[1], argv[2])\n ).json().get('id')\n print(get)\n",
"step-3": "<mask token>\nfrom sys import argv\nimport requests\nif __name__ == '__main__':\n get = requests.get('https://api.github.com/user', auth=(argv[1], argv[2])\n ).json().get('id')\n print(get)\n",
"step-4": "#!/usr/bin/python3\n\"\"\"display your id from github.\n\"\"\"\nfrom sys import argv\nimport requests\n\n\nif __name__ == \"__main__\":\n get = requests.get('https://api.github.com/user',\n auth=(argv[1], argv[2])).json().get('id')\n print(get)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import pandas as pd
import matplotlib.pyplot as plt
with open('Maradona-goals.json') as f:
    jsonObject = json.load(f)

# flatten {player: [per-year records]} into a single list of dicts,
# tagging each record with the player name
l = []
for c, cl in jsonObject.items():
    for d in cl:
        d.update({'player': c})
        l.append(d)
df = pd.DataFrame(l)
labels = df["year"]
width = 0.75
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(1,1,1)
ax.set_xticks(labels)
ax.set_xticklabels(labels, rotation=45)
ax.set_yticks(range(0,45))
ax.bar(labels, df["club_goals"], width, label='Club')
ax.bar(labels, df["country_goals"], width, label='Country')
#ax.grid(color='LIGHTGRAY')
ax.set_ylabel('Goals')
ax.set_xlabel('Years')
ax.set_title('Goals by year')
ax.legend()
plt.show()
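
# Hedged sketch of the input shape the script above assumes for
# Maradona-goals.json; the numbers here are invented for illustration:
#
#   {"Maradona": [
#       {"year": 1976, "club_goals": 2, "country_goals": 0},
#       {"year": 1977, "club_goals": 19, "country_goals": 2}
#   ]}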
|
normal
|
{
"blob_id": "33e9e45fbe0e3143d75d34c1db283c01e2693f68",
"index": 4967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nf.close()\n<mask token>\nfor c, cl in jsonObject.items():\n for d in cl:\n d.update({'player': c})\n l.append(d)\n<mask token>\nax.set_xticks(labels)\nax.set_xticklabels(labels, rotation=45)\nax.set_yticks(range(0, 45))\nax.bar(labels, df['club_goals'], width, label='Club')\nax.bar(labels, df['country_goals'], width, label='Country')\nax.set_ylabel('Goals')\nax.set_xlabel('Years')\nax.set_title('Goals by year')\nax.legend()\nplt.show()\n",
"step-3": "<mask token>\nf = open('Maradona-goals.json')\njsonObject = json.load(f)\nf.close()\nl = []\nfor c, cl in jsonObject.items():\n for d in cl:\n d.update({'player': c})\n l.append(d)\ndf = pd.DataFrame(l)\nlabels = df['year']\nwidth = 0.75\nfig = plt.figure(figsize=(16, 8))\nax = fig.add_subplot(1, 1, 1)\nax.set_xticks(labels)\nax.set_xticklabels(labels, rotation=45)\nax.set_yticks(range(0, 45))\nax.bar(labels, df['club_goals'], width, label='Club')\nax.bar(labels, df['country_goals'], width, label='Country')\nax.set_ylabel('Goals')\nax.set_xlabel('Years')\nax.set_title('Goals by year')\nax.legend()\nplt.show()\n",
"step-4": "import json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nf = open('Maradona-goals.json')\njsonObject = json.load(f)\nf.close()\nl = []\nfor c, cl in jsonObject.items():\n for d in cl:\n d.update({'player': c})\n l.append(d)\ndf = pd.DataFrame(l)\nlabels = df['year']\nwidth = 0.75\nfig = plt.figure(figsize=(16, 8))\nax = fig.add_subplot(1, 1, 1)\nax.set_xticks(labels)\nax.set_xticklabels(labels, rotation=45)\nax.set_yticks(range(0, 45))\nax.bar(labels, df['club_goals'], width, label='Club')\nax.bar(labels, df['country_goals'], width, label='Country')\nax.set_ylabel('Goals')\nax.set_xlabel('Years')\nax.set_title('Goals by year')\nax.legend()\nplt.show()\n",
"step-5": "import json\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nf = open('Maradona-goals.json')\njsonObject = json.load(f)\n\nf.close()\n\nl = []\nfor c, cl in jsonObject.items():\n for d in cl:\n d.update({'player' : c})\n l.append(d)\ndf = pd.DataFrame(l)\n\nlabels = df[\"year\"]\n\nwidth = 0.75\n\nfig = plt.figure(figsize=(16,8))\nax = fig.add_subplot(1,1,1)\n\nax.set_xticks(labels)\nax.set_xticklabels(labels, rotation=45)\nax.set_yticks(range(0,45))\n\nax.bar(labels, df[\"club_goals\"], width, label='Club')\nax.bar(labels, df[\"country_goals\"], width, label='Country')\n\n#ax.grid(color='LIGHTGRAY')\nax.set_ylabel('Goals')\nax.set_xlabel('Years')\nax.set_title('Goals by year')\nax.legend()\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def reorder_sentences(output_sentences, input):
def custom_sort(s1, s2):
return input.find(s1) - input.find(s2)
output_sentences.sort(key=functools.cmp_to_key(custom_sort))
return output_sentences
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def clean(string):
string = re.sub("\\'s", " 's", string)
string = re.sub("\\'ve", " 've", string)
string = re.sub("n\\'t", " n't", string)
string = re.sub("\\'re", " 're", string)
string = re.sub("\\'d", " 'd", string)
string = re.sub("\\'ll", " 'll", string)
string = re.sub('!', ' ! ', string)
string = re.sub('\\\\\\(', ' \\( ', string)
string = re.sub('\\\\\\)', ' \\) ', string)
string = re.sub('\\?', ' \\? ', string)
string = re.sub('\\]\\]', '', string)
string = re.sub('\\n', '', string)
string = string.rstrip()
string = remove_text_inside_brackets(string, '(){}[]')
return string.strip()
def remove_text_inside_brackets(text, brackets):
count = [0] * (len(brackets) // 2)
saved_chars = []
for character in text:
for i, b in enumerate(brackets):
if character == b:
kind, is_close = divmod(i, 2)
count[kind] += (-1) ** is_close
if count[kind] < 0:
count[kind] = 0
else:
break
else:
if not any(count):
saved_chars.append(character)
return ''.join(saved_chars)
def reorder_sentences(output_sentences, input):
def custom_sort(s1, s2):
return input.find(s1) - input.find(s2)
output_sentences.sort(key=functools.cmp_to_key(custom_sort))
return output_sentences
<|reserved_special_token_0|>
def summarize(input, num_sentences):
return ' '.join(get_summarized(input, num_sentences))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def clean(string):
string = re.sub("\\'s", " 's", string)
string = re.sub("\\'ve", " 've", string)
string = re.sub("n\\'t", " n't", string)
string = re.sub("\\'re", " 're", string)
string = re.sub("\\'d", " 'd", string)
string = re.sub("\\'ll", " 'll", string)
string = re.sub('!', ' ! ', string)
string = re.sub('\\\\\\(', ' \\( ', string)
string = re.sub('\\\\\\)', ' \\) ', string)
string = re.sub('\\?', ' \\? ', string)
string = re.sub('\\]\\]', '', string)
string = re.sub('\\n', '', string)
string = string.rstrip()
string = remove_text_inside_brackets(string, '(){}[]')
return string.strip()
def remove_text_inside_brackets(text, brackets):
count = [0] * (len(brackets) // 2)
saved_chars = []
for character in text:
for i, b in enumerate(brackets):
if character == b:
kind, is_close = divmod(i, 2)
count[kind] += (-1) ** is_close
if count[kind] < 0:
count[kind] = 0
else:
break
else:
if not any(count):
saved_chars.append(character)
return ''.join(saved_chars)
def reorder_sentences(output_sentences, input):
def custom_sort(s1, s2):
return input.find(s1) - input.find(s2)
output_sentences.sort(key=functools.cmp_to_key(custom_sort))
return output_sentences
def get_summarized(input, num_sentences):
input = clean(input)
tokenizer = RegexpTokenizer('\\w+')
base_words = [word.lower() for word in tokenizer.tokenize(input)]
words = [word for word in base_words if word not in stopwords.words()]
word_frequencies = FreqDist(words)
most_frequent_words = [pair[0] for pair in word_frequencies.most_common
(100)]
input = remove_text_inside_brackets(input, '====')
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
actual_sentences_pre = sent_detector.tokenize(input)
actual_sentences = []
for sentence in actual_sentences_pre:
if len(sentence.split()) <= 6:
continue
else:
actual_sentences.append(sentence)
working_sentences = [sentence.lower() for sentence in actual_sentences]
output_sentences = []
for word in most_frequent_words:
for i in range(0, len(working_sentences)):
if word in working_sentences[i] and actual_sentences[i
] not in output_sentences:
output_sentences.append(actual_sentences[i])
break
if len(output_sentences) >= num_sentences:
break
if len(output_sentences) >= num_sentences:
break
for sentence in output_sentences:
sentence.capitalize()
return reorder_sentences(output_sentences, input)
def summarize(input, num_sentences):
return ' '.join(get_summarized(input, num_sentences))
<|reserved_special_token_1|>
import functools
import re
import nltk.data
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from nltk.tokenize import RegexpTokenizer
def clean(string):
string = re.sub("\\'s", " 's", string)
string = re.sub("\\'ve", " 've", string)
string = re.sub("n\\'t", " n't", string)
string = re.sub("\\'re", " 're", string)
string = re.sub("\\'d", " 'd", string)
string = re.sub("\\'ll", " 'll", string)
string = re.sub('!', ' ! ', string)
string = re.sub('\\\\\\(', ' \\( ', string)
string = re.sub('\\\\\\)', ' \\) ', string)
string = re.sub('\\?', ' \\? ', string)
string = re.sub('\\]\\]', '', string)
string = re.sub('\\n', '', string)
string = string.rstrip()
string = remove_text_inside_brackets(string, '(){}[]')
return string.strip()
def remove_text_inside_brackets(text, brackets):
count = [0] * (len(brackets) // 2)
saved_chars = []
for character in text:
for i, b in enumerate(brackets):
if character == b:
kind, is_close = divmod(i, 2)
count[kind] += (-1) ** is_close
if count[kind] < 0:
count[kind] = 0
else:
break
else:
if not any(count):
saved_chars.append(character)
return ''.join(saved_chars)
def reorder_sentences(output_sentences, input):
def custom_sort(s1, s2):
return input.find(s1) - input.find(s2)
output_sentences.sort(key=functools.cmp_to_key(custom_sort))
return output_sentences
def get_summarized(input, num_sentences):
input = clean(input)
tokenizer = RegexpTokenizer('\\w+')
base_words = [word.lower() for word in tokenizer.tokenize(input)]
words = [word for word in base_words if word not in stopwords.words()]
word_frequencies = FreqDist(words)
most_frequent_words = [pair[0] for pair in word_frequencies.most_common
(100)]
input = remove_text_inside_brackets(input, '====')
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
actual_sentences_pre = sent_detector.tokenize(input)
actual_sentences = []
for sentence in actual_sentences_pre:
if len(sentence.split()) <= 6:
continue
else:
actual_sentences.append(sentence)
working_sentences = [sentence.lower() for sentence in actual_sentences]
output_sentences = []
for word in most_frequent_words:
for i in range(0, len(working_sentences)):
if word in working_sentences[i] and actual_sentences[i
] not in output_sentences:
output_sentences.append(actual_sentences[i])
break
if len(output_sentences) >= num_sentences:
break
if len(output_sentences) >= num_sentences:
break
for sentence in output_sentences:
sentence.capitalize()
return reorder_sentences(output_sentences, input)
def summarize(input, num_sentences):
return ' '.join(get_summarized(input, num_sentences))
<|reserved_special_token_1|>
import functools
import re
import nltk.data
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from nltk.tokenize import RegexpTokenizer
def clean(string):
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\\\(", " \( ", string)
string = re.sub(r"\\\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\]\]", "", string)
string = re.sub(r"\n", "", string)
string = string.rstrip()
string = remove_text_inside_brackets(string, "(){}[]")
return string.strip()
def remove_text_inside_brackets(text, brackets):
count = [0] * (len(brackets) // 2) # count open/close brackets
saved_chars = []
for character in text:
for i, b in enumerate(brackets):
if character == b: # found bracket
kind, is_close = divmod(i, 2)
count[kind] += (-1) ** is_close # `+1`: open, `-1`: close
if count[kind] < 0: # unbalanced bracket
count[kind] = 0 # keep it
else: # found bracket to remove
break
else: # character is not a [balanced] bracket
if not any(count): # outside brackets
saved_chars.append(character)
return ''.join(saved_chars)
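
# e.g. remove_text_inside_brackets("keep [drop] keep", "[]") == "keep  keep";
# an unbalanced closing bracket is kept in the output rather than raising.
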
def reorder_sentences(output_sentences, input):
def custom_sort(s1, s2):
return input.find(s1) - input.find(s2)
output_sentences.sort(key=functools.cmp_to_key(custom_sort))
return output_sentences
def get_summarized(input, num_sentences):
input = clean(input)
    tokenizer = RegexpTokenizer(r'\w+')  # raw string avoids an invalid-escape warning
base_words = [word.lower() for word in tokenizer.tokenize(input)]
words = [word for word in base_words if word not in stopwords.words()]
word_frequencies = FreqDist(words)
most_frequent_words = [pair[0] for pair in word_frequencies.most_common(100)]
    # strip text delimited by '=' characters (e.g. wiki-style "== heading ==")
    input = remove_text_inside_brackets(input, "====")
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
actual_sentences_pre = sent_detector.tokenize(input)
actual_sentences = []
    for sentence in actual_sentences_pre:
        # discard very short sentences (six words or fewer)
        if len(sentence.split()) <= 6:
            continue
        else:
            actual_sentences.append(sentence)
working_sentences = [sentence.lower() for sentence in actual_sentences]
output_sentences = []
for word in most_frequent_words:
for i in range(0, len(working_sentences)):
if word in working_sentences[i] and actual_sentences[i] not in output_sentences:
output_sentences.append(actual_sentences[i])
break
if len(output_sentences) >= num_sentences:
break
if len(output_sentences) >= num_sentences:
break
    # str.capitalize() returns a new string, so the original loop here
    # discarded its result; uppercase only the first character (capitalize()
    # would also lowercase the rest) and do it after reordering so that
    # input.find() in reorder_sentences still matches the untouched sentences.
    ordered = reorder_sentences(output_sentences, input)
    return [s[:1].upper() + s[1:] for s in ordered]
def summarize(input, num_sentences):
return " ".join(get_summarized(input, num_sentences))
|
flexible
|
{
"blob_id": "837e84d4a58d8fd0d0ffc24973d196ae57f9a260",
"index": 1723,
"step-1": "<mask token>\n\n\ndef reorder_sentences(output_sentences, input):\n\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef clean(string):\n string = re.sub(\"\\\\'s\", \" 's\", string)\n string = re.sub(\"\\\\'ve\", \" 've\", string)\n string = re.sub(\"n\\\\'t\", \" n't\", string)\n string = re.sub(\"\\\\'re\", \" 're\", string)\n string = re.sub(\"\\\\'d\", \" 'd\", string)\n string = re.sub(\"\\\\'ll\", \" 'll\", string)\n string = re.sub('!', ' ! ', string)\n string = re.sub('\\\\\\\\\\\\(', ' \\\\( ', string)\n string = re.sub('\\\\\\\\\\\\)', ' \\\\) ', string)\n string = re.sub('\\\\?', ' \\\\? ', string)\n string = re.sub('\\\\]\\\\]', '', string)\n string = re.sub('\\\\n', '', string)\n string = string.rstrip()\n string = remove_text_inside_brackets(string, '(){}[]')\n return string.strip()\n\n\ndef remove_text_inside_brackets(text, brackets):\n count = [0] * (len(brackets) // 2)\n saved_chars = []\n for character in text:\n for i, b in enumerate(brackets):\n if character == b:\n kind, is_close = divmod(i, 2)\n count[kind] += (-1) ** is_close\n if count[kind] < 0:\n count[kind] = 0\n else:\n break\n else:\n if not any(count):\n saved_chars.append(character)\n return ''.join(saved_chars)\n\n\ndef reorder_sentences(output_sentences, input):\n\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\n<mask token>\n\n\ndef summarize(input, num_sentences):\n return ' '.join(get_summarized(input, num_sentences))\n",
"step-3": "<mask token>\n\n\ndef clean(string):\n string = re.sub(\"\\\\'s\", \" 's\", string)\n string = re.sub(\"\\\\'ve\", \" 've\", string)\n string = re.sub(\"n\\\\'t\", \" n't\", string)\n string = re.sub(\"\\\\'re\", \" 're\", string)\n string = re.sub(\"\\\\'d\", \" 'd\", string)\n string = re.sub(\"\\\\'ll\", \" 'll\", string)\n string = re.sub('!', ' ! ', string)\n string = re.sub('\\\\\\\\\\\\(', ' \\\\( ', string)\n string = re.sub('\\\\\\\\\\\\)', ' \\\\) ', string)\n string = re.sub('\\\\?', ' \\\\? ', string)\n string = re.sub('\\\\]\\\\]', '', string)\n string = re.sub('\\\\n', '', string)\n string = string.rstrip()\n string = remove_text_inside_brackets(string, '(){}[]')\n return string.strip()\n\n\ndef remove_text_inside_brackets(text, brackets):\n count = [0] * (len(brackets) // 2)\n saved_chars = []\n for character in text:\n for i, b in enumerate(brackets):\n if character == b:\n kind, is_close = divmod(i, 2)\n count[kind] += (-1) ** is_close\n if count[kind] < 0:\n count[kind] = 0\n else:\n break\n else:\n if not any(count):\n saved_chars.append(character)\n return ''.join(saved_chars)\n\n\ndef reorder_sentences(output_sentences, input):\n\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\ndef get_summarized(input, num_sentences):\n input = clean(input)\n tokenizer = RegexpTokenizer('\\\\w+')\n base_words = [word.lower() for word in tokenizer.tokenize(input)]\n words = [word for word in base_words if word not in stopwords.words()]\n word_frequencies = FreqDist(words)\n most_frequent_words = [pair[0] for pair in word_frequencies.most_common\n (100)]\n input = remove_text_inside_brackets(input, '====')\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n actual_sentences_pre = sent_detector.tokenize(input)\n actual_sentences = []\n for sentence in actual_sentences_pre:\n if len(sentence.split()) <= 6:\n continue\n else:\n actual_sentences.append(sentence)\n working_sentences = [sentence.lower() for sentence in actual_sentences]\n output_sentences = []\n for word in most_frequent_words:\n for i in range(0, len(working_sentences)):\n if word in working_sentences[i] and actual_sentences[i\n ] not in output_sentences:\n output_sentences.append(actual_sentences[i])\n break\n if len(output_sentences) >= num_sentences:\n break\n if len(output_sentences) >= num_sentences:\n break\n for sentence in output_sentences:\n sentence.capitalize()\n return reorder_sentences(output_sentences, input)\n\n\ndef summarize(input, num_sentences):\n return ' '.join(get_summarized(input, num_sentences))\n",
"step-4": "import functools\nimport re\nimport nltk.data\nfrom nltk.corpus import stopwords\nfrom nltk.probability import FreqDist\nfrom nltk.tokenize import RegexpTokenizer\n\n\ndef clean(string):\n string = re.sub(\"\\\\'s\", \" 's\", string)\n string = re.sub(\"\\\\'ve\", \" 've\", string)\n string = re.sub(\"n\\\\'t\", \" n't\", string)\n string = re.sub(\"\\\\'re\", \" 're\", string)\n string = re.sub(\"\\\\'d\", \" 'd\", string)\n string = re.sub(\"\\\\'ll\", \" 'll\", string)\n string = re.sub('!', ' ! ', string)\n string = re.sub('\\\\\\\\\\\\(', ' \\\\( ', string)\n string = re.sub('\\\\\\\\\\\\)', ' \\\\) ', string)\n string = re.sub('\\\\?', ' \\\\? ', string)\n string = re.sub('\\\\]\\\\]', '', string)\n string = re.sub('\\\\n', '', string)\n string = string.rstrip()\n string = remove_text_inside_brackets(string, '(){}[]')\n return string.strip()\n\n\ndef remove_text_inside_brackets(text, brackets):\n count = [0] * (len(brackets) // 2)\n saved_chars = []\n for character in text:\n for i, b in enumerate(brackets):\n if character == b:\n kind, is_close = divmod(i, 2)\n count[kind] += (-1) ** is_close\n if count[kind] < 0:\n count[kind] = 0\n else:\n break\n else:\n if not any(count):\n saved_chars.append(character)\n return ''.join(saved_chars)\n\n\ndef reorder_sentences(output_sentences, input):\n\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\ndef get_summarized(input, num_sentences):\n input = clean(input)\n tokenizer = RegexpTokenizer('\\\\w+')\n base_words = [word.lower() for word in tokenizer.tokenize(input)]\n words = [word for word in base_words if word not in stopwords.words()]\n word_frequencies = FreqDist(words)\n most_frequent_words = [pair[0] for pair in word_frequencies.most_common\n (100)]\n input = remove_text_inside_brackets(input, '====')\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n actual_sentences_pre = sent_detector.tokenize(input)\n actual_sentences = []\n for sentence in actual_sentences_pre:\n if len(sentence.split()) <= 6:\n continue\n else:\n actual_sentences.append(sentence)\n working_sentences = [sentence.lower() for sentence in actual_sentences]\n output_sentences = []\n for word in most_frequent_words:\n for i in range(0, len(working_sentences)):\n if word in working_sentences[i] and actual_sentences[i\n ] not in output_sentences:\n output_sentences.append(actual_sentences[i])\n break\n if len(output_sentences) >= num_sentences:\n break\n if len(output_sentences) >= num_sentences:\n break\n for sentence in output_sentences:\n sentence.capitalize()\n return reorder_sentences(output_sentences, input)\n\n\ndef summarize(input, num_sentences):\n return ' '.join(get_summarized(input, num_sentences))\n",
"step-5": "import functools\nimport re\nimport nltk.data\nfrom nltk.corpus import stopwords\nfrom nltk.probability import FreqDist\nfrom nltk.tokenize import RegexpTokenizer\n\n\ndef clean(string):\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\\\\\(\", \" \\( \", string)\n string = re.sub(r\"\\\\\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\]\\]\", \"\", string)\n string = re.sub(r\"\\n\", \"\", string)\n string = string.rstrip()\n string = remove_text_inside_brackets(string, \"(){}[]\")\n return string.strip()\n\n\ndef remove_text_inside_brackets(text, brackets):\n count = [0] * (len(brackets) // 2) # count open/close brackets\n saved_chars = []\n for character in text:\n for i, b in enumerate(brackets):\n if character == b: # found bracket\n kind, is_close = divmod(i, 2)\n count[kind] += (-1) ** is_close # `+1`: open, `-1`: close\n if count[kind] < 0: # unbalanced bracket\n count[kind] = 0 # keep it\n else: # found bracket to remove\n break\n else: # character is not a [balanced] bracket\n if not any(count): # outside brackets\n saved_chars.append(character)\n return ''.join(saved_chars)\n\n\ndef reorder_sentences(output_sentences, input):\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\ndef get_summarized(input, num_sentences):\n input = clean(input)\n tokenizer = RegexpTokenizer('\\w+')\n base_words = [word.lower() for word in tokenizer.tokenize(input)]\n words = [word for word in base_words if word not in stopwords.words()]\n word_frequencies = FreqDist(words)\n most_frequent_words = [pair[0] for pair in word_frequencies.most_common(100)]\n\n input = remove_text_inside_brackets(input, \"====\")\n\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n actual_sentences_pre = sent_detector.tokenize(input)\n actual_sentences = []\n for sentence in actual_sentences_pre:\n if len(sentence.split()) <= 6:\n continue\n else:\n actual_sentences.append(sentence)\n working_sentences = [sentence.lower() for sentence in actual_sentences]\n output_sentences = []\n\n for word in most_frequent_words:\n for i in range(0, len(working_sentences)):\n if word in working_sentences[i] and actual_sentences[i] not in output_sentences:\n output_sentences.append(actual_sentences[i])\n break\n if len(output_sentences) >= num_sentences:\n break\n\n if len(output_sentences) >= num_sentences:\n break\n for sentence in output_sentences:\n sentence.capitalize()\n return reorder_sentences(output_sentences, input)\n\n\ndef summarize(input, num_sentences):\n return \" \".join(get_summarized(input, num_sentences))\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
__author__ = 'fshaw'
import gzip
import hashlib
import os
import uuid
import json
import jsonpickle
from chunked_upload.models import ChunkedUpload
from chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView
from django.conf import settings
from django.core import serializers
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.template.context_processors import csrf
from rest_framework.renderers import JSONRenderer
import web.apps.web_copo.schemas.utils.data_utils as d_utils
import web.apps.web_copo.utils.EnaUtils as u
from dal.broker_da import BrokerDA
from dal.copo_da import DataFile
from web.apps.web_copo.rest.models import CopoChunkedUpload
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
do_md5_check = False
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
files = {'files': {}}
files['files']['name'] = chunked_upload.filename
files['files']['id'] = chunked_upload.id
files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
'''
'''
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
# this method is called for writing smaller files (<= 260MB) to disk, larger files use the
# upload method in ChunkedUpload class
from django.utils import timezone
# need to make a chunked upload record to store deails of the file
if request.method == 'POST':
c = {}
f = request.FILES['file']
fname = f.__str__()
attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}
chunked_upload = ChunkedUpload(**attrs)
# file starts empty
chunked_upload.file.save(name='', content=ContentFile(''), save=True)
        path = chunked_upload.file
        # write the uploaded chunks straight into the file created above;
        # the context manager closes the handle even if a write fails
        with open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
c.update(csrf(request))
# create output structure to pass back to jquery-upload
files = {'files': {}}
files['files']['name'] = f._name
files['files']['size'] = path.size / (1000 * 1000.0)
files['files']['id'] = chunked_upload.id
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return HttpResponse(str, content_type='json')
def resume_chunked(request):
file_name = request.GET.get('filename')
user_id = request.user.id
# retrieve incomplete file for user with this name
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(
'-offset')[:1]
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def get_partial_uploads(request):
user_id = request.user.id
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def hash_upload(request):
# utility method to create an md5 hash of a given file path
# open uploaded file
file_id = request.GET['file_id']
print('hash started ' + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
# now hash opened file
md5 = hashlib.md5()
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
file_obj.hash = md5.hexdigest()
file_obj.save()
output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_hash")] = file_obj.hash
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = json.dumps(output_dict)
print('hash complete ' + file_id)
return HttpResponse(out, content_type='json')
def inspect_file(request):
# utility method to examine a file and return meta-data to the frontend
output_dict = {'file_type': 'unknown', 'do_compress': False}
# get reference to file
file_id = request.GET['file_id']
chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
# size threshold to determine if a file should be compressed
zip_threshold = 200000000 # size in bytes
# check if file is compressed
is_zipped = u.is_gzipped(file_name)
if chunked_upload.offset >= zip_threshold and not is_zipped:
output_dict['do_compress'] = True
# check for file type
if u.is_pdf_file(file_name):
output_dict['file_type'] = 'pdf'
else:
try:
if u.is_fastq_file(file_name):
output_dict['file_type'] = 'fastq'
if not is_zipped:
output_dict['do_compress'] = True
elif u.is_sam_file(file_name):
output_dict['file_type'] = 'sam'
if not is_zipped:
output_dict['do_compress'] = False
elif u.is_bam_file(file_name):
output_dict['file_type'] = 'bam'
if not is_zipped:
output_dict['do_compress'] = False
            else:  # make file type same as extension
                # rsplit with maxsplit=1 takes the text after the *last* dot;
                # the old rsplit('.')[1] broke on names containing several dots
                output_dict['file_type'] = chunked_upload.filename.rsplit('.', 1)[-1]
        except Exception:
            output_dict['file_type'] = 'unknown'
# add datafile schema
chunked_upload.type = output_dict['file_type']
chunked_upload.save()
# ...and obtain the inserted record
profile_id = request.session['profile_id']
component = "datafile"
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_id")] = file_id
auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
auto_fields[DataFile().get_qualified_field("file_location")] = file_name
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename
# get default type from schema
type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
if type:
type = type[0]["default_value"]
auto_fields[DataFile().get_qualified_field("type")] = type
df = BrokerDA(context=dict(),
profile_id=profile_id,
component=component,
auto_fields=auto_fields,
visualize="last_record"
).do_save_edit().get("record_object", dict())
out = jsonpickle.encode(output_dict)
return HttpResponse(out, content_type='json')
def zip_file(request):
# need to get a reference to the file to zip
file_id = request.GET['file_id']
print("zip started " + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
# get the name of the file to zip and change its suffix to .gz
output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
output_file_name = file_obj.filename + '.gz'
    # open the file as a gzip archive...set compression level
    temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
    # read the source in binary mode so non-text uploads are not corrupted;
    # context managers close both handles even on error (the old try/finally
    # raised a NameError if gzip.open itself failed)
    with gzip.open(temp_name, 'wb', compresslevel=1) as myzip, \
            open(output_file_location, 'rb') as src:
        # write input file to gzip archive in n byte chunks
        n = 100000000
        for chunk in iter(lambda: src.read(n), b''):
            myzip.write(chunk)
print('zip complete ' + file_id)
# now need to delete the old file and update the file record with the new file
new_file_name = output_file_location + '.gz'
os.rename(temp_name, new_file_name)
os.remove(output_file_location)
# calculate new file size
stats = os.stat(new_file_name)
new_file_size = stats.st_size / 1000 / 1000
# update filename
file_obj.filename = output_file_name
file_obj.file.name = new_file_name
# update file size
file_obj.offset = stats.st_size
file_obj.save()
out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
auto_fields[DataFile().get_qualified_field("name")] = output_file_name
auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = jsonpickle.encode(out)
return HttpResponse(out, content_type='json')
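
# Hedged sketch (not from the original module) of how the views and helpers
# above might be routed; the relative import and URL patterns are assumptions:
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^chunked_upload/$', views.CopoChunkedUploadView.as_view()),
#       url(r'^chunked_upload_complete/$', views.CopoChunkedUploadCompleteView.as_view()),
#       url(r'^hash_upload/$', views.hash_upload),
#       url(r'^inspect_file/$', views.inspect_file),
#       url(r'^zip_file/$', views.zip_file),
#   ]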
|
normal
|
{
"blob_id": "2b7415d86f9157ae55228efdd61c9a9e9920bc5c",
"index": 7716,
"step-1": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = 
request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = 
ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = 
ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n file_id = request.GET['file_id']\n print('zip started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name\n )\n output_file_name = file_obj.filename + '.gz'\n try:\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) +\n '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 'r')\n n = 100000000\n for chunk in iter(lambda : src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n finally:\n myzip.close()\n src.close()\n print('zip complete ' + file_id)\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n file_obj.offset = stats.st_size\n file_obj.save()\n out = {'zipped': True, 'file_name': output_file_name, 'file_size':\n new_file_size}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field('name')] = output_file_name\n auto_fields[DataFile().get_qualified_field('file_location')\n ] = new_file_name\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = jsonpickle.encode(out)\n return HttpResponse(out, 
content_type='json')\n",
"step-4": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef get_partial_uploads(request):\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id\n ).order_by('created_on')\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n 
out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n file_id = request.GET['file_id']\n print('zip started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name\n )\n output_file_name = file_obj.filename + '.gz'\n try:\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) +\n '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 'r')\n n = 100000000\n for chunk in iter(lambda : src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n finally:\n myzip.close()\n src.close()\n print('zip complete ' + file_id)\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n file_obj.offset = stats.st_size\n file_obj.save()\n out = {'zipped': True, 'file_name': output_file_name, 'file_size':\n new_file_size}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field('name')] = output_file_name\n auto_fields[DataFile().get_qualified_field('file_location')\n ] = 
new_file_name\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = jsonpickle.encode(out)\n return HttpResponse(out, content_type='json')\n",
"step-5": "__author__ = 'fshaw'\nimport gzip\nimport hashlib\nimport os\nimport uuid\nimport json\nimport jsonpickle\nfrom chunked_upload.models import ChunkedUpload\nfrom chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView\nfrom django.conf import settings\nfrom django.core import serializers\nfrom django.core.files.base import ContentFile\nfrom django.http import HttpResponse\nfrom django.template.context_processors import csrf\nfrom rest_framework.renderers import JSONRenderer\n\nimport web.apps.web_copo.schemas.utils.data_utils as d_utils\nimport web.apps.web_copo.utils.EnaUtils as u\nfrom dal.broker_da import BrokerDA\nfrom dal.copo_da import DataFile\nfrom web.apps.web_copo.rest.models import CopoChunkedUpload\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n\n '''\n '''\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n # this method is called for writing smaller files (<= 260MB) to disk, larger files use the\n # upload method in ChunkedUpload class\n\n from django.utils import timezone\n # need to make a chunked upload record to store deails of the file\n if request.method == 'POST':\n\n c = {}\n f = request.FILES['file']\n\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n # file starts empty\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n\n # create output structure to pass back to jquery-upload\n files = {'files': {}}\n files['files']['name'] = f._name\n\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n # retrieve incomplete file for user with this name\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(\n '-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef get_partial_uploads(request):\n user_id = 
request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef hash_upload(request):\n # utility method to create an md5 hash of a given file path\n # open uploaded file\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n\n # now hash opened file\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda: f.read(8192), b''):\n md5.update(chunk)\n\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n\n # update record in mongo\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_hash\")] = file_obj.hash\n\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n BrokerDA(target_id=str(record_object.get(\"_id\", str())),\n component=component,\n auto_fields=auto_fields\n ).do_save_edit()\n\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n # utility method to examine a file and return meta-data to the frontend\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n\n # get reference to file\n file_id = request.GET['file_id']\n\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n\n # size threshold to determine if a file should be compressed\n zip_threshold = 200000000 # size in bytes\n\n # check if file is compressed\n is_zipped = u.is_gzipped(file_name)\n\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n\n # check for file type\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n\n else: # make file type same as extension\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]\n except:\n output_dict['file_type'] = 'unknown'\n\n # add datafile schema\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n\n # ...and obtain the inserted record\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_id\")] = file_id\n auto_fields[DataFile().get_qualified_field(\"file_type\")] = output_dict['file_type']\n auto_fields[DataFile().get_qualified_field(\"file_location\")] = file_name\n auto_fields[DataFile().get_qualified_field(\"file_size\")] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field(\"name\")] = chunked_upload.filename\n\n # get default type from schema\n type = [f for f in d_utils.get_copo_schema(component) if f.get(\"id\").split(\".\")[-1] == \"type\"]\n if type:\n type = type[0][\"default_value\"]\n auto_fields[DataFile().get_qualified_field(\"type\")] = 
type\n\n df = BrokerDA(context=dict(),\n profile_id=profile_id,\n component=component,\n auto_fields=auto_fields,\n visualize=\"last_record\"\n ).do_save_edit().get(\"record_object\", dict())\n\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n # need to get a reference to the file to zip\n file_id = request.GET['file_id']\n print(\"zip started \" + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n\n # get the name of the file to zip and change its suffix to .gz\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n output_file_name = file_obj.filename + '.gz'\n try:\n # open the file as gzip acrchive...set compression level\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 'r')\n\n # write input file to gzip archive in n byte chunks\n n = 100000000\n for chunk in iter(lambda: src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n finally:\n myzip.close()\n src.close()\n\n print('zip complete ' + file_id)\n # now need to delete the old file and update the file record with the new file\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n\n # calculate new file size\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n\n # update filename\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n\n # update file size\n file_obj.offset = stats.st_size\n file_obj.save()\n\n out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}\n\n # update record in mongo\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_size\")] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field(\"name\")] = output_file_name\n auto_fields[DataFile().get_qualified_field(\"file_location\")] = new_file_name\n\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n BrokerDA(target_id=str(record_object.get(\"_id\", str())),\n component=component,\n auto_fields=auto_fields\n ).do_save_edit()\n\n out = jsonpickle.encode(out)\n return HttpResponse(out, content_type='json')\n",
"step-ids": [
12,
13,
14,
15,
18
]
}
|
[
12,
13,
14,
15,
18
] |
print("rap.sweeps.data_management level init")
|
normal
|
{
"blob_id": "7d138a0ad7e4d8f7047dd73ae503bdc7ae5aa065",
"index": 9801,
"step-1": "<mask token>\n",
"step-2": "print('rap.sweeps.data_management level init')\n",
"step-3": "print(\"rap.sweeps.data_management level init\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
input = """
t(Z) :- t0(Z).
t(Z) :- g(X,Y,Z), t(X), not t(Y).
t0(2).
g(5,1,3).
g(1,2,4).
g(3,4,5).
"""
output = """
t(Z) :- t0(Z).
t(Z) :- g(X,Y,Z), t(X), not t(Y).
t0(2).
g(5,1,3).
g(1,2,4).
g(3,4,5).
"""
|
normal
|
{
"blob_id": "df5c79c79d827b6b3de7ceb4b1e3c652c8956346",
"index": 2620,
"step-1": "<mask token>\n",
"step-2": "input = \"\"\"\nt(Z) :- t0(Z).\nt(Z) :- g(X,Y,Z), t(X), not t(Y).\n\nt0(2).\ng(5,1,3).\ng(1,2,4).\ng(3,4,5).\n\n\"\"\"\noutput = \"\"\"\nt(Z) :- t0(Z).\nt(Z) :- g(X,Y,Z), t(X), not t(Y).\n\nt0(2).\ng(5,1,3).\ng(1,2,4).\ng(3,4,5).\n\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import os
from PIL import Image
import urllib
import json
import math
def download_images(a,b):
image_count = 0
k = a
no_of_images = b
baseURL='https://graph.facebook.com/v2.2/'
imgURL='/picture?type=large'
sil_check='/picture?redirect=false'
while image_count<no_of_images:
obj=urllib.urlopen(baseURL+str(k)+sil_check)
data=obj.read()
jsondata=json.loads(data)
if not jsondata['data']['is_silhouette']:
img=urllib.urlopen(baseURL+str(k)+imgURL)
image=img.read()
f=open(str(k)+'.jpg','wb')
f.write(image)
f.close()
print 'Image written to '+str(k)+'.jpg'
image_count+=1
else:
print str(k)+' is Silhouette.'
k+=1
def resize_images():
files=[f for f in os.listdir('.') if os.path.isfile(f) and '.jpg' in f]
print 'Resizing images ...'
for i in files:
img=Image.open(i)
j = i.replace('jpg','png')
img.resize((100,100)).save(j)
img.close()
os.remove(i)
def create_mosaic(b):
files=[f for f in os.listdir('.') if os.path.isfile(f) and '.png' in f]
no_of_images = b
N = int(math.sqrt(no_of_images))
mosaic=Image.new('RGB',(N*100,N*100))
mpixels=mosaic.load()
mX,mY = 0,0
counter=0
print 'Combining images ...'
for img in files:
mX = (counter%N)*100
mY = (counter/N)*100
image=Image.open(img)
pixels=image.load()
for iY in range(100):
mX = (counter%N)*100
for iX in range(100):
try:
mpixels[mX,mY] = pixels[iX,iY]
except:
print mX,mY
mX+=1
mY+=1
counter+=1
image.close()
os.remove(img)
mosaic.save('mosaic.png')
a = int(raw_input('Enter the fb-id from where to begin:'))
b = int(raw_input('Enter the number of images to download (a square):'))
download_images(a,b)
resize_images()
create_mosaic(b)
|
normal
|
{
"blob_id": "533154fe58511ac9c9c693bf07f076146b0c6136",
"index": 4445,
"step-1": "import os\nfrom PIL import Image\nimport urllib\nimport json\nimport math\n\ndef download_images(a,b):\n\timage_count = 0\n\tk = a\n\tno_of_images = b\n\tbaseURL='https://graph.facebook.com/v2.2/'\n\timgURL='/picture?type=large'\n\tsil_check='/picture?redirect=false'\n\twhile image_count<no_of_images:\n\t\tobj=urllib.urlopen(baseURL+str(k)+sil_check)\n\t\tdata=obj.read()\n\t\tjsondata=json.loads(data)\n\t\tif not jsondata['data']['is_silhouette']:\n\t\t\timg=urllib.urlopen(baseURL+str(k)+imgURL)\n\t\t\timage=img.read()\n\t\t\tf=open(str(k)+'.jpg','wb')\n\t\t\tf.write(image)\n\t\t\tf.close()\n\t\t\tprint 'Image written to '+str(k)+'.jpg'\n\t\t\timage_count+=1\n\t\telse:\n\t\t\tprint str(k)+' is Silhouette.'\n\t\tk+=1\ndef resize_images():\n\tfiles=[f for f in os.listdir('.') if os.path.isfile(f) and '.jpg' in f]\n\tprint 'Resizing images ...'\n\tfor i in files:\n\t\timg=Image.open(i)\n\t\tj = i.replace('jpg','png')\n\t\timg.resize((100,100)).save(j)\n\t\timg.close()\n\t\tos.remove(i)\ndef create_mosaic(b):\n\tfiles=[f for f in os.listdir('.') if os.path.isfile(f) and '.png' in f]\n\tno_of_images = b\n\tN = int(math.sqrt(no_of_images))\n\tmosaic=Image.new('RGB',(N*100,N*100))\n\tmpixels=mosaic.load()\n\tmX,mY = 0,0\n\tcounter=0\n\tprint 'Combining images ...'\n\tfor img in files:\n\t\tmX = (counter%N)*100\n\t\tmY = (counter/N)*100\n\t\timage=Image.open(img)\n\t\tpixels=image.load()\n\t\tfor iY in range(100):\n\t\t\tmX = (counter%N)*100\n\t\t\tfor iX in range(100):\n\t\t\t\ttry:\n\t\t\t\t\tmpixels[mX,mY] = pixels[iX,iY]\n\t\t\t\texcept:\n\t\t\t\t\tprint mX,mY\n\t\t\t\tmX+=1\n\t\t\tmY+=1\n\t\tcounter+=1\n\t\timage.close()\n\t\tos.remove(img)\n\tmosaic.save('mosaic.png')\n\na = int(raw_input('Enter the fb-id from where to begin:'))\nb = int(raw_input('Enter the number of images to download (a square):'))\ndownload_images(a,b)\nresize_images()\ncreate_mosaic(b)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding=utf-8
"""
PYOPENGL-TOOLBOX UTILS
General purpouse functions.
MIT License
Copyright (c) 2015-2019 Pablo Pizarro R.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Library imports
from __future__ import print_function
from PyOpenGLtoolbox.geometry import draw_vertex_list
from PyOpenGLtoolbox.mathlib import Point3
import sys as _sys
# noinspection PyPep8Naming
import OpenGL.GL as _gl
# noinspection PyPep8Naming
import OpenGL.GLUT as _glut
# Constants
_UTILS_COLOR_BLACK = [0, 0, 0]
_UTILS_COLOR_WHITE = [1, 1, 1]
_UTILS_ERRS = [False]
def print_gl_error(err_msg):
"""
Prints an OpenGL error to console.
:param err_msg: Error message
:type err_msg: basestring
"""
if len(err_msg) == 0:
return
print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)
# noinspection PyUnresolvedReferences
def create_axes(length, both=False, text=False, font=_glut.GLUT_BITMAP_HELVETICA_18):
"""
Create axes system.
:param length: Axes length
:param both: Both axes
:param text: Show axes names (x,y,z)
:param font: Font
:type length: float, int
:type both: bool
:type text: bool
:type font: int
:return: OpenGL list
"""
if length > 0: # Valid length
# Crate points
x = Point3(length, 0, 0)
y = Point3(0, length, 0)
z = Point3(0, 0, length)
o = Point3()
# Create list
lista = _gl.glGenLists(1)
_gl.glNewList(lista, _gl.GL_COMPILE)
# Init primitve
_gl.glBegin(_gl.GL_LINES)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
if both: # Draw axes in both directions
x = Point3(-length, 0, 0)
y = Point3(0, -length, 0)
z = Point3(0, 0, -length)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
# End primitive
_gl.glEnd()
if text: # Draw axes names
draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)
draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)
draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)
if both:
draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)
draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)
draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)
# Returns list
_gl.glEndList()
return lista
else:
raise Exception('Axes length must be positive, greater than zero')
# noinspection PyUnresolvedReferences
def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24, linespace=20):
"""Dibuja un texto en una posicon dada por un punto point3"""
if color is None:
color = _UTILS_COLOR_WHITE
_gl.glColor3fv(color)
if isinstance(pos, Point3):
x = pos.get_x()
y = pos.get_y()
z = pos.get_z()
_gl.glRasterPos3f(x, y, z)
for char in text:
if char == "\n":
y += linespace
_gl.glRasterPos3f(x, y, z)
else:
# noinspection PyBroadException
try:
glutBitmapCharacter(font, ord(char))
except:
if not _UTILS_ERRS[0]:
print_gl_error('Actual OpenGL version doest not support glutBitmapCharacter function')
_UTILS_ERRS[0] = True
else:
raise Exception('Point must be Point3 type')
def get_rgb_normalized(r, g, b, a=1.0):
"""
Return rgb color normalized (from 0 to 1).
:param r: Red color
:param g: Green color
:param b: Blue color
:param a: Alpha
:type r: float, int
:type g: float, int
:type b: float, int
:type a: float
:return: RGBA tuple
:rtype: tuple
"""
if r <= 1 and g <= 1 and b <= 1:
return r, g, b, a
return r / 255.0, g / 255.0, b / 255.0, a
|
normal
|
{
"blob_id": "cffcfa08cd919f93dfe2ab8dc676efc76feafab3",
"index": 2123,
"step-1": "<mask token>\n\n\ndef create_axes(length, both=False, text=False, font=_glut.\n GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0:\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n if both:\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n _gl.glEnd()\n if text:\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n _gl.glEndList()\n return lista\n else:\n raise Exception('Axes length must be positive, greater than zero')\n\n\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == '\\n':\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error(\n 'Actual OpenGL version doest not support glutBitmapCharacter function'\n )\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_axes(length, both=False, text=False, font=_glut.\n GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0:\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n if both:\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n _gl.glEnd()\n if text:\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n _gl.glEndList()\n return lista\n else:\n raise Exception('Axes length must be positive, greater than zero')\n\n\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == '\\n':\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error(\n 'Actual OpenGL version doest not support glutBitmapCharacter function'\n )\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\ndef get_rgb_normalized(r, g, b, a=1.0):\n \"\"\"\n Return rgb color normalized (from 0 to 1).\n\n :param r: Red color\n :param g: Green color\n :param b: Blue color\n :param a: Alpha\n :type r: float, int\n :type g: float, int\n :type b: float, int\n :type a: float\n :return: RGBA tuple\n :rtype: tuple\n \"\"\"\n if r <= 1 and g <= 1 and b <= 1:\n return r, g, b, a\n return r / 255.0, g / 255.0, b / 255.0, a\n",
"step-3": "<mask token>\n_UTILS_COLOR_BLACK = [0, 0, 0]\n_UTILS_COLOR_WHITE = [1, 1, 1]\n_UTILS_ERRS = [False]\n\n\ndef print_gl_error(err_msg):\n \"\"\"\n Prints an OpenGL error to console.\n\n :param err_msg: Error message\n :type err_msg: basestring\n \"\"\"\n if len(err_msg) == 0:\n return\n print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)\n\n\ndef create_axes(length, both=False, text=False, font=_glut.\n GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0:\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n if both:\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n _gl.glEnd()\n if text:\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n _gl.glEndList()\n return lista\n else:\n raise Exception('Axes length must be positive, greater than zero')\n\n\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == '\\n':\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error(\n 'Actual OpenGL version doest not support glutBitmapCharacter function'\n )\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\ndef get_rgb_normalized(r, g, b, a=1.0):\n \"\"\"\n Return rgb color normalized (from 0 to 1).\n\n :param r: Red color\n :param g: Green color\n :param b: Blue color\n :param a: Alpha\n :type r: float, int\n :type g: float, int\n :type b: float, int\n :type a: float\n :return: RGBA tuple\n :rtype: tuple\n \"\"\"\n if r <= 1 and g <= 1 and b <= 1:\n return r, g, b, a\n return r / 255.0, g / 255.0, b / 255.0, a\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nfrom PyOpenGLtoolbox.geometry import draw_vertex_list\nfrom PyOpenGLtoolbox.mathlib import Point3\nimport sys as _sys\nimport OpenGL.GL as _gl\nimport OpenGL.GLUT as _glut\n_UTILS_COLOR_BLACK = [0, 0, 0]\n_UTILS_COLOR_WHITE = [1, 1, 1]\n_UTILS_ERRS = [False]\n\n\ndef print_gl_error(err_msg):\n \"\"\"\n Prints an OpenGL error to console.\n\n :param err_msg: Error message\n :type err_msg: basestring\n \"\"\"\n if len(err_msg) == 0:\n return\n print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)\n\n\ndef create_axes(length, both=False, text=False, font=_glut.\n GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0:\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n if both:\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n _gl.glEnd()\n if text:\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n _gl.glEndList()\n return lista\n else:\n raise Exception('Axes length must be positive, greater than zero')\n\n\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == '\\n':\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error(\n 'Actual OpenGL version doest not support glutBitmapCharacter function'\n )\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\ndef get_rgb_normalized(r, g, b, a=1.0):\n \"\"\"\n Return rgb color normalized (from 0 to 1).\n\n :param r: Red color\n :param g: Green color\n :param b: Blue color\n :param a: Alpha\n :type r: float, int\n :type g: float, int\n :type b: float, int\n :type a: float\n :return: RGBA tuple\n :rtype: tuple\n \"\"\"\n if r <= 1 and g <= 1 and b <= 1:\n return r, g, b, a\n return r / 255.0, g / 255.0, b / 255.0, a\n",
"step-5": "# coding=utf-8\n\"\"\"\nPYOPENGL-TOOLBOX UTILS\nGeneral purpouse functions.\n\nMIT License\nCopyright (c) 2015-2019 Pablo Pizarro R.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n# Library imports\nfrom __future__ import print_function\nfrom PyOpenGLtoolbox.geometry import draw_vertex_list\nfrom PyOpenGLtoolbox.mathlib import Point3\nimport sys as _sys\n\n# noinspection PyPep8Naming\nimport OpenGL.GL as _gl\n\n# noinspection PyPep8Naming\nimport OpenGL.GLUT as _glut\n\n# Constants\n_UTILS_COLOR_BLACK = [0, 0, 0]\n_UTILS_COLOR_WHITE = [1, 1, 1]\n_UTILS_ERRS = [False]\n\n\ndef print_gl_error(err_msg):\n \"\"\"\n Prints an OpenGL error to console.\n\n :param err_msg: Error message\n :type err_msg: basestring\n \"\"\"\n if len(err_msg) == 0:\n return\n print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)\n\n\n# noinspection PyUnresolvedReferences\ndef create_axes(length, both=False, text=False, font=_glut.GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0: # Valid length\n\n # Crate points\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n\n # Create list\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n\n # Init primitve\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n\n if both: # Draw axes in both directions\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n\n # End primitive\n _gl.glEnd()\n\n if text: # Draw axes names\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n\n # Returns list\n _gl.glEndList()\n return lista\n\n else:\n raise Exception('Axes length must be 
positive, greater than zero')\n\n\n# noinspection PyUnresolvedReferences\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24, linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == \"\\n\":\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n # noinspection PyBroadException\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error('Actual OpenGL version doest not support glutBitmapCharacter function')\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\ndef get_rgb_normalized(r, g, b, a=1.0):\n \"\"\"\n Return rgb color normalized (from 0 to 1).\n\n :param r: Red color\n :param g: Green color\n :param b: Blue color\n :param a: Alpha\n :type r: float, int\n :type g: float, int\n :type b: float, int\n :type a: float\n :return: RGBA tuple\n :rtype: tuple\n \"\"\"\n if r <= 1 and g <= 1 and b <= 1:\n return r, g, b, a\n return r / 255.0, g / 255.0, b / 255.0, a\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-24 11:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, null=True)),
('last_name', models.CharField(blank=True, max_length=30, null=True)),
('gender', models.CharField(blank=True, max_length=10, null=True)),
('email', models.EmailField(blank=True, max_length=255, null=True)),
('phone_number', models.CharField(blank=True, max_length=20, null=True)),
('address', models.TextField(blank=True, max_length=255, null=True)),
('city', models.CharField(blank=True, max_length=50, null=True)),
('state', models.CharField(blank=True, max_length=50, null=True)),
('post_code', models.CharField(blank=True, max_length=10, null=True)),
('comment', models.TextField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('barcode', models.CharField(blank=True, max_length=100, null=True)),
('item_name', models.CharField(blank=True, max_length=100, null=True)),
('catagory', models.CharField(blank=True, max_length=100, null=True)),
('wholesale_price', models.FloatField(blank=True, null=True)),
('retail_price', models.FloatField(blank=True, null=True)),
('tax', models.FloatField(blank=True, null=True)),
('quantity_stock', models.IntegerField(blank=True, null=True)),
('receiving_quantity', models.IntegerField(blank=True, null=True)),
('description', models.TextField(blank=True, max_length=1000, null=True)),
('image', models.ImageField(blank=True, default='no-img.jpg', null=True, upload_to='item/')),
('item_has_serial_number', models.BooleanField(default=False)),
('reorder_level', models.CharField(blank=True, max_length=10, null=True)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),
],
bases=('account.employee',),
),
migrations.CreateModel(
name='Supplier',
fields=[
('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),
('company_name', models.CharField(blank=True, max_length=100, null=True)),
],
bases=('account.employee',),
),
migrations.AddField(
model_name='item',
name='supplier',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Supplier'),
),
]
|
normal
|
{
"blob_id": "56157aaf3f98abc58572b45111becb91cb93f328",
"index": 2926,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Employee', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('first_name', models.CharField(blank=\n True, max_length=30, null=True)), ('last_name', models.CharField(\n blank=True, max_length=30, null=True)), ('gender', models.CharField\n (blank=True, max_length=10, null=True)), ('email', models.\n EmailField(blank=True, max_length=255, null=True)), ('phone_number',\n models.CharField(blank=True, max_length=20, null=True)), ('address',\n models.TextField(blank=True, max_length=255, null=True)), ('city',\n models.CharField(blank=True, max_length=50, null=True)), ('state',\n models.CharField(blank=True, max_length=50, null=True)), (\n 'post_code', models.CharField(blank=True, max_length=10, null=True)\n ), ('comment', models.TextField(blank=True, max_length=255, null=\n True))]), migrations.CreateModel(name='Item', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('barcode', models.CharField(blank=True,\n max_length=100, null=True)), ('item_name', models.CharField(blank=\n True, max_length=100, null=True)), ('catagory', models.CharField(\n blank=True, max_length=100, null=True)), ('wholesale_price', models\n .FloatField(blank=True, null=True)), ('retail_price', models.\n FloatField(blank=True, null=True)), ('tax', models.FloatField(blank\n =True, null=True)), ('quantity_stock', models.IntegerField(blank=\n True, null=True)), ('receiving_quantity', models.IntegerField(blank\n =True, null=True)), ('description', models.TextField(blank=True,\n max_length=1000, null=True)), ('image', models.ImageField(blank=\n True, default='no-img.jpg', null=True, upload_to='item/')), (\n 'item_has_serial_number', models.BooleanField(default=False)), (\n 'reorder_level', models.CharField(blank=True, max_length=10, null=\n True))]), migrations.CreateModel(name='Customer', fields=[(\n 'employee_ptr', models.OneToOneField(auto_created=True, on_delete=\n django.db.models.deletion.CASCADE, parent_link=True, primary_key=\n True, serialize=False, to='account.Employee'))], bases=(\n 'account.employee',)), migrations.CreateModel(name='Supplier',\n fields=[('employee_ptr', models.OneToOneField(auto_created=True,\n on_delete=django.db.models.deletion.CASCADE, parent_link=True,\n primary_key=True, serialize=False, to='account.Employee')), (\n 'company_name', models.CharField(blank=True, max_length=100, null=\n True))], bases=('account.employee',)), migrations.AddField(\n model_name='item', name='supplier', field=models.ForeignKey(blank=\n True, null=True, on_delete=django.db.models.deletion.CASCADE, to=\n 'account.Supplier'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Employee', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('first_name', models.CharField(blank=\n True, max_length=30, null=True)), ('last_name', models.CharField(\n blank=True, max_length=30, null=True)), ('gender', models.CharField\n (blank=True, max_length=10, null=True)), ('email', models.\n EmailField(blank=True, max_length=255, null=True)), ('phone_number',\n models.CharField(blank=True, max_length=20, null=True)), ('address',\n models.TextField(blank=True, max_length=255, null=True)), ('city',\n models.CharField(blank=True, max_length=50, null=True)), ('state',\n models.CharField(blank=True, max_length=50, null=True)), (\n 'post_code', models.CharField(blank=True, max_length=10, null=True)\n ), ('comment', models.TextField(blank=True, max_length=255, null=\n True))]), migrations.CreateModel(name='Item', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('barcode', models.CharField(blank=True,\n max_length=100, null=True)), ('item_name', models.CharField(blank=\n True, max_length=100, null=True)), ('catagory', models.CharField(\n blank=True, max_length=100, null=True)), ('wholesale_price', models\n .FloatField(blank=True, null=True)), ('retail_price', models.\n FloatField(blank=True, null=True)), ('tax', models.FloatField(blank\n =True, null=True)), ('quantity_stock', models.IntegerField(blank=\n True, null=True)), ('receiving_quantity', models.IntegerField(blank\n =True, null=True)), ('description', models.TextField(blank=True,\n max_length=1000, null=True)), ('image', models.ImageField(blank=\n True, default='no-img.jpg', null=True, upload_to='item/')), (\n 'item_has_serial_number', models.BooleanField(default=False)), (\n 'reorder_level', models.CharField(blank=True, max_length=10, null=\n True))]), migrations.CreateModel(name='Customer', fields=[(\n 'employee_ptr', models.OneToOneField(auto_created=True, on_delete=\n django.db.models.deletion.CASCADE, parent_link=True, primary_key=\n True, serialize=False, to='account.Employee'))], bases=(\n 'account.employee',)), migrations.CreateModel(name='Supplier',\n fields=[('employee_ptr', models.OneToOneField(auto_created=True,\n on_delete=django.db.models.deletion.CASCADE, parent_link=True,\n primary_key=True, serialize=False, to='account.Employee')), (\n 'company_name', models.CharField(blank=True, max_length=100, null=\n True))], bases=('account.employee',)), migrations.AddField(\n model_name='item', name='supplier', field=models.ForeignKey(blank=\n True, null=True, on_delete=django.db.models.deletion.CASCADE, to=\n 'account.Supplier'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-02-24 11:30\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('first_name', models.CharField(blank=True, max_length=30, null=True)),\n ('last_name', models.CharField(blank=True, max_length=30, null=True)),\n ('gender', models.CharField(blank=True, max_length=10, null=True)),\n ('email', models.EmailField(blank=True, max_length=255, null=True)),\n ('phone_number', models.CharField(blank=True, max_length=20, null=True)),\n ('address', models.TextField(blank=True, max_length=255, null=True)),\n ('city', models.CharField(blank=True, max_length=50, null=True)),\n ('state', models.CharField(blank=True, max_length=50, null=True)),\n ('post_code', models.CharField(blank=True, max_length=10, null=True)),\n ('comment', models.TextField(blank=True, max_length=255, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Item',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('barcode', models.CharField(blank=True, max_length=100, null=True)),\n ('item_name', models.CharField(blank=True, max_length=100, null=True)),\n ('catagory', models.CharField(blank=True, max_length=100, null=True)),\n ('wholesale_price', models.FloatField(blank=True, null=True)),\n ('retail_price', models.FloatField(blank=True, null=True)),\n ('tax', models.FloatField(blank=True, null=True)),\n ('quantity_stock', models.IntegerField(blank=True, null=True)),\n ('receiving_quantity', models.IntegerField(blank=True, null=True)),\n ('description', models.TextField(blank=True, max_length=1000, null=True)),\n ('image', models.ImageField(blank=True, default='no-img.jpg', null=True, upload_to='item/')),\n ('item_has_serial_number', models.BooleanField(default=False)),\n ('reorder_level', models.CharField(blank=True, max_length=10, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),\n ],\n bases=('account.employee',),\n ),\n migrations.CreateModel(\n name='Supplier',\n fields=[\n ('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),\n ('company_name', models.CharField(blank=True, max_length=100, null=True)),\n ],\n bases=('account.employee',),\n ),\n migrations.AddField(\n model_name='item',\n name='supplier',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Supplier'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SUCCESS = 200
NotFound = 404
url_site = 'https://petstore.swagger.io/v2'
new_username = 'Khrystyna'
new_id = 12345
invalid_new_id = 1234
error_message = 'oops we have a problem!'
store_inventory = {'1': 1, '4444': 2, 'teste': 1, 'string': 6738,
'Operated': 4, 'pending': 56, 'Not-Operated': 10, 'available': 4800,
'waiting list': 1, 'Unavailable': 1, 'Shortlisted': 1, 'Sold': 1,
'availasdfsadfasdfble': 1, 'not available': 1, 'Available': 1,
'YAI3424forYAI3373': 1, 'ok': 1, 'KBMAvailable': 3, 'onwork': 1, 'sold':
87, 'ddd': 1, 'Nonavailable': 1, 'Offline': 1, 'straight': 2, 'pendin':
1, 'sts': 1, 'onhold': 3, 'status': 5, 'xavailable': 1}
Category_Dict = dict(id=36, name='Rexy')
tag_dict = dict(id=4, name='Dog')
PetObject = Pet(id=456, category=Category_Dict, name='Xixi', photo_urls=[
'https://www.what-dog.net/Images/faces2/scroll001.jpg'], tags=[tag_dict
], status='sold')
DataJsonForPets = json.dumps(PetObject.__dict__)
store = Store(id=12, petId=12, quantity=2, ship_date=
'2018-09-12T13:52:49.901Z', status='placed', complete=True)
data_json_for_store = json.dumps(store.__dict__)
user = User(id=3, username='Nini', first_name='Vira', last_name='Budda',
email='email@gmail.com', password='1234567', phone='55455545',
user_status=1)
data_json_for_user = json.dumps(user.__dict__)
<|reserved_special_token_1|>
import json
from pets.pet import Pet
from store_requests.store import Store
from user_requests.user import User
SUCCESS = 200
NotFound = 404
url_site = 'https://petstore.swagger.io/v2'
new_username = 'Khrystyna'
new_id = 12345
invalid_new_id = 1234
error_message = 'oops we have a problem!'
store_inventory = {'1': 1, '4444': 2, 'teste': 1, 'string': 6738,
'Operated': 4, 'pending': 56, 'Not-Operated': 10, 'available': 4800,
'waiting list': 1, 'Unavailable': 1, 'Shortlisted': 1, 'Sold': 1,
'availasdfsadfasdfble': 1, 'not available': 1, 'Available': 1,
'YAI3424forYAI3373': 1, 'ok': 1, 'KBMAvailable': 3, 'onwork': 1, 'sold':
87, 'ddd': 1, 'Nonavailable': 1, 'Offline': 1, 'straight': 2, 'pendin':
1, 'sts': 1, 'onhold': 3, 'status': 5, 'xavailable': 1}
Category_Dict = dict(id=36, name='Rexy')
tag_dict = dict(id=4, name='Dog')
PetObject = Pet(id=456, category=Category_Dict, name='Xixi', photo_urls=[
'https://www.what-dog.net/Images/faces2/scroll001.jpg'], tags=[tag_dict
], status='sold')
DataJsonForPets = json.dumps(PetObject.__dict__)
store = Store(id=12, petId=12, quantity=2, ship_date=
'2018-09-12T13:52:49.901Z', status='placed', complete=True)
data_json_for_store = json.dumps(store.__dict__)
user = User(id=3, username='Nini', first_name='Vira', last_name='Budda',
email='email@gmail.com', password='1234567', phone='55455545',
user_status=1)
data_json_for_user = json.dumps(user.__dict__)
<|reserved_special_token_1|>
import json
from pets.pet import Pet
from store_requests.store import Store
from user_requests.user import User
SUCCESS = 200
NotFound = 404
url_site = 'https://petstore.swagger.io/v2'
new_username = "Khrystyna"
new_id = 12345
invalid_new_id = 1234
error_message = "oops we have a problem!"
store_inventory = {
"1": 1,
"4444": 2,
"teste": 1,
"string": 6738,
"Operated": 4,
"pending": 56,
"Not-Operated": 10,
"available": 4800,
"waiting list": 1,
"Unavailable": 1,
"Shortlisted": 1,
"Sold": 1,
"availasdfsadfasdfble": 1,
"not available": 1,
"Available": 1,
"YAI3424forYAI3373": 1,
"ok": 1,
"KBMAvailable": 3,
"onwork": 1,
"sold": 87,
"ddd": 1,
"Nonavailable": 1,
"Offline": 1,
"straight": 2,
"pendin": 1,
"sts": 1,
"onhold": 3,
"status": 5,
"xavailable": 1
}
Category_Dict = dict(id=36,
name='Rexy')
tag_dict = dict(id=4,
name='Dog')
PetObject = Pet(id=456,
category=Category_Dict,
name="Xixi",
photo_urls=["https://www.what-dog.net/Images/faces2/scroll001.jpg"],
tags=[tag_dict],
status='sold')
DataJsonForPets = json.dumps(PetObject.__dict__)
store = Store(id=12,
petId=12,
quantity=2,
ship_date="2018-09-12T13:52:49.901Z",
status="placed",
complete=True)
data_json_for_store = json.dumps(store.__dict__)
user = User(id=3,
username="Nini",
first_name="Vira",
last_name="Budda",
email="email@gmail.com",
password="1234567",
phone="55455545",
user_status=1)
data_json_for_user = json.dumps(user.__dict__)
|
flexible
|
{
"blob_id": "54ed0683d0f8d907c27e2f3809f9533556593392",
"index": 5546,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSUCCESS = 200\nNotFound = 404\nurl_site = 'https://petstore.swagger.io/v2'\nnew_username = 'Khrystyna'\nnew_id = 12345\ninvalid_new_id = 1234\nerror_message = 'oops we have a problem!'\nstore_inventory = {'1': 1, '4444': 2, 'teste': 1, 'string': 6738,\n 'Operated': 4, 'pending': 56, 'Not-Operated': 10, 'available': 4800,\n 'waiting list': 1, 'Unavailable': 1, 'Shortlisted': 1, 'Sold': 1,\n 'availasdfsadfasdfble': 1, 'not available': 1, 'Available': 1,\n 'YAI3424forYAI3373': 1, 'ok': 1, 'KBMAvailable': 3, 'onwork': 1, 'sold':\n 87, 'ddd': 1, 'Nonavailable': 1, 'Offline': 1, 'straight': 2, 'pendin':\n 1, 'sts': 1, 'onhold': 3, 'status': 5, 'xavailable': 1}\nCategory_Dict = dict(id=36, name='Rexy')\ntag_dict = dict(id=4, name='Dog')\nPetObject = Pet(id=456, category=Category_Dict, name='Xixi', photo_urls=[\n 'https://www.what-dog.net/Images/faces2/scroll001.jpg'], tags=[tag_dict\n ], status='sold')\nDataJsonForPets = json.dumps(PetObject.__dict__)\nstore = Store(id=12, petId=12, quantity=2, ship_date=\n '2018-09-12T13:52:49.901Z', status='placed', complete=True)\ndata_json_for_store = json.dumps(store.__dict__)\nuser = User(id=3, username='Nini', first_name='Vira', last_name='Budda',\n email='email@gmail.com', password='1234567', phone='55455545',\n user_status=1)\ndata_json_for_user = json.dumps(user.__dict__)\n",
"step-3": "import json\nfrom pets.pet import Pet\nfrom store_requests.store import Store\nfrom user_requests.user import User\nSUCCESS = 200\nNotFound = 404\nurl_site = 'https://petstore.swagger.io/v2'\nnew_username = 'Khrystyna'\nnew_id = 12345\ninvalid_new_id = 1234\nerror_message = 'oops we have a problem!'\nstore_inventory = {'1': 1, '4444': 2, 'teste': 1, 'string': 6738,\n 'Operated': 4, 'pending': 56, 'Not-Operated': 10, 'available': 4800,\n 'waiting list': 1, 'Unavailable': 1, 'Shortlisted': 1, 'Sold': 1,\n 'availasdfsadfasdfble': 1, 'not available': 1, 'Available': 1,\n 'YAI3424forYAI3373': 1, 'ok': 1, 'KBMAvailable': 3, 'onwork': 1, 'sold':\n 87, 'ddd': 1, 'Nonavailable': 1, 'Offline': 1, 'straight': 2, 'pendin':\n 1, 'sts': 1, 'onhold': 3, 'status': 5, 'xavailable': 1}\nCategory_Dict = dict(id=36, name='Rexy')\ntag_dict = dict(id=4, name='Dog')\nPetObject = Pet(id=456, category=Category_Dict, name='Xixi', photo_urls=[\n 'https://www.what-dog.net/Images/faces2/scroll001.jpg'], tags=[tag_dict\n ], status='sold')\nDataJsonForPets = json.dumps(PetObject.__dict__)\nstore = Store(id=12, petId=12, quantity=2, ship_date=\n '2018-09-12T13:52:49.901Z', status='placed', complete=True)\ndata_json_for_store = json.dumps(store.__dict__)\nuser = User(id=3, username='Nini', first_name='Vira', last_name='Budda',\n email='email@gmail.com', password='1234567', phone='55455545',\n user_status=1)\ndata_json_for_user = json.dumps(user.__dict__)\n",
"step-4": "import json\n\nfrom pets.pet import Pet\nfrom store_requests.store import Store\nfrom user_requests.user import User\n\nSUCCESS = 200\nNotFound = 404\nurl_site = 'https://petstore.swagger.io/v2'\nnew_username = \"Khrystyna\"\nnew_id = 12345\ninvalid_new_id = 1234\nerror_message = \"oops we have a problem!\"\nstore_inventory = {\n \"1\": 1,\n \"4444\": 2,\n \"teste\": 1,\n \"string\": 6738,\n \"Operated\": 4,\n \"pending\": 56,\n \"Not-Operated\": 10,\n \"available\": 4800,\n \"waiting list\": 1,\n \"Unavailable\": 1,\n \"Shortlisted\": 1,\n \"Sold\": 1,\n \"availasdfsadfasdfble\": 1,\n \"not available\": 1,\n \"Available\": 1,\n \"YAI3424forYAI3373\": 1,\n \"ok\": 1,\n \"KBMAvailable\": 3,\n \"onwork\": 1,\n \"sold\": 87,\n \"ddd\": 1,\n \"Nonavailable\": 1,\n \"Offline\": 1,\n \"straight\": 2,\n \"pendin\": 1,\n \"sts\": 1,\n \"onhold\": 3,\n \"status\": 5,\n \"xavailable\": 1\n}\n\nCategory_Dict = dict(id=36,\n name='Rexy')\ntag_dict = dict(id=4,\n name='Dog')\nPetObject = Pet(id=456,\n category=Category_Dict,\n name=\"Xixi\",\n photo_urls=[\"https://www.what-dog.net/Images/faces2/scroll001.jpg\"],\n tags=[tag_dict],\n status='sold')\nDataJsonForPets = json.dumps(PetObject.__dict__)\n\nstore = Store(id=12,\n petId=12,\n quantity=2,\n ship_date=\"2018-09-12T13:52:49.901Z\",\n status=\"placed\",\n complete=True)\ndata_json_for_store = json.dumps(store.__dict__)\n\nuser = User(id=3,\n username=\"Nini\",\n first_name=\"Vira\",\n last_name=\"Budda\",\n email=\"email@gmail.com\",\n password=\"1234567\",\n phone=\"55455545\",\n user_status=1)\ndata_json_for_user = json.dumps(user.__dict__)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def derf1(l, lp, kbt):
return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)
def xdefWLC(kbt, l, p, f):
l0 = 0.9999
lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)
if abs(f) < 1e-05:
return 0.0
while abs(l0 - lnew) > 1e-05:
l0 = lnew
lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)
xdef = l * lnew
return xdef
def intfdexWLC(kbt, l, p, f):
l0 = xdefWLC(kbt, l, p, f)
return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *
l) - l0 / l)
def stretching_energy(K, l, p, f, fmin):
total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)
return total
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f1(l, lp, kbt):
return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)
def derf1(l, lp, kbt):
return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)
def xdefWLC(kbt, l, p, f):
l0 = 0.9999
lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)
if abs(f) < 1e-05:
return 0.0
while abs(l0 - lnew) > 1e-05:
l0 = lnew
lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)
xdef = l * lnew
return xdef
def intfdexWLC(kbt, l, p, f):
l0 = xdefWLC(kbt, l, p, f)
return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *
l) - l0 / l)
def stretching_energy(K, l, p, f, fmin):
total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)
return total
<|reserved_special_token_0|>
for i in range(1, 2):
force = forces[i]
fmin = 0.0
DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force
) - 2.0 * stretching_energy(K, l, p, force, fmin)
Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *
xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,
force, fmin) / xdefWLC(K, l, p, force)) / K
print(Ateo)
for j in range(0, 1):
Dt = 10 * dt * 2 ** j
dn = 10 * 2 ** j
n = 0
folder = (
'/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'
)
filein = '%s%s_%dhist.txt' % (folder, force, dn)
foldout = (
'/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'
)
data = np.loadtxt(filein)
data2 = np.asarray(data)
dx = data2[:, 0]
pdx = data2[:, 1]
dxav = 0.0
pdxav = 0.0
for k in range(0, len(dx)):
dxav += dx[k] * pdx[k]
pdxav += pdx[k]
dxav /= pdxav
pdx /= pdxav
print(force)
print(Dt)
print(dxav)
print('\n')
epsilon = 0.08
delta = 0.4
y = []
x = []
for k in range(0, len(dx)):
if (dx[k] <= 0.0) & (dx[k] <= -delta):
for l in range(0, len(dx)):
if (dx[l] >= 0.0) & (dx[l] >= delta):
if np.absolute(dx[k] + dx[l]) <= epsilon:
x.append(dx[l] / dxav)
y.append(np.log(pdx[l] / pdx[k]) / dxav)
"""
print(x)
#print("
")
#print(y)
print(len(x))
print("
")
print(len(y))
#plt.plot(x,y, 'o')
plt.plot(x,y)
plt.show()
"""
fileout = '%s%s_%dFT.txt' % (foldout, force, dn)
fout = open(fileout, 'w')
for k in range(0, len(x)):
fout.write('%.12f %.12f\n' % (x[k], y[k]))
fout.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f1(l, lp, kbt):
return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)
def derf1(l, lp, kbt):
return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)
def xdefWLC(kbt, l, p, f):
l0 = 0.9999
lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)
if abs(f) < 1e-05:
return 0.0
while abs(l0 - lnew) > 1e-05:
l0 = lnew
lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)
xdef = l * lnew
return xdef
def intfdexWLC(kbt, l, p, f):
l0 = xdefWLC(kbt, l, p, f)
return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *
l) - l0 / l)
def stretching_energy(K, l, p, f, fmin):
total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)
return total
forces = [5, 6, 7, 8, 9, 10, 11, 12]
delimiter = ' '
p = 0.75
K = 4.114
l = 0.66
Deltamu = 2.0 * K
Deltabp = 2.0 * K
n_atp = 1.0
force = forces[7]
k0 = 1000000.0
dt = 0.5 / (k0 * 5)
fmin = 0.0
<|reserved_special_token_0|>
for i in range(1, 2):
force = forces[i]
fmin = 0.0
DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force
) - 2.0 * stretching_energy(K, l, p, force, fmin)
Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *
xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,
force, fmin) / xdefWLC(K, l, p, force)) / K
print(Ateo)
for j in range(0, 1):
Dt = 10 * dt * 2 ** j
dn = 10 * 2 ** j
n = 0
folder = (
'/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'
)
filein = '%s%s_%dhist.txt' % (folder, force, dn)
foldout = (
'/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'
)
data = np.loadtxt(filein)
data2 = np.asarray(data)
dx = data2[:, 0]
pdx = data2[:, 1]
dxav = 0.0
pdxav = 0.0
for k in range(0, len(dx)):
dxav += dx[k] * pdx[k]
pdxav += pdx[k]
dxav /= pdxav
pdx /= pdxav
print(force)
print(Dt)
print(dxav)
print('\n')
epsilon = 0.08
delta = 0.4
y = []
x = []
for k in range(0, len(dx)):
if (dx[k] <= 0.0) & (dx[k] <= -delta):
for l in range(0, len(dx)):
if (dx[l] >= 0.0) & (dx[l] >= delta):
if np.absolute(dx[k] + dx[l]) <= epsilon:
x.append(dx[l] / dxav)
y.append(np.log(pdx[l] / pdx[k]) / dxav)
"""
print(x)
#print("
")
#print(y)
print(len(x))
print("
")
print(len(y))
#plt.plot(x,y, 'o')
plt.plot(x,y)
plt.show()
"""
fileout = '%s%s_%dFT.txt' % (foldout, force, dn)
fout = open(fileout, 'w')
for k in range(0, len(x)):
fout.write('%.12f %.12f\n' % (x[k], y[k]))
fout.close()
<|reserved_special_token_1|>
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import seaborn as sns
import os
from pathlib import Path
def f1(l, lp, kbt):
return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)
def derf1(l, lp, kbt):
return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)
def xdefWLC(kbt, l, p, f):
l0 = 0.9999
lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)
if abs(f) < 1e-05:
return 0.0
while abs(l0 - lnew) > 1e-05:
l0 = lnew
lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)
xdef = l * lnew
return xdef
def intfdexWLC(kbt, l, p, f):
l0 = xdefWLC(kbt, l, p, f)
return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *
l) - l0 / l)
def stretching_energy(K, l, p, f, fmin):
total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)
return total
forces = [5, 6, 7, 8, 9, 10, 11, 12]
delimiter = ' '
p = 0.75
K = 4.114
l = 0.66
Deltamu = 2.0 * K
Deltabp = 2.0 * K
n_atp = 1.0
force = forces[7]
k0 = 1000000.0
dt = 0.5 / (k0 * 5)
fmin = 0.0
<|reserved_special_token_0|>
for i in range(1, 2):
force = forces[i]
fmin = 0.0
DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force
) - 2.0 * stretching_energy(K, l, p, force, fmin)
Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *
xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,
force, fmin) / xdefWLC(K, l, p, force)) / K
print(Ateo)
for j in range(0, 1):
Dt = 10 * dt * 2 ** j
dn = 10 * 2 ** j
n = 0
folder = (
'/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'
)
filein = '%s%s_%dhist.txt' % (folder, force, dn)
foldout = (
'/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'
)
data = np.loadtxt(filein)
data2 = np.asarray(data)
dx = data2[:, 0]
pdx = data2[:, 1]
dxav = 0.0
pdxav = 0.0
for k in range(0, len(dx)):
dxav += dx[k] * pdx[k]
pdxav += pdx[k]
dxav /= pdxav
pdx /= pdxav
print(force)
print(Dt)
print(dxav)
print('\n')
epsilon = 0.08
delta = 0.4
y = []
x = []
for k in range(0, len(dx)):
if (dx[k] <= 0.0) & (dx[k] <= -delta):
for l in range(0, len(dx)):
if (dx[l] >= 0.0) & (dx[l] >= delta):
if np.absolute(dx[k] + dx[l]) <= epsilon:
x.append(dx[l] / dxav)
y.append(np.log(pdx[l] / pdx[k]) / dxav)
"""
print(x)
#print("
")
#print(y)
print(len(x))
print("
")
print(len(y))
#plt.plot(x,y, 'o')
plt.plot(x,y)
plt.show()
"""
fileout = '%s%s_%dFT.txt' % (foldout, force, dn)
fout = open(fileout, 'w')
for k in range(0, len(x)):
fout.write('%.12f %.12f\n' % (x[k], y[k]))
fout.close()
<|reserved_special_token_1|>
#!/usr/bin/python
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import seaborn as sns
import os
from pathlib import Path
#import math
#functions related to elasticity of WLC
def f1(l,lp, kbt):
return kbt/lp*(1./(4*(1.-l)*(1.-l))-.25+l)
def derf1(l,lp,kbt):
return kbt/lp*(.5/((1.-l)*(1.-l)*(1.-l))+1.)
def xdefWLC(kbt, l, p, f):
l0=.9999
lnew=l0-(f1(l0,p,kbt)-f)/derf1(l0,p,kbt)
if abs(f)<1.e-5: return 0.0
while abs(l0-lnew)>1.e-5:
l0=lnew
lnew=l0-(f1(l0,p,kbt)-f)/derf1(l0,p,kbt)
xdef=l*lnew
return xdef
def intfdexWLC (kbt, l, p, f):
l0=xdefWLC(kbt,l,p,f)
return kbt*l/(4.*p)*(1./(1.-l0/l)+2*l0*l0/(l*l)-l0/l)
def stretching_energy(K,l,p,f,fmin):
total= intfdexWLC(K,l,p,f)-intfdexWLC(K,l,p,fmin)
return total
forces = [ 5 ,6 ,7 , 8, 9, 10, 11, 12]
delimiter = " "
p = 0.75
K = 4.114
l = 0.66
#value of mu(pN/nm), N, etc
Deltamu=2.0*K #chemical potential of 1 ATP 3*K
Deltabp=2.0*K #free-energy of 1 bp opening
n_atp=1. #Number of bp opened by 1-step of the motor
force = forces[7]
#frequency bps
k0=1000000.
#montecarlo step = 1e-7
dt=0.5/(k0*5)
fmin=0.0
'''
print (DeltaG)
print(n_atp*Deltamu)
print(Deltabp)
print(2.*force*xdefWLC(K,l,p,force))
print(2.*stretching_energy(K,l,p,force,fmin))
#time.sleep(30.5)
print("pre")
print(k0*np.exp(-DeltaG/(2.*K))*dt)
print("pun")
print(k0*np.exp(DeltaG/(2.*K))*dt)
'''
for i in range(1,2):
force=forces[i]
fmin=0.0
DeltaG=n_atp*Deltamu-Deltabp+2.*force*xdefWLC(K,l,p,force)-2.*stretching_energy(K,l,p,force,fmin)
Ateo=(n_atp*Deltamu/(2*xdefWLC(K,l,p,force))-Deltabp/(2*xdefWLC(K,l,p,force))+force-1.*stretching_energy(K,l,p,force,fmin)/xdefWLC(K,l,p,force))/K
print(Ateo)
for j in range (0,1):
#CHECK parameter dt is the same than for the files processed in hist
Dt=10*dt*2**j
#print(Dt)
dn=10*2**j
n=0
folder='/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'
filein='%s%s_%dhist.txt' %(folder,force,dn)
#File_in Dx, p(Dx)
foldout='/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'
#reading input file
data=np.loadtxt(filein)
data2=np.asarray(data)
dx=data2[:,0]
pdx=data2[:,1]
#computing the average <Dx>
dxav=0.
pdxav=0.
		##Renormalize the probabilities again (discarding the 0 bin changed the integral)
for k in range(0,len(dx)):
dxav+=dx[k]*pdx[k]
pdxav+=pdx[k]
dxav/=pdxav
pdx/=pdxav
print(force)
print(Dt)
print(dxav)
print("\n")
epsilon=0.08
delta=0.4
y=[]
x=[]
for k in range(0,len(dx)):
if (dx[k]<=0.) & (dx[k]<=-delta):
for l in range(0,len(dx)):
				#check whether x[l]+x[k]<epsilon --> compute
if (dx[l]>=0.) & (dx[l]>=delta):
if np.absolute(dx[k]+dx[l]) <= epsilon:
x.append(dx[l]/dxav)
y.append((np.log(pdx[l]/pdx[k]))/dxav)
'''
print(x)
#print("\n")
#print(y)
print(len(x))
print("\n")
print(len(y))
#plt.plot(x,y, 'o')
plt.plot(x,y)
plt.show()
'''
#writing to an output file for each DT
fileout='%s%s_%dFT.txt' %(foldout,force,dn)
fout=open(fileout,"w")
#fout.write()
for k in range(0,len(x)):
fout.write('%.12f %.12f\n'%(x[k],y[k]))
fout.close()
|
flexible
|
{
"blob_id": "9817600759bc01e89f6c48bdc2d256651aedf74d",
"index": 1788,
"step-1": "<mask token>\n\n\ndef derf1(l, lp, kbt):\n return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)\n\n\ndef xdefWLC(kbt, l, p, f):\n l0 = 0.9999\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n if abs(f) < 1e-05:\n return 0.0\n while abs(l0 - lnew) > 1e-05:\n l0 = lnew\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n xdef = l * lnew\n return xdef\n\n\ndef intfdexWLC(kbt, l, p, f):\n l0 = xdefWLC(kbt, l, p, f)\n return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *\n l) - l0 / l)\n\n\ndef stretching_energy(K, l, p, f, fmin):\n total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)\n return total\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f1(l, lp, kbt):\n return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)\n\n\ndef derf1(l, lp, kbt):\n return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)\n\n\ndef xdefWLC(kbt, l, p, f):\n l0 = 0.9999\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n if abs(f) < 1e-05:\n return 0.0\n while abs(l0 - lnew) > 1e-05:\n l0 = lnew\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n xdef = l * lnew\n return xdef\n\n\ndef intfdexWLC(kbt, l, p, f):\n l0 = xdefWLC(kbt, l, p, f)\n return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *\n l) - l0 / l)\n\n\ndef stretching_energy(K, l, p, f, fmin):\n total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)\n return total\n\n\n<mask token>\nfor i in range(1, 2):\n force = forces[i]\n fmin = 0.0\n DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force\n ) - 2.0 * stretching_energy(K, l, p, force, fmin)\n Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *\n xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,\n force, fmin) / xdefWLC(K, l, p, force)) / K\n print(Ateo)\n for j in range(0, 1):\n Dt = 10 * dt * 2 ** j\n dn = 10 * 2 ** j\n n = 0\n folder = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'\n )\n filein = '%s%s_%dhist.txt' % (folder, force, dn)\n foldout = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'\n )\n data = np.loadtxt(filein)\n data2 = np.asarray(data)\n dx = data2[:, 0]\n pdx = data2[:, 1]\n dxav = 0.0\n pdxav = 0.0\n for k in range(0, len(dx)):\n dxav += dx[k] * pdx[k]\n pdxav += pdx[k]\n dxav /= pdxav\n pdx /= pdxav\n print(force)\n print(Dt)\n print(dxav)\n print('\\n')\n epsilon = 0.08\n delta = 0.4\n y = []\n x = []\n for k in range(0, len(dx)):\n if (dx[k] <= 0.0) & (dx[k] <= -delta):\n for l in range(0, len(dx)):\n if (dx[l] >= 0.0) & (dx[l] >= delta):\n if np.absolute(dx[k] + dx[l]) <= epsilon:\n x.append(dx[l] / dxav)\n y.append(np.log(pdx[l] / pdx[k]) / dxav)\n \"\"\"\t\t\n\t\tprint(x)\n\t\t#print(\"\n\")\n\t\t#print(y)\n\t\tprint(len(x))\n\t\tprint(\"\n\")\n\t\tprint(len(y))\n\t\t#plt.plot(x,y, 'o')\n\t\tplt.plot(x,y)\n\t\tplt.show()\n\t\t\"\"\"\n fileout = '%s%s_%dFT.txt' % (foldout, force, dn)\n fout = open(fileout, 'w')\n for k in range(0, len(x)):\n fout.write('%.12f %.12f\\n' % (x[k], y[k]))\n fout.close()\n",
"step-3": "<mask token>\n\n\ndef f1(l, lp, kbt):\n return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)\n\n\ndef derf1(l, lp, kbt):\n return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)\n\n\ndef xdefWLC(kbt, l, p, f):\n l0 = 0.9999\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n if abs(f) < 1e-05:\n return 0.0\n while abs(l0 - lnew) > 1e-05:\n l0 = lnew\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n xdef = l * lnew\n return xdef\n\n\ndef intfdexWLC(kbt, l, p, f):\n l0 = xdefWLC(kbt, l, p, f)\n return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *\n l) - l0 / l)\n\n\ndef stretching_energy(K, l, p, f, fmin):\n total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)\n return total\n\n\nforces = [5, 6, 7, 8, 9, 10, 11, 12]\ndelimiter = ' '\np = 0.75\nK = 4.114\nl = 0.66\nDeltamu = 2.0 * K\nDeltabp = 2.0 * K\nn_atp = 1.0\nforce = forces[7]\nk0 = 1000000.0\ndt = 0.5 / (k0 * 5)\nfmin = 0.0\n<mask token>\nfor i in range(1, 2):\n force = forces[i]\n fmin = 0.0\n DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force\n ) - 2.0 * stretching_energy(K, l, p, force, fmin)\n Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *\n xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,\n force, fmin) / xdefWLC(K, l, p, force)) / K\n print(Ateo)\n for j in range(0, 1):\n Dt = 10 * dt * 2 ** j\n dn = 10 * 2 ** j\n n = 0\n folder = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'\n )\n filein = '%s%s_%dhist.txt' % (folder, force, dn)\n foldout = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'\n )\n data = np.loadtxt(filein)\n data2 = np.asarray(data)\n dx = data2[:, 0]\n pdx = data2[:, 1]\n dxav = 0.0\n pdxav = 0.0\n for k in range(0, len(dx)):\n dxav += dx[k] * pdx[k]\n pdxav += pdx[k]\n dxav /= pdxav\n pdx /= pdxav\n print(force)\n print(Dt)\n print(dxav)\n print('\\n')\n epsilon = 0.08\n delta = 0.4\n y = []\n x = []\n for k in range(0, len(dx)):\n if (dx[k] <= 0.0) & (dx[k] <= -delta):\n for l in range(0, len(dx)):\n if (dx[l] >= 0.0) & (dx[l] >= delta):\n if np.absolute(dx[k] + dx[l]) <= epsilon:\n x.append(dx[l] / dxav)\n y.append(np.log(pdx[l] / pdx[k]) / dxav)\n \"\"\"\t\t\n\t\tprint(x)\n\t\t#print(\"\n\")\n\t\t#print(y)\n\t\tprint(len(x))\n\t\tprint(\"\n\")\n\t\tprint(len(y))\n\t\t#plt.plot(x,y, 'o')\n\t\tplt.plot(x,y)\n\t\tplt.show()\n\t\t\"\"\"\n fileout = '%s%s_%dFT.txt' % (foldout, force, dn)\n fout = open(fileout, 'w')\n for k in range(0, len(x)):\n fout.write('%.12f %.12f\\n' % (x[k], y[k]))\n fout.close()\n",
"step-4": "import glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport seaborn as sns\nimport os\nfrom pathlib import Path\n\n\ndef f1(l, lp, kbt):\n return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)\n\n\ndef derf1(l, lp, kbt):\n return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)\n\n\ndef xdefWLC(kbt, l, p, f):\n l0 = 0.9999\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n if abs(f) < 1e-05:\n return 0.0\n while abs(l0 - lnew) > 1e-05:\n l0 = lnew\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n xdef = l * lnew\n return xdef\n\n\ndef intfdexWLC(kbt, l, p, f):\n l0 = xdefWLC(kbt, l, p, f)\n return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *\n l) - l0 / l)\n\n\ndef stretching_energy(K, l, p, f, fmin):\n total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)\n return total\n\n\nforces = [5, 6, 7, 8, 9, 10, 11, 12]\ndelimiter = ' '\np = 0.75\nK = 4.114\nl = 0.66\nDeltamu = 2.0 * K\nDeltabp = 2.0 * K\nn_atp = 1.0\nforce = forces[7]\nk0 = 1000000.0\ndt = 0.5 / (k0 * 5)\nfmin = 0.0\n<mask token>\nfor i in range(1, 2):\n force = forces[i]\n fmin = 0.0\n DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force\n ) - 2.0 * stretching_energy(K, l, p, force, fmin)\n Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *\n xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,\n force, fmin) / xdefWLC(K, l, p, force)) / K\n print(Ateo)\n for j in range(0, 1):\n Dt = 10 * dt * 2 ** j\n dn = 10 * 2 ** j\n n = 0\n folder = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'\n )\n filein = '%s%s_%dhist.txt' % (folder, force, dn)\n foldout = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'\n )\n data = np.loadtxt(filein)\n data2 = np.asarray(data)\n dx = data2[:, 0]\n pdx = data2[:, 1]\n dxav = 0.0\n pdxav = 0.0\n for k in range(0, len(dx)):\n dxav += dx[k] * pdx[k]\n pdxav += pdx[k]\n dxav /= pdxav\n pdx /= pdxav\n print(force)\n print(Dt)\n print(dxav)\n print('\\n')\n epsilon = 0.08\n delta = 0.4\n y = []\n x = []\n for k in range(0, len(dx)):\n if (dx[k] <= 0.0) & (dx[k] <= -delta):\n for l in range(0, len(dx)):\n if (dx[l] >= 0.0) & (dx[l] >= delta):\n if np.absolute(dx[k] + dx[l]) <= epsilon:\n x.append(dx[l] / dxav)\n y.append(np.log(pdx[l] / pdx[k]) / dxav)\n \"\"\"\t\t\n\t\tprint(x)\n\t\t#print(\"\n\")\n\t\t#print(y)\n\t\tprint(len(x))\n\t\tprint(\"\n\")\n\t\tprint(len(y))\n\t\t#plt.plot(x,y, 'o')\n\t\tplt.plot(x,y)\n\t\tplt.show()\n\t\t\"\"\"\n fileout = '%s%s_%dFT.txt' % (foldout, force, dn)\n fout = open(fileout, 'w')\n for k in range(0, len(x)):\n fout.write('%.12f %.12f\\n' % (x[k], y[k]))\n fout.close()\n",
"step-5": "#!/usr/bin/python\n\nimport glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport seaborn as sns\nimport os\nfrom pathlib import Path\n#import math\n#functions related to elasticity of WLC\ndef f1(l,lp, kbt):\n return kbt/lp*(1./(4*(1.-l)*(1.-l))-.25+l)\n\ndef derf1(l,lp,kbt):\n return kbt/lp*(.5/((1.-l)*(1.-l)*(1.-l))+1.)\n\ndef xdefWLC(kbt, l, p, f):\n l0=.9999\n lnew=l0-(f1(l0,p,kbt)-f)/derf1(l0,p,kbt)\n\n if abs(f)<1.e-5: return 0.0\n while abs(l0-lnew)>1.e-5:\n l0=lnew\n lnew=l0-(f1(l0,p,kbt)-f)/derf1(l0,p,kbt)\n \n xdef=l*lnew \n return xdef\n\ndef intfdexWLC (kbt, l, p, f):\n l0=xdefWLC(kbt,l,p,f)\n return kbt*l/(4.*p)*(1./(1.-l0/l)+2*l0*l0/(l*l)-l0/l)\n\ndef stretching_energy(K,l,p,f,fmin):\n total= intfdexWLC(K,l,p,f)-intfdexWLC(K,l,p,fmin)\n return total\n\nforces = [ 5 ,6 ,7 , 8, 9, 10, 11, 12]\ndelimiter = \" \"\n\n\np = 0.75 \nK = 4.114\nl = 0.66\n \n#value of mu(pN/nm), N, etc\nDeltamu=2.0*K #chemical potential of 1 ATP 3*K\nDeltabp=2.0*K #free-energy of 1 bp opening\nn_atp=1. #Number of bp opened by 1-step of the motor\n\nforce = forces[7]\n\n#frequency bps\nk0=1000000.\n#montecarlo step = 1e-7\ndt=0.5/(k0*5)\nfmin=0.0\n\n'''\nprint (DeltaG)\nprint(n_atp*Deltamu)\nprint(Deltabp)\nprint(2.*force*xdefWLC(K,l,p,force))\nprint(2.*stretching_energy(K,l,p,force,fmin))\n#time.sleep(30.5)\nprint(\"pre\")\nprint(k0*np.exp(-DeltaG/(2.*K))*dt)\nprint(\"pun\")\nprint(k0*np.exp(DeltaG/(2.*K))*dt)\n'''\n\n\n\nfor i in range(1,2):\n\tforce=forces[i]\n\tfmin=0.0\n\tDeltaG=n_atp*Deltamu-Deltabp+2.*force*xdefWLC(K,l,p,force)-2.*stretching_energy(K,l,p,force,fmin)\n\tAteo=(n_atp*Deltamu/(2*xdefWLC(K,l,p,force))-Deltabp/(2*xdefWLC(K,l,p,force))+force-1.*stretching_energy(K,l,p,force,fmin)/xdefWLC(K,l,p,force))/K\n\tprint(Ateo)\n\tfor j in range (0,1):\n\t\t#CHECK parameter dt is the same than for the files processed in hist\n\t\tDt=10*dt*2**j\n\t\t#print(Dt)\n\t\tdn=10*2**j\n\t\tn=0\n\t\tfolder='/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'\n\t\tfilein='%s%s_%dhist.txt' %(folder,force,dn)\n\t\t#File_in Dx, p(Dx)\n\t\tfoldout='/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'\n\t\t#reading input file\n\t\tdata=np.loadtxt(filein)\n\t\tdata2=np.asarray(data)\n\t\tdx=data2[:,0]\n\t\tpdx=data2[:,1]\n\t\t\n\t\t#computing the average <Dx>\n\t\tdxav=0.\n\t\tpdxav=0.\n\t\t##Cal renormalitzar de nou les probabilitats (al descartar el 0 la integral ha canviat)\n\t\tfor k in range(0,len(dx)):\n\t\t\tdxav+=dx[k]*pdx[k]\n\t\t\tpdxav+=pdx[k]\n\t\tdxav/=pdxav\n\t\tpdx/=pdxav\n\t\t\n\t\tprint(force)\n\t\tprint(Dt)\n\t\tprint(dxav)\n\t\tprint(\"\\n\")\n\t\t\n\t\tepsilon=0.08\n\t\tdelta=0.4\n\t\ty=[]\n\t\tx=[]\n\t\tfor k in range(0,len(dx)):\n\t\t\tif (dx[k]<=0.) & (dx[k]<=-delta):\n\t\t\t\tfor l in range(0,len(dx)):\n\t\t\t\t#comprovem si x[l]+x[k]<epsilon --> computem \n\t\t\t\t\tif (dx[l]>=0.) & (dx[l]>=delta):\n\t\t\t\t\t\tif np.absolute(dx[k]+dx[l]) <= epsilon:\n\t\t\t\t\t\t\tx.append(dx[l]/dxav)\n\t\t\t\t\t\t\ty.append((np.log(pdx[l]/pdx[k]))/dxav)\n\t\t'''\t\t\n\t\tprint(x)\n\t\t#print(\"\\n\")\n\t\t#print(y)\n\t\tprint(len(x))\n\t\tprint(\"\\n\")\n\t\tprint(len(y))\n\t\t#plt.plot(x,y, 'o')\n\t\tplt.plot(x,y)\n\t\tplt.show()\n\t\t'''\n\t\t#writing to an output file for each DT\t\t\n\t\tfileout='%s%s_%dFT.txt' %(foldout,force,dn)\n\t\tfout=open(fileout,\"w\")\n\t\t#fout.write()\n\t\tfor k in range(0,len(x)):\n\t\t\tfout.write('%.12f %.12f\\n'%(x[k],y[k]))\t\t\n\t\tfout.close()\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
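A quick consistency check for the Newton inversion in xdefWLC above: feeding the recovered relative extension back through f1 should reproduce the applied force. This sketch assumes the functions from the record above are in scope; the parameter values mirror the script (K = kBT = 4.114 pN nm, p = 0.75, l = 0.66).

K, p, l = 4.114, 0.75, 0.66
for f in (1.0, 5.0, 10.0):
    x = xdefWLC(K, l, p, f)                    # extension at force f
    assert abs(f1(x / l, p, K) - f) < 1e-2, (f, x)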
import matplotlib.image as mpimg
import cv2
import rasterio
from ode_data_access.image_utils import view_as_blocks, is_black, align_and_crop
import os
import numpy as np
from tqdm import tqdm
class ChunkProcessor:
def write_result_blocks(self, result_blocks, window, product_name, chunk_size, save_dir='test', skip_black_images=False,
align_and_crop_thresholds=None, vectorized_chunks=None):
for i in range(result_blocks.shape[0]):
for j in range(result_blocks.shape[1]):
img = result_blocks[i][j]
if not skip_black_images or not is_black(img):
filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'
filepath = './' + save_dir + '/' + filename
mpimg.imsave(filepath, img, cmap="gray")
img = mpimg.imread(filepath)
if align_and_crop_thresholds is not None:
img = align_and_crop(img, *align_and_crop_thresholds)
img = cv2.resize(img, (chunk_size, chunk_size), cv2.INTER_AREA)
mpimg.imsave(filepath, img, cmap='gray')
new_filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'
new_filepath = './' + save_dir + '/' + new_filename
os.rename(filepath, new_filepath)
if vectorized_chunks is not None:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
vectorized_chunks.append(img.astype(np.uint8))
# Based on the idea provided here - https://gis.stackexchange.com/questions/158527/reading-raster-files-by-block-with-rasterio
def chunkify(self, img_file, product_name, chunk_size=256, save_dir='test', skip_black_images=True, align_and_crop_thresholds=None,
vectorized_chunks=None):
with rasterio.open(img_file) as src:
print('Resolution =', src.width, 'x', src.height)
print('Estimated number of iterations =', ((src.width * src.height) / (1024 * 1024)) * 1.085)
for block_index, window in tqdm(src.block_windows(1)):
block_array = src.read(window=window)
# print('Block array', block_array.shape)
block_array = np.moveaxis(block_array, 0, -1)
# print('Move axis', block_array.shape)
if block_array.shape[2] != 1:
block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)
else:
block_array = np.squeeze(block_array)
block_array_shape = block_array.shape
# plt.imshow(block_array, cmap='gray')
# print('Grayscale Block Shape', block_array_shape)
if block_array_shape[0] % chunk_size == 0 and block_array_shape[1] % chunk_size == 0:
result_blocks = view_as_blocks(block_array, block_shape=(chunk_size, chunk_size))
self.write_result_blocks(result_blocks, window, product_name, chunk_size, save_dir, skip_black_images,
align_and_crop_thresholds, vectorized_chunks)
def chunkify_all(self, save_dir_prefix, chunk_size, product_image_urls, skip_black_images=True, align_and_crop_thresholds=None,
vectorized_chunks=None):
for product_image_url, product_name in product_image_urls:
filename = product_image_url.split('/')[-1]
if filename.endswith('JP2') or filename.lower().endswith('jpg'):
print('Chunkifying', product_name)
jp2_filename = filename
chunk_dir = save_dir_prefix + '_' + product_name
if not os.path.exists(chunk_dir):
os.makedirs(chunk_dir)
self.chunkify(jp2_filename, product_name, chunk_size, chunk_dir, skip_black_images, align_and_crop_thresholds,
vectorized_chunks)
print("Number of chunks found:",
len([name for name in os.listdir(chunk_dir) if os.path.isfile(chunk_dir + '/' + name)]))
print('-----')
|
normal
|
{
"blob_id": "303e1b95c2ca60041a34b8c09e013849112a108d",
"index": 3475,
"step-1": "<mask token>\n\n\nclass ChunkProcessor:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ChunkProcessor:\n <mask token>\n\n def chunkify(self, img_file, product_name, chunk_size=256, save_dir=\n 'test', skip_black_images=True, align_and_crop_thresholds=None,\n vectorized_chunks=None):\n with rasterio.open(img_file) as src:\n print('Resolution =', src.width, 'x', src.height)\n print('Estimated number of iterations =', src.width * src.\n height / (1024 * 1024) * 1.085)\n for block_index, window in tqdm(src.block_windows(1)):\n block_array = src.read(window=window)\n block_array = np.moveaxis(block_array, 0, -1)\n if block_array.shape[2] != 1:\n block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)\n else:\n block_array = np.squeeze(block_array)\n block_array_shape = block_array.shape\n if block_array_shape[0\n ] % chunk_size == 0 and block_array_shape[1\n ] % chunk_size == 0:\n result_blocks = view_as_blocks(block_array, block_shape\n =(chunk_size, chunk_size))\n self.write_result_blocks(result_blocks, window,\n product_name, chunk_size, save_dir,\n skip_black_images, align_and_crop_thresholds,\n vectorized_chunks)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ChunkProcessor:\n\n def write_result_blocks(self, result_blocks, window, product_name,\n chunk_size, save_dir='test', skip_black_images=False,\n align_and_crop_thresholds=None, vectorized_chunks=None):\n for i in range(result_blocks.shape[0]):\n for j in range(result_blocks.shape[1]):\n img = result_blocks[i][j]\n if not skip_black_images or not is_black(img):\n filename = (\n f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'\n )\n filepath = './' + save_dir + '/' + filename\n mpimg.imsave(filepath, img, cmap='gray')\n img = mpimg.imread(filepath)\n if align_and_crop_thresholds is not None:\n img = align_and_crop(img, *align_and_crop_thresholds)\n img = cv2.resize(img, (chunk_size, chunk_size), cv2\n .INTER_AREA)\n mpimg.imsave(filepath, img, cmap='gray')\n new_filename = (\n f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'\n )\n new_filepath = './' + save_dir + '/' + new_filename\n os.rename(filepath, new_filepath)\n if vectorized_chunks is not None:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n vectorized_chunks.append(img.astype(np.uint8))\n\n def chunkify(self, img_file, product_name, chunk_size=256, save_dir=\n 'test', skip_black_images=True, align_and_crop_thresholds=None,\n vectorized_chunks=None):\n with rasterio.open(img_file) as src:\n print('Resolution =', src.width, 'x', src.height)\n print('Estimated number of iterations =', src.width * src.\n height / (1024 * 1024) * 1.085)\n for block_index, window in tqdm(src.block_windows(1)):\n block_array = src.read(window=window)\n block_array = np.moveaxis(block_array, 0, -1)\n if block_array.shape[2] != 1:\n block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)\n else:\n block_array = np.squeeze(block_array)\n block_array_shape = block_array.shape\n if block_array_shape[0\n ] % chunk_size == 0 and block_array_shape[1\n ] % chunk_size == 0:\n result_blocks = view_as_blocks(block_array, block_shape\n =(chunk_size, chunk_size))\n self.write_result_blocks(result_blocks, window,\n product_name, chunk_size, save_dir,\n skip_black_images, align_and_crop_thresholds,\n vectorized_chunks)\n <mask token>\n",
"step-4": "import matplotlib.image as mpimg\nimport cv2\nimport rasterio\nfrom ode_data_access.image_utils import view_as_blocks, is_black, align_and_crop\nimport os\nimport numpy as np\nfrom tqdm import tqdm\n\n\nclass ChunkProcessor:\n\n def write_result_blocks(self, result_blocks, window, product_name,\n chunk_size, save_dir='test', skip_black_images=False,\n align_and_crop_thresholds=None, vectorized_chunks=None):\n for i in range(result_blocks.shape[0]):\n for j in range(result_blocks.shape[1]):\n img = result_blocks[i][j]\n if not skip_black_images or not is_black(img):\n filename = (\n f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'\n )\n filepath = './' + save_dir + '/' + filename\n mpimg.imsave(filepath, img, cmap='gray')\n img = mpimg.imread(filepath)\n if align_and_crop_thresholds is not None:\n img = align_and_crop(img, *align_and_crop_thresholds)\n img = cv2.resize(img, (chunk_size, chunk_size), cv2\n .INTER_AREA)\n mpimg.imsave(filepath, img, cmap='gray')\n new_filename = (\n f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'\n )\n new_filepath = './' + save_dir + '/' + new_filename\n os.rename(filepath, new_filepath)\n if vectorized_chunks is not None:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n vectorized_chunks.append(img.astype(np.uint8))\n\n def chunkify(self, img_file, product_name, chunk_size=256, save_dir=\n 'test', skip_black_images=True, align_and_crop_thresholds=None,\n vectorized_chunks=None):\n with rasterio.open(img_file) as src:\n print('Resolution =', src.width, 'x', src.height)\n print('Estimated number of iterations =', src.width * src.\n height / (1024 * 1024) * 1.085)\n for block_index, window in tqdm(src.block_windows(1)):\n block_array = src.read(window=window)\n block_array = np.moveaxis(block_array, 0, -1)\n if block_array.shape[2] != 1:\n block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)\n else:\n block_array = np.squeeze(block_array)\n block_array_shape = block_array.shape\n if block_array_shape[0\n ] % chunk_size == 0 and block_array_shape[1\n ] % chunk_size == 0:\n result_blocks = view_as_blocks(block_array, block_shape\n =(chunk_size, chunk_size))\n self.write_result_blocks(result_blocks, window,\n product_name, chunk_size, save_dir,\n skip_black_images, align_and_crop_thresholds,\n vectorized_chunks)\n\n def chunkify_all(self, save_dir_prefix, chunk_size, product_image_urls,\n skip_black_images=True, align_and_crop_thresholds=None,\n vectorized_chunks=None):\n for product_image_url, product_name in product_image_urls:\n filename = product_image_url.split('/')[-1]\n if filename.endswith('JP2') or filename.lower().endswith('jpg'):\n print('Chunkifying', product_name)\n jp2_filename = filename\n chunk_dir = save_dir_prefix + '_' + product_name\n if not os.path.exists(chunk_dir):\n os.makedirs(chunk_dir)\n self.chunkify(jp2_filename, product_name, chunk_size,\n chunk_dir, skip_black_images, align_and_crop_thresholds,\n vectorized_chunks)\n print('Number of chunks found:', len([name for name in os.\n listdir(chunk_dir) if os.path.isfile(chunk_dir + '/' +\n name)]))\n print('-----')\n",
"step-5": "import matplotlib.image as mpimg\r\nimport cv2\r\nimport rasterio\r\nfrom ode_data_access.image_utils import view_as_blocks, is_black, align_and_crop\r\nimport os\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\n\r\n\r\nclass ChunkProcessor:\r\n\r\n def write_result_blocks(self, result_blocks, window, product_name, chunk_size, save_dir='test', skip_black_images=False,\r\n align_and_crop_thresholds=None, vectorized_chunks=None):\r\n for i in range(result_blocks.shape[0]):\r\n for j in range(result_blocks.shape[1]):\r\n img = result_blocks[i][j]\r\n if not skip_black_images or not is_black(img):\r\n filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'\r\n filepath = './' + save_dir + '/' + filename\r\n mpimg.imsave(filepath, img, cmap=\"gray\")\r\n img = mpimg.imread(filepath)\r\n\r\n if align_and_crop_thresholds is not None:\r\n img = align_and_crop(img, *align_and_crop_thresholds)\r\n img = cv2.resize(img, (chunk_size, chunk_size), cv2.INTER_AREA)\r\n mpimg.imsave(filepath, img, cmap='gray')\r\n new_filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'\r\n new_filepath = './' + save_dir + '/' + new_filename\r\n os.rename(filepath, new_filepath)\r\n\r\n if vectorized_chunks is not None:\r\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\r\n vectorized_chunks.append(img.astype(np.uint8))\r\n\r\n\r\n # Based on the idea provided here - https://gis.stackexchange.com/questions/158527/reading-raster-files-by-block-with-rasterio\r\n def chunkify(self, img_file, product_name, chunk_size=256, save_dir='test', skip_black_images=True, align_and_crop_thresholds=None,\r\n vectorized_chunks=None):\r\n with rasterio.open(img_file) as src:\r\n print('Resolution =', src.width, 'x', src.height)\r\n print('Estimated number of iterations =', ((src.width * src.height) / (1024 * 1024)) * 1.085)\r\n\r\n for block_index, window in tqdm(src.block_windows(1)):\r\n block_array = src.read(window=window)\r\n # print('Block array', block_array.shape)\r\n\r\n block_array = np.moveaxis(block_array, 0, -1)\r\n # print('Move axis', block_array.shape)\r\n\r\n if block_array.shape[2] != 1:\r\n block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)\r\n else:\r\n block_array = np.squeeze(block_array)\r\n block_array_shape = block_array.shape\r\n\r\n # plt.imshow(block_array, cmap='gray')\r\n # print('Grayscale Block Shape', block_array_shape)\r\n\r\n if block_array_shape[0] % chunk_size == 0 and block_array_shape[1] % chunk_size == 0:\r\n result_blocks = view_as_blocks(block_array, block_shape=(chunk_size, chunk_size))\r\n self.write_result_blocks(result_blocks, window, product_name, chunk_size, save_dir, skip_black_images,\r\n align_and_crop_thresholds, vectorized_chunks)\r\n\r\n\r\n def chunkify_all(self, save_dir_prefix, chunk_size, product_image_urls, skip_black_images=True, align_and_crop_thresholds=None,\r\n vectorized_chunks=None):\r\n\r\n for product_image_url, product_name in product_image_urls:\r\n filename = product_image_url.split('/')[-1]\r\n if filename.endswith('JP2') or filename.lower().endswith('jpg'):\r\n print('Chunkifying', product_name)\r\n jp2_filename = filename\r\n chunk_dir = save_dir_prefix + '_' + product_name\r\n\r\n if not os.path.exists(chunk_dir):\r\n os.makedirs(chunk_dir)\r\n\r\n self.chunkify(jp2_filename, product_name, chunk_size, chunk_dir, skip_black_images, align_and_crop_thresholds,\r\n vectorized_chunks)\r\n\r\n print(\"Number of 
chunks found:\",\r\n len([name for name in os.listdir(chunk_dir) if os.path.isfile(chunk_dir + '/' + name)]))\r\n print('-----')",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
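A sketch of how the ChunkProcessor class above is meant to be driven. The URL and product name are placeholders; chunkify_all only inspects the file name, so the corresponding .JP2 file is expected to already exist in the working directory.

processor = ChunkProcessor()
collected = []  # receives uint8 grayscale arrays when vectorized_chunks is passed

processor.chunkify_all(
    save_dir_prefix='chunks',
    chunk_size=256,
    product_image_urls=[('https://example.org/data/SAMPLE.JP2', 'SAMPLE')],
    skip_black_images=True,
    vectorized_chunks=collected,
)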
import tensorflow as tf
def Float32():
return tf.float32
def Float16():
return tf.float16
|
normal
|
{
"blob_id": "c60b8eec57d845c73ee3e00432747d23748c1706",
"index": 9537,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Float32():\n return tf.float32\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef Float32():\n return tf.float32\n\n\ndef Float16():\n return tf.float16\n",
"step-4": "import tensorflow as tf\n\n\ndef Float32():\n return tf.float32\n\n\ndef Float16():\n return tf.float16\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
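The two factories above just centralize the precision choice, so switching a model between float32 and float16 touches one function. A minimal usage sketch:

import tensorflow as tf

x = tf.zeros((2, 2), dtype=Float16())  # swap in Float32() for full precision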
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [('stores', '0001_initial')]
operations = [migrations.CreateModel(name='Assistants', fields=[('id',
models.UUIDField(default=uuid.uuid4, editable=False, primary_key=
True, serialize=False)), ('name_assistants', models.CharField(
max_length=255)), ('phone_assistants', models.IntegerField()), (
'email_assistants', models.EmailField(max_length=254)), (
'address_assistants', models.TextField()), ('timestamp', models.
DateField(auto_now=True)), ('fkstore', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, related_name='assistants', to=
'stores.Store'))])]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [('stores', '0001_initial')]
operations = [migrations.CreateModel(name='Assistants', fields=[('id',
models.UUIDField(default=uuid.uuid4, editable=False, primary_key=
True, serialize=False)), ('name_assistants', models.CharField(
max_length=255)), ('phone_assistants', models.IntegerField()), (
'email_assistants', models.EmailField(max_length=254)), (
'address_assistants', models.TextField()), ('timestamp', models.
DateField(auto_now=True)), ('fkstore', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, related_name='assistants', to=
'stores.Store'))])]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-13 02:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('stores', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Assistants',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name_assistants', models.CharField(max_length=255)),
('phone_assistants', models.IntegerField()),
('email_assistants', models.EmailField(max_length=254)),
('address_assistants', models.TextField()),
('timestamp', models.DateField(auto_now=True)),
('fkstore', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assistants', to='stores.Store')),
],
),
]
|
flexible
|
{
"blob_id": "e95de58828c63dc8ae24efff314665a308f6ce0c",
"index": 983,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('stores', '0001_initial')]\n operations = [migrations.CreateModel(name='Assistants', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('name_assistants', models.CharField(\n max_length=255)), ('phone_assistants', models.IntegerField()), (\n 'email_assistants', models.EmailField(max_length=254)), (\n 'address_assistants', models.TextField()), ('timestamp', models.\n DateField(auto_now=True)), ('fkstore', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='assistants', to=\n 'stores.Store'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('stores', '0001_initial')]\n operations = [migrations.CreateModel(name='Assistants', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('name_assistants', models.CharField(\n max_length=255)), ('phone_assistants', models.IntegerField()), (\n 'email_assistants', models.EmailField(max_length=254)), (\n 'address_assistants', models.TextField()), ('timestamp', models.\n DateField(auto_now=True)), ('fkstore', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='assistants', to=\n 'stores.Store'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.7 on 2017-12-13 02:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('stores', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Assistants',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('name_assistants', models.CharField(max_length=255)),\n ('phone_assistants', models.IntegerField()),\n ('email_assistants', models.EmailField(max_length=254)),\n ('address_assistants', models.TextField()),\n ('timestamp', models.DateField(auto_now=True)),\n ('fkstore', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assistants', to='stores.Store')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
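For reference, the CreateModel operation above corresponds to a model definition along these lines. This is a reconstruction from the migration, not the project's actual models.py, and the stores import path is an assumption:

import uuid

from django.db import models
from stores.models import Store  # assumed location of the Store model

class Assistants(models.Model):
    id = models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True)
    name_assistants = models.CharField(max_length=255)
    phone_assistants = models.IntegerField()
    email_assistants = models.EmailField()
    address_assistants = models.TextField()
    timestamp = models.DateField(auto_now=True)
    fkstore = models.ForeignKey(Store, on_delete=models.CASCADE,
                                related_name='assistants')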
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
'Um funcioario que ganhava R$ {:.2f} com o aumento de 15% passa a ganhar R$ {:.2f}'
.format(salario, novo))
<|reserved_special_token_1|>
salario = float(input('Qual o valor do seu Salario atual? R$ '))
novo = salario + salario * 15 / 100
print(
'Um funcioario que ganhava R$ {:.2f} com o aumento de 15% passa a ganhar R$ {:.2f}'
.format(salario, novo))
<|reserved_special_token_1|>
salario = float(input('Qual o valor do seu Salario atual? R$ '))
novo = salario + (salario * 15 / 100)
print('Um funcioario que ganhava R$ {:.2f} com o aumento de 15% passa a ganhar R$ {:.2f}'.format(salario, novo))
|
flexible
|
{
"blob_id": "ffcd3c0086ff73eb722d867b335df23382615d20",
"index": 1657,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n 'Um funcioario que ganhava R$ {:.2f} com o aumento de 15% passa a ganhar R$ {:.2f}'\n .format(salario, novo))\n",
"step-3": "salario = float(input('Qual o valor do seu Salario atual? R$ '))\nnovo = salario + salario * 15 / 100\nprint(\n 'Um funcioario que ganhava R$ {:.2f} com o aumento de 15% passa a ganhar R$ {:.2f}'\n .format(salario, novo))\n",
"step-4": "salario = float(input('Qual o valor do seu Salario atual? R$ '))\nnovo = salario + (salario * 15 / 100)\nprint('Um funcioario que ganhava R$ {:.2f} com o aumento de 15% passa a ganhar R$ {:.2f}'.format(salario, novo))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
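The raise arithmetic above can also be written in one step; both forms compute the same value (values here are illustrative):

salario = 1000.0
novo = salario + salario * 15 / 100  # as in the script above
print(novo)                          # 1150.0, i.e. salario * 1.15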
"""
If you are using MultiScript Editor make sure to set PYTHONPATH to Winexs' editor.
You can use set PYTHONPATH=c:/users/username/myscripts
Set paths according to your project!
"""
CHROME_WEBDRIVER = 'c:/users/username/project/chromedriver.exe'
WEBSITE_PDF_CONVERTER = 'https://www.ilovepdf.com/merge_pdf'
PDF_FILES = 'c:/users/username/project'
|
normal
|
{
"blob_id": "0fdbdfe98496ebedb112c85b79836292ffa3a5a9",
"index": 9076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nCHROME_WEBDRIVER = 'c:/users/username/project/chromedriver.exe'\nWEBSITE_PDF_CONVERTER = 'https://www.ilovepdf.com/merge_pdf'\nPDF_FILES = 'c:/users/username/project'\n",
"step-3": "\"\"\"\nIf you are using MultiScript Editor make sure to set PYTHONPATH to Winexs' editor.\nYou can use set PYTHONPATH=c:/users/username/myscripts\n\nSet paths according to your project!\n\"\"\"\n\nCHROME_WEBDRIVER = 'c:/users/username/project/chromedriver.exe'\nWEBSITE_PDF_CONVERTER = 'https://www.ilovepdf.com/merge_pdf'\nPDF_FILES = 'c:/users/username/project'",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
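One plausible consumer of the constants above, sketched with Selenium. The Selenium usage and the `config` module name are assumptions; the original project may drive the browser differently:

from selenium import webdriver

import config  # hypothetical module holding the constants above

driver = webdriver.Chrome(config.CHROME_WEBDRIVER)  # Selenium 3 style executable path
driver.get(config.WEBSITE_PDF_CONVERTER)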
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fetch screen scores with customizable search criteria
that can be tailored to match your own requirements
in tab format
"""
import requests
from core import config as cfg
screen_id = 178
request_url = cfg.BASE_URL + "/screen/" + str(screen_id)
# These parameters can be modified to match any search criteria following
# the rules outlined in the Wiki: https://wiki.thebiogrid.org/doku.php/orcs:webservice
# In this instance, we've chosen to return results in "tab" format with a header, and
# to limit scores in the SCORE.1 column to the range of 0.9 -> 0.98
params = {
"accesskey": cfg.ACCESS_KEY,
"format": "tab",
"header": "yes",
"score1min": 0.9,
"score1max": 0.98
}
r = requests.get( request_url, params = params )
screen = r.text.splitlines( )
row_count = 0
data = {}
for row in screen :
# Skip the header, but you could have also simply turned
# it off with header: "no" as a parameter instead
if row_count == 0 :
row_count = row_count + 1
continue
# Tab files are tab delimited
row = row.split( "\t" )
# create a hash of results by gene identifier
data[row[1]] = row
# Print out data about the genes BRIX1, ASB4, and NOB1
print( data['55299'] )
print( data['51666'] )
print( data['28987'] )
"""
Output as of version 1.0.1:
['178', '55299', 'gene', 'BRIX1', 'BRIX|BXDC2|FLJ11100', '9606', 'Homo sapiens', '0.94239', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
['178', '51666', 'gene', 'ASB4', 'ASB-4', '9606', 'Homo sapiens', '0.97613', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
['178', '28987', 'gene', 'NOB1', 'ART-4|MST158|MSTP158|NOB1P|PSMD8BP1', '9606', 'Homo sapiens', '0.96316', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
"""
|
normal
|
{
"blob_id": "80c6dd1c76b3ac56f34e36f571e8db3927994311",
"index": 8162,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-3": "<mask token>\nscreen_id = 178\nrequest_url = cfg.BASE_URL + '/screen/' + str(screen_id)\nparams = {'accesskey': cfg.ACCESS_KEY, 'format': 'tab', 'header': 'yes',\n 'score1min': 0.9, 'score1max': 0.98}\nr = requests.get(request_url, params=params)\nscreen = r.text.splitlines()\nrow_count = 0\ndata = {}\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-4": "<mask token>\nimport requests\nfrom core import config as cfg\nscreen_id = 178\nrequest_url = cfg.BASE_URL + '/screen/' + str(screen_id)\nparams = {'accesskey': cfg.ACCESS_KEY, 'format': 'tab', 'header': 'yes',\n 'score1min': 0.9, 'score1max': 0.98}\nr = requests.get(request_url, params=params)\nscreen = r.text.splitlines()\nrow_count = 0\ndata = {}\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFetch screen scores with customizable search criteria\nthat can be tailored to match your own requirements\nin tab format\n\"\"\"\n\nimport requests\nfrom core import config as cfg\n\nscreen_id = 178\nrequest_url = cfg.BASE_URL + \"/screen/\" + str(screen_id)\n\n# These parameters can be modified to match any search criteria following\n# the rules outlined in the Wiki: https://wiki.thebiogrid.org/doku.php/orcs:webservice\n# In this instance, we've chosen to return results in \"tab\" format with a header, and \n# to limit scores in the SCORE.1 column to the range of 0.9 -> 0.98\nparams = {\n \"accesskey\": cfg.ACCESS_KEY,\n \"format\": \"tab\",\n \"header\": \"yes\",\n \"score1min\": 0.9,\n \"score1max\": 0.98\n}\n\nr = requests.get( request_url, params = params )\nscreen = r.text.splitlines( )\n\nrow_count = 0\ndata = {}\nfor row in screen :\n\n # Skip the header, but you could have also simply turned\n # it off with header: \"no\" as a parameter instead\n if row_count == 0 :\n row_count = row_count + 1\n continue\n\n # Tab files are tab delimited\n row = row.split( \"\\t\" )\n \n # create a hash of results by gene identifier\n data[row[1]] = row\n\n# Print out data about the genes BRIX1, ASB4, and NOB1\nprint( data['55299'] )\nprint( data['51666'] )\nprint( data['28987'] )\n\n\"\"\" \nOutput as of version 1.0.1:\n['178', '55299', 'gene', 'BRIX1', 'BRIX|BXDC2|FLJ11100', '9606', 'Homo sapiens', '0.94239', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n['178', '51666', 'gene', 'ASB4', 'ASB-4', '9606', 'Homo sapiens', '0.97613', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n['178', '28987', 'gene', 'NOB1', 'ART-4|MST158|MSTP158|NOB1P|PSMD8BP1', '9606', 'Homo sapiens', '0.96316', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n\"\"\"",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
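Continuing from the `data` dict built above, the SCORE.1 values sit at index 7 of each tab row (see the sample output in the record), so summarizing the filtered screen is a short follow-up sketch:

scores = {gene_id: float(row[7]) for gene_id, row in data.items()}
print(len(scores), 'genes with 0.9 <= SCORE.1 <= 0.98')
print(max(scores, key=scores.get), 'has the highest SCORE.1')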
import pytest
from feast.pyspark.launchers.gcloud import DataprocClusterLauncher
@pytest.fixture
def dataproc_launcher(pytestconfig) -> DataprocClusterLauncher:
cluster_name = pytestconfig.getoption("--dataproc-cluster-name")
region = pytestconfig.getoption("--dataproc-region")
project_id = pytestconfig.getoption("--dataproc-project")
staging_location = pytestconfig.getoption("--dataproc-staging-location")
return DataprocClusterLauncher(
cluster_name=cluster_name,
staging_location=staging_location,
region=region,
project_id=project_id,
)
|
normal
|
{
"blob_id": "ff13ac0ee401471fe5446e8149f019d9da7f3ddf",
"index": 5147,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef dataproc_launcher(pytestconfig) ->DataprocClusterLauncher:\n cluster_name = pytestconfig.getoption('--dataproc-cluster-name')\n region = pytestconfig.getoption('--dataproc-region')\n project_id = pytestconfig.getoption('--dataproc-project')\n staging_location = pytestconfig.getoption('--dataproc-staging-location')\n return DataprocClusterLauncher(cluster_name=cluster_name,\n staging_location=staging_location, region=region, project_id=project_id\n )\n",
"step-3": "import pytest\nfrom feast.pyspark.launchers.gcloud import DataprocClusterLauncher\n\n\n@pytest.fixture\ndef dataproc_launcher(pytestconfig) ->DataprocClusterLauncher:\n cluster_name = pytestconfig.getoption('--dataproc-cluster-name')\n region = pytestconfig.getoption('--dataproc-region')\n project_id = pytestconfig.getoption('--dataproc-project')\n staging_location = pytestconfig.getoption('--dataproc-staging-location')\n return DataprocClusterLauncher(cluster_name=cluster_name,\n staging_location=staging_location, region=region, project_id=project_id\n )\n",
"step-4": "import pytest\n\nfrom feast.pyspark.launchers.gcloud import DataprocClusterLauncher\n\n\n@pytest.fixture\ndef dataproc_launcher(pytestconfig) -> DataprocClusterLauncher:\n cluster_name = pytestconfig.getoption(\"--dataproc-cluster-name\")\n region = pytestconfig.getoption(\"--dataproc-region\")\n project_id = pytestconfig.getoption(\"--dataproc-project\")\n staging_location = pytestconfig.getoption(\"--dataproc-staging-location\")\n return DataprocClusterLauncher(\n cluster_name=cluster_name,\n staging_location=staging_location,\n region=region,\n project_id=project_id,\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
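The fixture above reads four custom command-line options, which pytest only exposes if they are registered in a conftest.py. A minimal registration matching the getoption() calls would look like this (the defaults are placeholders):

def pytest_addoption(parser):
    parser.addoption("--dataproc-cluster-name", action="store", default=None)
    parser.addoption("--dataproc-region", action="store", default=None)
    parser.addoption("--dataproc-project", action="store", default=None)
    parser.addoption("--dataproc-staging-location", action="store", default=None)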
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from .models import Blog, BlogType
from django.conf import settings
from read_statistics.utils import read_statistics_once_read
from user.forms import LoginForm
# Create your views here.
#Pagination helper for the blog list views
def get_blogs_common_data(request, blogs_all_list):
    # Build the paginator
page_num = request.GET.get('page', 1)
paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)
page_of_blogs = paginator.get_page(page_num)
current_page_num = page_of_blogs.number
page_range = list(range(max(1, current_page_num - 2), min(paginator.num_pages + 1, current_page_num + 3)))
if page_range[0] != 1:
page_range.insert(0, 1)
if page_range[1] - page_range[0] >= 2:
page_range.insert(1, '...')
if page_range[-1] != paginator.num_pages:
page_range.append(paginator.num_pages)
if page_range[-1] - page_range[-2] >= 2:
page_range.insert(-1, '...')
    # Get the per-month blog counts for the date archive
blog_dates = dict()
all_dates = Blog.objects.dates('created_time', 'month', order='DESC')
for blogs_date in all_dates:
blogs_count = Blog.objects.filter(created_time__year=blogs_date.year,
created_time__month=blogs_date.month).count()
blog_dates[blogs_date] = blogs_count
    # Gather the common context data
context = dict()
context['blogs'] = page_of_blogs
context['page_range'] = page_range
    # The annotate method could attach a computed count to each object:
#context['blog_types'] = BlogType.objects.annotate(blog_count=Count('blog'))
context['blog_types'] = BlogType.objects.all()
context['blog_dates'] = blog_dates
return context
def blog_list(request):
blogs_all_list = Blog.objects.all()
context = get_blogs_common_data(request, blogs_all_list)
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_type_pk):
blog_type = get_object_or_404(BlogType, pk=blog_type_pk)
blogs_all_list = Blog.objects.filter(blog_type=blog_type)
context = get_blogs_common_data(request, blogs_all_list)
context['blog_type'] = blog_type
return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request, year, month):
blogs_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)
context = get_blogs_common_data(request, blogs_all_list)
context['blogs_with_date'] = '%s年%s' % (year, month)
return render(request, 'blog/blogs_with_date.html', context)
def blog_detail(request, blog_pk):
blog = get_object_or_404(Blog, pk=blog_pk)
read_cookie_key = read_statistics_once_read(request, blog)
context = dict()
context['blog'] = blog
context['blog_author'] = blog.author.get_nickname_or_username()
context['login_form'] = LoginForm()
context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()
context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()
context['blog_dates'] = Blog.objects.dates('created_time', 'month', order='DESC')
response = render(request, 'blog/blog_detail.html', context)
response.set_cookie(read_cookie_key, 'true')
return response
|
normal
|
{
"blob_id": "9731f45b19d40a031216f8a430c09764fd34e984",
"index": 2594,
"step-1": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\n<mask token>\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-2": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\n<mask token>\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-3": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year,\n created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-4": "from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom .models import Blog, BlogType\nfrom django.conf import settings\nfrom read_statistics.utils import read_statistics_once_read\nfrom user.forms import LoginForm\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year,\n created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-5": "from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom .models import Blog, BlogType\nfrom django.conf import settings\nfrom read_statistics.utils import read_statistics_once_read\nfrom user.forms import LoginForm\n\n# Create your views here.\n#分页函数\ndef get_blogs_common_data(request, blogs_all_list):\n # 分页器\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n # 获取日期归档的博客统计数量\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.year,\n created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n\n # 获取公共的数据\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n # 运用annotate方法给对象添加注释\n #context['blog_types'] = BlogType.objects.annotate(blog_count=Count('blog'))\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month', order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from datetime import datetime
import logging
import os
import re
from bs4 import BeautifulSoup
import requests
from .utils.log import get_logger
logger = get_logger(os.path.basename(__file__))
EVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN']
def get_category_name(page):
if page["category_id"] is None:
category = ''
else:
if page["subcategory_id"] is None:
category = get(page["category_id"], 'categories/').json()["name"]
else:
category_name = get(page["category_id"], 'categories/')
category_name = category_name.json()["name"]
category_name = category_name.replace(",", "")
subcategory_name = get(page["subcategory_id"], 'subcategories/')
subcategory_name = subcategory_name.json()["name"]
subcategory_name = subcategory_name.replace(",", "")
category = category_name + "," + subcategory_name
return category
def scrape(event_id, event_cost):
page = get(event_id, resource='events').json()
venue = get(page["venue_id"], resource='venues').json()
start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')
end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')
desc = "(" + venue["address"]["region"] + ") " + page["summary"]
event_data = {
'Event Name': page['name']['text'],
'Event Description': desc,
'Event Start Date': start.strftime('%Y-%m-%d'),
'Event Start Time': start.strftime('%H:%M:%S'),
'Event End Date': end.strftime('%Y-%m-%d'),
'Event End Time': end.strftime('%H:%M:%S'),
'All Day Event': "False",
'Timezone': "America/New_York",
'Event Venue Name': venue["name"],
'Event Organizers': 'Sierra Club MD',
'Event Cost': event_cost,
'Event Currency Symbol': "$",
# TODO: parse event data for optional category fields if present
'Event Category': get_category_name(page),
'Event Website': page['url'],
'Event Featured Image': ""
}
return event_data
def get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):
url = f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' \
else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}'
try:
if resource != 'o':
r = requests.get(url, params=params)
else:
r = requests.get(url)
except Exception as e:
msg = f"Exception making GET request to {url}: {e}"
logger.critical(msg, exc_info=True)
return
if not r.ok:
code = r.status_code
msg = f"Non-200 status code of {code} making GET request to: {url}"
logger.critical(msg, exc_info=True)
return r
def get_live_events(soup):
live_events = soup.find("article", {"id": "live_events"})
try:
event_divs = live_events.find_all("div", {"class": "list-card-v2"})
except AttributeError:
return []
return event_divs
def get_cost_events(soup):
cost = soup.find("span", {"class": "list-card__label"}).text
cost = cost.lower()
cost = cost.replace("free", "0")
cost = re.sub(r'[^\d]+', '', cost)
if cost == "":
cost = "0"
return cost
def main():
events_array = []
r = get(14506382808, 'o')
soup = BeautifulSoup(r.content, 'html.parser')
event_a_refs = get_live_events(soup)
for events in event_a_refs:
event_cost = get_cost_events(events)
event_id = events.find("a").get("data-eid")
events_array.append(scrape(event_id, event_cost))
return events_array
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
events = main()
print(len(events))
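    # Hedged sanity check of get_cost_events on a hand-built snippet; the
    # HTML below is illustrative, not real Eventbrite markup.
    sample = BeautifulSoup('<span class="list-card__label">Free</span>',
                           'html.parser')
    assert get_cost_events(sample) == '0'  # "free" normalizes to cost "0"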
|
normal
|
{
"blob_id": "edfc8794fab2c95e01ae254f9f13d446faafe6fd",
"index": 9213,
"step-1": "<mask token>\n\n\ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page['venue_id'], resource='venues').json()\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = '(' + venue['address']['region'] + ') ' + page['summary']\n event_data = {'Event Name': page['name']['text'], 'Event Description':\n desc, 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date':\n end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'\n ), 'All Day Event': 'False', 'Timezone': 'America/New_York',\n 'Event Venue Name': venue['name'], 'Event Organizers':\n 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol':\n '$', 'Event Category': get_category_name(page), 'Event Website':\n page['url'], 'Event Featured Image': ''}\n return event_data\n\n\ndef get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else\n f'https://www.eventbriteapi.com/v3/{resource}/{api_id}')\n try:\n if resource != 'o':\n r = requests.get(url, params=params)\n else:\n r = requests.get(url)\n except Exception as e:\n msg = f'Exception making GET request to {url}: {e}'\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f'Non-200 status code of {code} making GET request to: {url}'\n logger.critical(msg, exc_info=True)\n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find('article', {'id': 'live_events'})\n try:\n event_divs = live_events.find_all('div', {'class': 'list-card-v2'})\n except AttributeError:\n return []\n return event_divs\n\n\ndef get_cost_events(soup):\n cost = soup.find('span', {'class': 'list-card__label'}).text\n cost = cost.lower()\n cost = cost.replace('free', '0')\n cost = re.sub('[^\\\\d]+', '', cost)\n if cost == '':\n cost = '0'\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser')\n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find('a').get('data-eid')\n events_array.append(scrape(event_id, event_cost))\n return events_array\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_category_name(page):\n if page['category_id'] is None:\n category = ''\n elif page['subcategory_id'] is None:\n category = get(page['category_id'], 'categories/').json()['name']\n else:\n category_name = get(page['category_id'], 'categories/')\n category_name = category_name.json()['name']\n category_name = category_name.replace(',', '')\n subcategory_name = get(page['subcategory_id'], 'subcategories/')\n subcategory_name = subcategory_name.json()['name']\n subcategory_name = subcategory_name.replace(',', '')\n category = category_name + ',' + subcategory_name\n return category\n\n\ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page['venue_id'], resource='venues').json()\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = '(' + venue['address']['region'] + ') ' + page['summary']\n event_data = {'Event Name': page['name']['text'], 'Event Description':\n desc, 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date':\n end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'\n ), 'All Day Event': 'False', 'Timezone': 'America/New_York',\n 'Event Venue Name': venue['name'], 'Event Organizers':\n 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol':\n '$', 'Event Category': get_category_name(page), 'Event Website':\n page['url'], 'Event Featured Image': ''}\n return event_data\n\n\ndef get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else\n f'https://www.eventbriteapi.com/v3/{resource}/{api_id}')\n try:\n if resource != 'o':\n r = requests.get(url, params=params)\n else:\n r = requests.get(url)\n except Exception as e:\n msg = f'Exception making GET request to {url}: {e}'\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f'Non-200 status code of {code} making GET request to: {url}'\n logger.critical(msg, exc_info=True)\n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find('article', {'id': 'live_events'})\n try:\n event_divs = live_events.find_all('div', {'class': 'list-card-v2'})\n except AttributeError:\n return []\n return event_divs\n\n\ndef get_cost_events(soup):\n cost = soup.find('span', {'class': 'list-card__label'}).text\n cost = cost.lower()\n cost = cost.replace('free', '0')\n cost = re.sub('[^\\\\d]+', '', cost)\n if cost == '':\n cost = '0'\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser')\n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find('a').get('data-eid')\n events_array.append(scrape(event_id, event_cost))\n return events_array\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n events = main()\n print(len(events))\n",
"step-3": "<mask token>\nlogger = get_logger(os.path.basename(__file__))\nEVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN']\n\n\ndef get_category_name(page):\n if page['category_id'] is None:\n category = ''\n elif page['subcategory_id'] is None:\n category = get(page['category_id'], 'categories/').json()['name']\n else:\n category_name = get(page['category_id'], 'categories/')\n category_name = category_name.json()['name']\n category_name = category_name.replace(',', '')\n subcategory_name = get(page['subcategory_id'], 'subcategories/')\n subcategory_name = subcategory_name.json()['name']\n subcategory_name = subcategory_name.replace(',', '')\n category = category_name + ',' + subcategory_name\n return category\n\n\ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page['venue_id'], resource='venues').json()\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = '(' + venue['address']['region'] + ') ' + page['summary']\n event_data = {'Event Name': page['name']['text'], 'Event Description':\n desc, 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date':\n end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'\n ), 'All Day Event': 'False', 'Timezone': 'America/New_York',\n 'Event Venue Name': venue['name'], 'Event Organizers':\n 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol':\n '$', 'Event Category': get_category_name(page), 'Event Website':\n page['url'], 'Event Featured Image': ''}\n return event_data\n\n\ndef get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else\n f'https://www.eventbriteapi.com/v3/{resource}/{api_id}')\n try:\n if resource != 'o':\n r = requests.get(url, params=params)\n else:\n r = requests.get(url)\n except Exception as e:\n msg = f'Exception making GET request to {url}: {e}'\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f'Non-200 status code of {code} making GET request to: {url}'\n logger.critical(msg, exc_info=True)\n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find('article', {'id': 'live_events'})\n try:\n event_divs = live_events.find_all('div', {'class': 'list-card-v2'})\n except AttributeError:\n return []\n return event_divs\n\n\ndef get_cost_events(soup):\n cost = soup.find('span', {'class': 'list-card__label'}).text\n cost = cost.lower()\n cost = cost.replace('free', '0')\n cost = re.sub('[^\\\\d]+', '', cost)\n if cost == '':\n cost = '0'\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser')\n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find('a').get('data-eid')\n events_array.append(scrape(event_id, event_cost))\n return events_array\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n events = main()\n print(len(events))\n",
"step-4": "from datetime import datetime\nimport logging\nimport os\nimport re\nfrom bs4 import BeautifulSoup\nimport requests\nfrom .utils.log import get_logger\nlogger = get_logger(os.path.basename(__file__))\nEVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN']\n\n\ndef get_category_name(page):\n if page['category_id'] is None:\n category = ''\n elif page['subcategory_id'] is None:\n category = get(page['category_id'], 'categories/').json()['name']\n else:\n category_name = get(page['category_id'], 'categories/')\n category_name = category_name.json()['name']\n category_name = category_name.replace(',', '')\n subcategory_name = get(page['subcategory_id'], 'subcategories/')\n subcategory_name = subcategory_name.json()['name']\n subcategory_name = subcategory_name.replace(',', '')\n category = category_name + ',' + subcategory_name\n return category\n\n\ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page['venue_id'], resource='venues').json()\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = '(' + venue['address']['region'] + ') ' + page['summary']\n event_data = {'Event Name': page['name']['text'], 'Event Description':\n desc, 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date':\n end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'\n ), 'All Day Event': 'False', 'Timezone': 'America/New_York',\n 'Event Venue Name': venue['name'], 'Event Organizers':\n 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol':\n '$', 'Event Category': get_category_name(page), 'Event Website':\n page['url'], 'Event Featured Image': ''}\n return event_data\n\n\ndef get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else\n f'https://www.eventbriteapi.com/v3/{resource}/{api_id}')\n try:\n if resource != 'o':\n r = requests.get(url, params=params)\n else:\n r = requests.get(url)\n except Exception as e:\n msg = f'Exception making GET request to {url}: {e}'\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f'Non-200 status code of {code} making GET request to: {url}'\n logger.critical(msg, exc_info=True)\n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find('article', {'id': 'live_events'})\n try:\n event_divs = live_events.find_all('div', {'class': 'list-card-v2'})\n except AttributeError:\n return []\n return event_divs\n\n\ndef get_cost_events(soup):\n cost = soup.find('span', {'class': 'list-card__label'}).text\n cost = cost.lower()\n cost = cost.replace('free', '0')\n cost = re.sub('[^\\\\d]+', '', cost)\n if cost == '':\n cost = '0'\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser')\n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find('a').get('data-eid')\n events_array.append(scrape(event_id, event_cost))\n return events_array\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n events = main()\n print(len(events))\n",
"step-5": "from datetime import datetime\nimport logging\nimport os\nimport re\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nfrom .utils.log import get_logger\n\nlogger = get_logger(os.path.basename(__file__))\n\nEVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN']\n\n\ndef get_category_name(page):\n if page[\"category_id\"] is None:\n category = ''\n else:\n if page[\"subcategory_id\"] is None:\n category = get(page[\"category_id\"], 'categories/').json()[\"name\"]\n else:\n category_name = get(page[\"category_id\"], 'categories/')\n category_name = category_name.json()[\"name\"]\n category_name = category_name.replace(\",\", \"\")\n subcategory_name = get(page[\"subcategory_id\"], 'subcategories/')\n subcategory_name = subcategory_name.json()[\"name\"]\n subcategory_name = subcategory_name.replace(\",\", \"\")\n category = category_name + \",\" + subcategory_name\n return category\n\n \ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page[\"venue_id\"], resource='venues').json()\n\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = \"(\" + venue[\"address\"][\"region\"] + \") \" + page[\"summary\"]\n event_data = {\n 'Event Name': page['name']['text'],\n 'Event Description': desc,\n 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'),\n 'Event End Date': end.strftime('%Y-%m-%d'),\n 'Event End Time': end.strftime('%H:%M:%S'),\n 'All Day Event': \"False\",\n 'Timezone': \"America/New_York\",\n 'Event Venue Name': venue[\"name\"],\n 'Event Organizers': 'Sierra Club MD',\n 'Event Cost': event_cost,\n 'Event Currency Symbol': \"$\",\n # TODO: parse event data for optional category fields if present\n 'Event Category': get_category_name(page), \n 'Event Website': page['url'],\n 'Event Featured Image': \"\"\n }\n return event_data\n\n\ndef get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' \\\n else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}' \n \n try:\n if resource != 'o':\n r = requests.get(url, params=params) \n else:\n r = requests.get(url)\n except Exception as e:\n msg = f\"Exception making GET request to {url}: {e}\"\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f\"Non-200 status code of {code} making GET request to: {url}\"\n logger.critical(msg, exc_info=True)\n \n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find(\"article\", {\"id\": \"live_events\"})\n try:\n event_divs = live_events.find_all(\"div\", {\"class\": \"list-card-v2\"})\n except AttributeError:\n return []\n \n return event_divs\n \n\ndef get_cost_events(soup):\n cost = soup.find(\"span\", {\"class\": \"list-card__label\"}).text\n cost = cost.lower()\n cost = cost.replace(\"free\", \"0\")\n cost = re.sub(r'[^\\d]+', '', cost)\n if cost == \"\":\n cost = \"0\"\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser') \n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find(\"a\").get(\"data-eid\")\n events_array.append(scrape(event_id, event_cost))\n \n return events_array\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n events = main()\n 
print(len(events))",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
# Data source: kaggle
# Data overview: the zoo dataset; predict an animal's class_type from its attributes
# Prediction model: multiclass classification (7 classes)
# ML model applied: deep multilayer perceptron network
# Training set: default train_test_split portion (~75%) of zoo.csv
# Validation set: the remaining rows from the split
# Test set: not considered, since a separate test set cannot be secured for this collected data
# Input data: every column except animal_name and class_type (counted at runtime)
# Hidden layers: 2
# Activation functions
#  - hidden layer 1: ReLU
#  - hidden layer 2: ReLU
#  - output layer: Softmax
# Loss function: categorical_crossentropy
# Optimizer: adam
# Tensorflow version: 2.0.0
# Python version: 3.7.4
import pandas as pd
from datetime import datetime
from sklearn.model_selection import train_test_split
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
np.random.seed(5)
match_dic={}
zoo_class = pd.read_csv('zoo.csv',sep=',',header=0)
zoo_class.columns = zoo_class.columns.str.replace(' ','_')
# identify the feature columns (all independent variables)
input_data_header = list(zoo_class.columns.difference(["animal_name","class_type"]))
input_data_number = len(input_data_header)
label = zoo_class["class_type"]
start_time = datetime.now()
train_data, test_data, train_label, test_label = train_test_split(zoo_class[input_data_header],label)
train_label = to_categorical(train_label, num_classes=7)
test_label = to_categorical(test_label, num_classes=7)
# build the model
model = Sequential()
model.add(Dense(64, input_dim=input_data_number, activation='relu'))
model.add(Dense(64, activation='relu'))
# model.add(Dense(6, activation='sigmoid'))
model.add(Dense(7, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
# 4. train the model
hist = model.fit(train_data, train_label, epochs=20000, batch_size=64, validation_data=(test_data, test_label))
# hist = model.fit(train_data, train_label, epochs=1000, batch_size=64)
end_time = datetime.now()
# 5. inspect the training history
import matplotlib.pyplot as plt
fig, loss_ax = plt.subplots()
acc_ax = loss_ax.twinx()
loss_ax.plot(hist.history['loss'], 'y', label='train loss')
loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
# acc_ax.plot(hist.history['acc'], 'b', label='train acc')
acc_ax.plot(hist.history['accuracy'], 'b', label='train acc')
# acc_ax.plot(hist.history['val_acc'], 'g', label='val acc')
acc_ax.plot(hist.history['val_accuracy'],'g', label='val acc')
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
acc_ax.set_ylabel('accuracy')
loss_ax.legend(loc='upper left')
acc_ax.legend(loc='lower left')
plt.show()
# 6. evaluate the model
loss_and_metrics = model.evaluate(test_data, test_label, batch_size=32)
print('loss_and_metrics : ' + str(loss_and_metrics))
scores = model.evaluate(test_data, test_label)
print("%s: %.2f%%"%(model.metrics_names[1],scores[1]*100))
|
normal
|
{
"blob_id": "bfa5739949c26758e3762fcff8347d23ad70f704",
"index": 6114,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(5)\n<mask token>\nmodel.add(Dense(64, input_dim=input_data_number, activation='relu'))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(7, activation='softmax'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\n<mask token>\nloss_ax.plot(hist.history['loss'], 'y', label='train loss')\nloss_ax.plot(hist.history['val_loss'], 'r', label='val loss')\nacc_ax.plot(hist.history['accuracy'], 'b', label='train acc')\nacc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')\nloss_ax.set_xlabel('epoch')\nloss_ax.set_ylabel('loss')\nacc_ax.set_ylabel('accuray')\nloss_ax.legend(loc='upper left')\nacc_ax.legend(loc='lower left')\nplt.show()\n<mask token>\nprint('loss_and_metrics : ' + str(loss_and_metrics))\n<mask token>\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\n",
"step-3": "<mask token>\nnp.random.seed(5)\nmatch_dic = {}\nzoo_class = pd.read_csv('zoo.csv', sep=',', header=0)\nzoo_class.columns = zoo_class.columns.str.replace(' ', '_')\ninput_data_header = list(zoo_class.columns.difference(['animal_name',\n 'class_type']))\ninput_data_number = len(input_data_header)\nlabel = zoo_class['class_type']\nstart_time = datetime.now()\ntrain_data, test_data, train_label, test_label = train_test_split(zoo_class\n [input_data_header], label)\ntrain_label = to_categorical(train_label, num_classes=7)\ntest_label = to_categorical(test_label, num_classes=7)\nmodel = Sequential()\nmodel.add(Dense(64, input_dim=input_data_number, activation='relu'))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(7, activation='softmax'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\nhist = model.fit(train_data, train_label, epochs=20000, batch_size=64,\n validation_data=(test_data, test_label))\nend_time = datetime.now()\n<mask token>\nfig, loss_ax = plt.subplots()\nacc_ax = loss_ax.twinx()\nloss_ax.plot(hist.history['loss'], 'y', label='train loss')\nloss_ax.plot(hist.history['val_loss'], 'r', label='val loss')\nacc_ax.plot(hist.history['accuracy'], 'b', label='train acc')\nacc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')\nloss_ax.set_xlabel('epoch')\nloss_ax.set_ylabel('loss')\nacc_ax.set_ylabel('accuray')\nloss_ax.legend(loc='upper left')\nacc_ax.legend(loc='lower left')\nplt.show()\nloss_and_metrics = model.evaluate(test_data, test_label, batch_size=32)\nprint('loss_and_metrics : ' + str(loss_and_metrics))\nscores = model.evaluate(test_data, test_label)\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\n",
"step-4": "import pandas as pd\nfrom datetime import datetime\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import to_categorical\nnp.random.seed(5)\nmatch_dic = {}\nzoo_class = pd.read_csv('zoo.csv', sep=',', header=0)\nzoo_class.columns = zoo_class.columns.str.replace(' ', '_')\ninput_data_header = list(zoo_class.columns.difference(['animal_name',\n 'class_type']))\ninput_data_number = len(input_data_header)\nlabel = zoo_class['class_type']\nstart_time = datetime.now()\ntrain_data, test_data, train_label, test_label = train_test_split(zoo_class\n [input_data_header], label)\ntrain_label = to_categorical(train_label, num_classes=7)\ntest_label = to_categorical(test_label, num_classes=7)\nmodel = Sequential()\nmodel.add(Dense(64, input_dim=input_data_number, activation='relu'))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(7, activation='softmax'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\nhist = model.fit(train_data, train_label, epochs=20000, batch_size=64,\n validation_data=(test_data, test_label))\nend_time = datetime.now()\nimport matplotlib.pyplot as plt\nfig, loss_ax = plt.subplots()\nacc_ax = loss_ax.twinx()\nloss_ax.plot(hist.history['loss'], 'y', label='train loss')\nloss_ax.plot(hist.history['val_loss'], 'r', label='val loss')\nacc_ax.plot(hist.history['accuracy'], 'b', label='train acc')\nacc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')\nloss_ax.set_xlabel('epoch')\nloss_ax.set_ylabel('loss')\nacc_ax.set_ylabel('accuray')\nloss_ax.legend(loc='upper left')\nacc_ax.legend(loc='lower left')\nplt.show()\nloss_and_metrics = model.evaluate(test_data, test_label, batch_size=32)\nprint('loss_and_metrics : ' + str(loss_and_metrics))\nscores = model.evaluate(test_data, test_label)\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\n",
"step-5": "# 데이터 출처: kaggle\n# 데이터 개요: 511, 유리를 위한 다양한 속성(화학원소)들로부터 type 구별\n# 데이터 예측 모델: 이진클래스\n# 적용 머신러닝 모델: 깊은 다층 퍼셉트론 신경망\n# 훈련 데이터셋: 160건\n# 검증 데이터셋: 건\n# 시험 데이터셋: 수집데이터로서 시험셋을 확보할 수 없으므로 고려하지 않음\n# 입력 데이터: 10개 항목의 데이터\n# 은닉층: 2개\n# 사용한 활성화 함수\n# - 제1 은닉층: Relu\n# - 제2 은닉층: Relu\n# - Output Layer: Softmax\n# 사용한 손실함수: categorical_crossentropy\n# 사용한 Optimizer: rmsprop\n# Tensorflow 버전: 2.0.0\n# 파이썬버전: 3.7.4\n\nimport pandas as pd\nfrom datetime import datetime\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import to_categorical\n\nnp.random.seed(5)\nmatch_dic={}\n\nzoo_class = pd.read_csv('zoo.csv',sep=',',header=0)\nzoo_class.columns = zoo_class.columns.str.replace(' ','_')\n\n\n# 전체 독립변수 식별\ninput_data_header = list(zoo_class.columns.difference([\"animal_name\",\"class_type\"]))\ninput_data_number = len(input_data_header)\nlabel = zoo_class[\"class_type\"]\n\nstart_time = datetime.now()\n\ntrain_data, test_data, train_label, test_label = train_test_split(zoo_class[input_data_header],label)\ntrain_label = to_categorical(train_label, num_classes=7)\ntest_label = to_categorical(test_label, num_classes=7)\n\n# 훈련셋과 시험셋 불러오기\n# x_train = x_train.reshape(60000, width * height).astype('float32') / 255.0\n# x_test = x_test.reshape(10000, width * height).astype('float32') / 255.0\n\n# 모델 구성하기\nmodel = Sequential()\nmodel.add(Dense(64, input_dim=input_data_number, activation='relu'))\nmodel.add(Dense(64, activation='relu'))\n# model.add(Dense(6, activation='sigmoid'))\nmodel.add(Dense(7, activation='softmax'))\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n# model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n\n# 4. 모델 학습시키기\nhist = model.fit(train_data, train_label, epochs=20000, batch_size=64, validation_data=(test_data, test_label))\n# hist = model.fit(train_data, train_label, epochs=1000, batch_size=64)\n\nend_time = datetime.now()\n\n# 5. 학습과정 살펴보기\nimport matplotlib.pyplot as plt\n\nfig, loss_ax = plt.subplots()\n\nacc_ax = loss_ax.twinx()\n\nloss_ax.plot(hist.history['loss'], 'y', label='train loss')\nloss_ax.plot(hist.history['val_loss'], 'r', label='val loss')\n\n# acc_ax.plot(hist.history['acc'], 'b', label='train acc')\nacc_ax.plot(hist.history['accuracy'], 'b', label='train acc')\n# acc_ax.plot(hist.history['val_acc'], 'g', label='val acc')\nacc_ax.plot(hist.history['val_accuracy'],'g', label='val acc')\n\nloss_ax.set_xlabel('epoch')\nloss_ax.set_ylabel('loss')\nacc_ax.set_ylabel('accuray')\n\nloss_ax.legend(loc='upper left')\nacc_ax.legend(loc='lower left')\n\nplt.show()\n\n# 6. 모델 평가하기\nloss_and_metrics = model.evaluate(test_data, test_label, batch_size=32)\nprint('loss_and_metrics : ' + str(loss_and_metrics))\n\nscores = model.evaluate(test_data, test_label)\nprint(\"%s: %.2f%%\"%(model.metrics_names[1],scores[1]*100))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import *
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
map_: List[List[int]] = [[0 if (i > 0 and j > 0) else 1 for j in range(m)] for i in range(n)]
for row in range(1, n):
for col in range(1, m):
map_[row][col] = map_[row][col - 1] + map_[row - 1][col]
# [map_[row][col] := map_[row][col - 1] + map_[row - 1][col] for col in range(1, m) for row in range(1, n)]
return map_[-1][-1]
print(Solution().uniquePaths(7, 3))
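# Hedged cross-check: every path is an arrangement of (m-1) rights and
# (n-1) downs, so the DP result should equal C(m+n-2, n-1).
from math import comb
assert Solution().uniquePaths(7, 3) == comb(7 + 3 - 2, 3 - 1)  # both are 28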
|
normal
|
{
"blob_id": "e2a38d38d2ab750cf775ed0fbdb56bc6fc7300c4",
"index": 8934,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-4": "from typing import *\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-5": "from typing import *\n\n\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n map_: List[List[int]] = [[0 if (i > 0 and j > 0) else 1 for j in range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n\n # [map_[row][col] := map_[row][col - 1] + map_[row - 1][col] for col in range(1, m) for row in range(1, n)]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#
# abc088 c
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """1 0 1
2 1 2
1 0 1"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """2 2 2
2 1 2
2 2 2"""
output = """No"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """0 8 8
0 8 8
0 8 8"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_4(self):
input = """1 8 6
2 9 7
0 7 7"""
output = """No"""
self.assertIO(input, output)
def resolve():
c = []
for _ in range(3):
c.append(list(map(int, input().split())))
a1 = 0
b1 = c[0][0] - a1
b2 = c[0][1] - a1
b3 = c[0][2] - a1
a2 = c[1][0] - b1
a3 = c[2][0] - b1
if a2+b2 == c[1][1] and a2+b3 == c[1][2] and a3+b2 == c[2][1] and a3+b3 == c[2][2]:
print("Yes")
else:
print("No")
if __name__ == "__main__":
# unittest.main()
resolve()
|
normal
|
{
"blob_id": "8b97c1e14adfcb09806e2d37e2f5c4f0b356c009",
"index": 2742,
"step-1": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n <mask token>\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n if a2 + b2 == c[1][1] and a2 + b3 == c[1][2] and a3 + b2 == c[2][1\n ] and a3 + b3 == c[2][2]:\n print('Yes')\n else:\n print('No')\n\n\nif __name__ == '__main__':\n resolve()\n",
"step-4": "import sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n if a2 + b2 == c[1][1] and a2 + b3 == c[1][2] and a3 + b2 == c[2][1\n ] and a3 + b3 == c[2][2]:\n print('Yes')\n else:\n print('No')\n\n\nif __name__ == '__main__':\n resolve()\n",
"step-5": "#\n# abc088 c\n#\nimport sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = \"\"\"1 0 1\n2 1 2\n1 0 1\"\"\"\n output = \"\"\"Yes\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = \"\"\"2 2 2\n2 1 2\n2 2 2\"\"\"\n output = \"\"\"No\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = \"\"\"0 8 8\n0 8 8\n0 8 8\"\"\"\n output = \"\"\"Yes\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = \"\"\"1 8 6\n2 9 7\n0 7 7\"\"\"\n output = \"\"\"No\"\"\"\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n\n if a2+b2 == c[1][1] and a2+b3 == c[1][2] and a3+b2 == c[2][1] and a3+b3 == c[2][2]:\n print(\"Yes\")\n else:\n print(\"No\")\n\n\nif __name__ == \"__main__\":\n # unittest.main()\n resolve()\n",
"step-ids": [
2,
6,
8,
9,
10
]
}
|
[
2,
6,
8,
9,
10
] |
from amqpstorm import management
if __name__ == '__main__':
# If using a self-signed certificate, change verify=True to point at your CA bundle.
# You can disable certificate verification for testing by passing in verify=False.
API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',
'guest', verify=True)
try:
result = API.aliveness_test('/')
if result['status'] == 'ok':
print('RabbitMQ is alive!')
else:
print('RabbitMQ is not alive! :(')
except management.ApiConnectionError as why:
print('Connection Error: %s' % why)
except management.ApiError as why:
print('ApiError: %s' % why)
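    # Hedged extension: fold the same call into a reusable boolean helper;
    # only aliveness_test() from the snippet above is assumed to exist.
    def is_alive(api, vhost='/'):
        try:
            return api.aliveness_test(vhost)['status'] == 'ok'
        except (management.ApiConnectionError, management.ApiError):
            return False
    print('is_alive: %s' % is_alive(API))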
|
normal
|
{
"blob_id": "0279057b3962e4b9839a86fc2e2683ac1da11b1a",
"index": 8665,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671',\n 'guest', 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-3": "from amqpstorm import management\nif __name__ == '__main__':\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671',\n 'guest', 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-4": "from amqpstorm import management\n\nif __name__ == '__main__':\n # If using a self-signed certificate, change verify=True to point at your CA bundle.\n # You can disable certificate verification for testing by passing in verify=False.\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',\n 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelBinarizer
from ann.act import relu, softmax_with_xentropy
from ann.loss import xentropy_with_softmax
from ann.opt import SGD, RMSprop, Adam, SGDM
from ann.sklearn import NetworkClassifier, FC
# set seeds
random.seed(42)
np.random.seed(42)
# prepare data
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
x = mnist.data
y = LabelBinarizer().fit_transform(mnist.target.astype(int))
x_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=1000, stratify=y)
# normalize input
scaler = StandardScaler(copy=False)
x_train = scaler.fit_transform(x_train)
x_dev = scaler.transform(x_dev)
# define configurations
configs = []
net1 = NetworkClassifier(layers=[
FC(n_in=x.shape[1], n_out=256, act=relu),
FC(n_in=256, n_out=10, act=softmax_with_xentropy)
])
opt1 = SGD(loss=xentropy_with_softmax, lr=0.001, batch_size=64)
configs.append(("SGD", net1, opt1))
net2 = net1.clone()
opt2 = SGDM(loss=xentropy_with_softmax, lr=0.001, batch_size=64)
configs.append(("SGDM", net2, opt2))
net3 = net1.clone()
opt3 = RMSprop(loss=xentropy_with_softmax, lr=1e-8, batch_size=64)
configs.append(("RMSprop", net3, opt3))
net4 = net1.clone()
opt4 = Adam(loss=xentropy_with_softmax, lr=1e-8, batch_size=64)
configs.append(("Adam", net4, opt4))
# --- you can add other configurations here ---
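# Hedged example of one more entry: reuses only the clone()/optimizer pattern
# shown above; the learning rate is illustrative, not tuned.
net5 = net1.clone()
opt5 = Adam(loss=xentropy_with_softmax, lr=1e-4, batch_size=64)
configs.append(("Adam lr=1e-4", net5, opt5))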
# define training procedure
epochs = 10
early_stop_patience = 200
# train networks
results = []
for _, net, opt in configs:
res = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=y_dev, track_loss=True,
early_stop_pat=early_stop_patience, verbose=1)
results.append(res)
def plot(ax, ls_batch, ls_dev, its, title):
ax.plot(range(len(ls_batch)), ls_batch, label="Batch")
ax.plot(range(len(ls_dev)), ls_dev, label="Dev")
ax.text(0.3, 0.93, "Batch: {:.3f}".format(ls_batch[-1]), transform=ax.transAxes)
ax.text(0.3, 0.86, "Dev: {:.3f}".format(ls_dev[-1]), transform=ax.transAxes)
ax.text(0.3, 0.79, "Its: {}".format(its), transform=ax.transAxes)
ax.set_xlabel("Iterations")
ax.set_ylabel("Loss")
ax.set_title(title)
ax.legend(loc="upper right")
# plot results
rows = int(np.sqrt(len(configs)))
cols = int(np.ceil(len(configs) / rows))
plt.figure(figsize=(4 * cols, 4 * rows))
last_ax = None
for i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(configs, results)):
ax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)
if len(ls_batch) > 0:
plot(ax, ls_batch, ls_dev, its, title)
else:
print("Warning: Config {} did not return any results".format(title))
last_ax = ax
plt.tight_layout()
plt.show()
|
normal
|
{
"blob_id": "2f6e5ed4e2d52190551dec2ac18441b8355699b5",
"index": 7096,
"step-1": "<mask token>\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n ax.plot(range(len(ls_batch)), ls_batch, label='Batch')\n ax.plot(range(len(ls_dev)), ls_dev, label='Dev')\n ax.text(0.3, 0.93, 'Batch: {:.3f}'.format(ls_batch[-1]), transform=ax.\n transAxes)\n ax.text(0.3, 0.86, 'Dev: {:.3f}'.format(ls_dev[-1]), transform=ax.transAxes\n )\n ax.text(0.3, 0.79, 'Its: {}'.format(its), transform=ax.transAxes)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('Loss')\n ax.set_title(title)\n ax.legend(loc='upper right')\n\n\n<mask token>\n",
"step-2": "<mask token>\nrandom.seed(42)\nnp.random.seed(42)\n<mask token>\nconfigs.append(('SGD', net1, opt1))\n<mask token>\nconfigs.append(('SGDM', net2, opt2))\n<mask token>\nconfigs.append(('RMSprop', net3, opt3))\n<mask token>\nconfigs.append(('Adam', net4, opt4))\n<mask token>\nfor _, net, opt in configs:\n res = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=\n y_dev, track_loss=True, early_stop_pat=early_stop_patience, verbose=1)\n results.append(res)\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n ax.plot(range(len(ls_batch)), ls_batch, label='Batch')\n ax.plot(range(len(ls_dev)), ls_dev, label='Dev')\n ax.text(0.3, 0.93, 'Batch: {:.3f}'.format(ls_batch[-1]), transform=ax.\n transAxes)\n ax.text(0.3, 0.86, 'Dev: {:.3f}'.format(ls_dev[-1]), transform=ax.transAxes\n )\n ax.text(0.3, 0.79, 'Its: {}'.format(its), transform=ax.transAxes)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('Loss')\n ax.set_title(title)\n ax.legend(loc='upper right')\n\n\n<mask token>\nplt.figure(figsize=(4 * cols, 4 * rows))\n<mask token>\nfor i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(\n configs, results)):\n ax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)\n if len(ls_batch) > 0:\n plot(ax, ls_batch, ls_dev, its, title)\n else:\n print('Warning: Config {} did not return any results'.format(title))\n last_ax = ax\nplt.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\nrandom.seed(42)\nnp.random.seed(42)\nmnist = fetch_mldata('MNIST original')\nx = mnist.data\ny = LabelBinarizer().fit_transform(mnist.target.astype(int))\nx_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=1000,\n stratify=y)\nscaler = StandardScaler(copy=False)\nx_train = scaler.fit_transform(x_train)\nx_dev = scaler.transform(x_dev)\nconfigs = []\nnet1 = NetworkClassifier(layers=[FC(n_in=x.shape[1], n_out=256, act=relu),\n FC(n_in=256, n_out=10, act=softmax_with_xentropy)])\nopt1 = SGD(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append(('SGD', net1, opt1))\nnet2 = net1.clone()\nopt2 = SGDM(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append(('SGDM', net2, opt2))\nnet3 = net1.clone()\nopt3 = RMSprop(loss=xentropy_with_softmax, lr=1e-08, batch_size=64)\nconfigs.append(('RMSprop', net3, opt3))\nnet4 = net1.clone()\nopt4 = Adam(loss=xentropy_with_softmax, lr=1e-08, batch_size=64)\nconfigs.append(('Adam', net4, opt4))\nepochs = 10\nearly_stop_patience = 200\nresults = []\nfor _, net, opt in configs:\n res = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=\n y_dev, track_loss=True, early_stop_pat=early_stop_patience, verbose=1)\n results.append(res)\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n ax.plot(range(len(ls_batch)), ls_batch, label='Batch')\n ax.plot(range(len(ls_dev)), ls_dev, label='Dev')\n ax.text(0.3, 0.93, 'Batch: {:.3f}'.format(ls_batch[-1]), transform=ax.\n transAxes)\n ax.text(0.3, 0.86, 'Dev: {:.3f}'.format(ls_dev[-1]), transform=ax.transAxes\n )\n ax.text(0.3, 0.79, 'Its: {}'.format(its), transform=ax.transAxes)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('Loss')\n ax.set_title(title)\n ax.legend(loc='upper right')\n\n\nrows = np.sqrt(len(configs)).astype(np.int)\ncols = np.ceil(len(configs) / rows).astype(np.int)\nplt.figure(figsize=(4 * cols, 4 * rows))\nlast_ax = None\nfor i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(\n configs, results)):\n ax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)\n if len(ls_batch) > 0:\n plot(ax, ls_batch, ls_dev, its, title)\n else:\n print('Warning: Config {} did not return any results'.format(title))\n last_ax = ax\nplt.tight_layout()\nplt.show()\n",
"step-4": "import random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.datasets import fetch_mldata\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, LabelBinarizer\nfrom ann.act import relu, softmax_with_xentropy\nfrom ann.loss import xentropy_with_softmax\nfrom ann.opt import SGD, RMSprop, Adam, SGDM\nfrom ann.sklearn import NetworkClassifier, FC\nrandom.seed(42)\nnp.random.seed(42)\nmnist = fetch_mldata('MNIST original')\nx = mnist.data\ny = LabelBinarizer().fit_transform(mnist.target.astype(int))\nx_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=1000,\n stratify=y)\nscaler = StandardScaler(copy=False)\nx_train = scaler.fit_transform(x_train)\nx_dev = scaler.transform(x_dev)\nconfigs = []\nnet1 = NetworkClassifier(layers=[FC(n_in=x.shape[1], n_out=256, act=relu),\n FC(n_in=256, n_out=10, act=softmax_with_xentropy)])\nopt1 = SGD(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append(('SGD', net1, opt1))\nnet2 = net1.clone()\nopt2 = SGDM(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append(('SGDM', net2, opt2))\nnet3 = net1.clone()\nopt3 = RMSprop(loss=xentropy_with_softmax, lr=1e-08, batch_size=64)\nconfigs.append(('RMSprop', net3, opt3))\nnet4 = net1.clone()\nopt4 = Adam(loss=xentropy_with_softmax, lr=1e-08, batch_size=64)\nconfigs.append(('Adam', net4, opt4))\nepochs = 10\nearly_stop_patience = 200\nresults = []\nfor _, net, opt in configs:\n res = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=\n y_dev, track_loss=True, early_stop_pat=early_stop_patience, verbose=1)\n results.append(res)\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n ax.plot(range(len(ls_batch)), ls_batch, label='Batch')\n ax.plot(range(len(ls_dev)), ls_dev, label='Dev')\n ax.text(0.3, 0.93, 'Batch: {:.3f}'.format(ls_batch[-1]), transform=ax.\n transAxes)\n ax.text(0.3, 0.86, 'Dev: {:.3f}'.format(ls_dev[-1]), transform=ax.transAxes\n )\n ax.text(0.3, 0.79, 'Its: {}'.format(its), transform=ax.transAxes)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('Loss')\n ax.set_title(title)\n ax.legend(loc='upper right')\n\n\nrows = np.sqrt(len(configs)).astype(np.int)\ncols = np.ceil(len(configs) / rows).astype(np.int)\nplt.figure(figsize=(4 * cols, 4 * rows))\nlast_ax = None\nfor i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(\n configs, results)):\n ax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)\n if len(ls_batch) > 0:\n plot(ax, ls_batch, ls_dev, its, title)\n else:\n print('Warning: Config {} did not return any results'.format(title))\n last_ax = ax\nplt.tight_layout()\nplt.show()\n",
"step-5": "import random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.datasets import fetch_mldata\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, LabelBinarizer\n\nfrom ann.act import relu, softmax_with_xentropy\nfrom ann.loss import xentropy_with_softmax\nfrom ann.opt import SGD, RMSprop, Adam, SGDM\nfrom ann.sklearn import NetworkClassifier, FC\n\n# set seeds\nrandom.seed(42)\nnp.random.seed(42)\n\n# prepare data\nmnist = fetch_mldata('MNIST original')\nx = mnist.data\ny = LabelBinarizer().fit_transform(mnist.target.astype(int))\nx_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=1000, stratify=y)\n# normalize input\nscaler = StandardScaler(copy=False)\nx_train = scaler.fit_transform(x_train)\nx_dev = scaler.transform(x_dev)\n\n# define configurations\nconfigs = []\n\nnet1 = NetworkClassifier(layers=[\n\tFC(n_in=x.shape[1], n_out=256, act=relu),\n\tFC(n_in=256, n_out=10, act=softmax_with_xentropy)\n])\nopt1 = SGD(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append((\"SGD\", net1, opt1))\n\nnet2 = net1.clone()\nopt2 = SGDM(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append((\"SGDM\", net2, opt2))\n\nnet3 = net1.clone()\nopt3 = RMSprop(loss=xentropy_with_softmax, lr=1e-8, batch_size=64)\nconfigs.append((\"RMSprop\", net3, opt3))\n\nnet4 = net1.clone()\nopt4 = Adam(loss=xentropy_with_softmax, lr=1e-8, batch_size=64)\nconfigs.append((\"Adam\", net4, opt4))\n\n# --- you can add other configurations here ---\n\n# define training procedure\nepochs = 10\nearly_stop_patience = 200\n\n# train networks\nresults = []\nfor _, net, opt in configs:\n\tres = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=y_dev, track_loss=True,\n\t\t\t\t\t early_stop_pat=early_stop_patience, verbose=1)\n\tresults.append(res)\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n\tax.plot(range(len(ls_batch)), ls_batch, label=\"Batch\")\n\tax.plot(range(len(ls_dev)), ls_dev, label=\"Dev\")\n\tax.text(0.3, 0.93, \"Batch: {:.3f}\".format(ls_batch[-1]), transform=ax.transAxes)\n\tax.text(0.3, 0.86, \"Dev: {:.3f}\".format(ls_dev[-1]), transform=ax.transAxes)\n\tax.text(0.3, 0.79, \"Its: {}\".format(its), transform=ax.transAxes)\n\tax.set_xlabel(\"Iterations\")\n\tax.set_ylabel(\"Loss\")\n\tax.set_title(title)\n\tax.legend(loc=\"upper right\")\n\n\n# plot results\nrows = np.sqrt(len(configs)).astype(np.int)\ncols = np.ceil(len(configs) / rows).astype(np.int)\nplt.figure(figsize=(4 * cols, 4 * rows))\nlast_ax = None\nfor i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(configs, results)):\n\tax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)\n\tif len(ls_batch) > 0:\n\t\tplot(ax, ls_batch, ls_dev, its, title)\n\telse:\n\t\tprint(\"Warning: Config {} did not return any results\".format(title))\n\tlast_ax = ax\nplt.tight_layout()\nplt.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
router.register('', views.RoomViewSet)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app_name = 'rooms'
router = DefaultRouter()
router.register('', views.RoomViewSet)
urlpatterns = router.urls
<|reserved_special_token_1|>
from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
app_name = 'rooms'
router = DefaultRouter()
router.register('', views.RoomViewSet)
urlpatterns = router.urls
<|reserved_special_token_1|>
from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
app_name = "rooms"
router = DefaultRouter()
router.register("", views.RoomViewSet)
urlpatterns = router.urls
#
# urlpatterns = [
# # path("list/", views.ListRoomsView.as_view()),
# # path("list/", views.rooms_view),
# path("list/",views.RoomsView.as_view()),
# path('<int:pk>/',views.RoomView.as_view()),
# path('search/',views.room_search)
# ]
|
flexible
|
{
"blob_id": "96708216c5ffa56a60475b295c21b18225e6eed9",
"index": 6056,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrouter.register('', views.RoomViewSet)\n<mask token>\n",
"step-3": "<mask token>\napp_name = 'rooms'\nrouter = DefaultRouter()\nrouter.register('', views.RoomViewSet)\nurlpatterns = router.urls\n",
"step-4": "from django.urls import path\nfrom rest_framework.routers import DefaultRouter\nfrom . import views\napp_name = 'rooms'\nrouter = DefaultRouter()\nrouter.register('', views.RoomViewSet)\nurlpatterns = router.urls\n",
"step-5": "from django.urls import path\nfrom rest_framework.routers import DefaultRouter\nfrom . import views\n\napp_name = \"rooms\"\nrouter = DefaultRouter()\nrouter.register(\"\", views.RoomViewSet)\n\nurlpatterns = router.urls\n#\n# urlpatterns = [\n# # path(\"list/\", views.ListRoomsView.as_view()),\n# # path(\"list/\", views.rooms_view),\n# path(\"list/\",views.RoomsView.as_view()),\n# path('<int:pk>/',views.RoomView.as_view()),\n# path('search/',views.room_search)\n# ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class YahooHelper:
<|reserved_special_token_0|>
def __init__(self):
"""
Default constructor which initiates object
"""
pass
<|reserved_special_token_0|>
def get_stock_data(symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
start = date(date.today().year, 1, 1)
end = date.today()
data = pdr.get_data_yahoo(symbol, start=start, end=end)
data.columns = ['Highest price (USD)', 'Lowest price (USD)',
'Opening price (USD)', 'Closing price (USD)', 'Volume',
'Adjusted closing price (USD)']
return data
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding=
'utf-8')
template = ('# TSLA Stocks over time \n' +
"""# ---------------------------------------------------------------------
"""
+
'# Export of stock data of "Tesla Inc." for current year. The dataset\n'
+
"""# consists of selected key stock exchange figures on a daily basis.
"""
+
'# The data can be recreated at any time with the "load_data.py"-script.\n'
+
"""# The data record contains one record sorted per trading day.
"""
+ '#\n' +
'# The data is restricted to the NASDAQ symbol "TSLA" which represents \n'
+
"""# the company Tesla Inc. The stock information was limited to the period
"""
+ '# from 1st January to the current day of the year. \n' +
'#\n' +
"""# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/
"""
+ '# December, 26, 2018, Marco Romanutti \n' + '#\n' + '#\n' +
'{}')
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding=
'utf-8')))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class YahooHelper:
<|reserved_special_token_0|>
def __init__(self):
"""
Default constructor which initiates object
"""
pass
def get_data(self, symbol):
"""
Function to collect Twitter data.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
"""
self.data = self.get_stock_data(symbol)
def get_stock_data(symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
start = date(date.today().year, 1, 1)
end = date.today()
data = pdr.get_data_yahoo(symbol, start=start, end=end)
data.columns = ['Highest price (USD)', 'Lowest price (USD)',
'Opening price (USD)', 'Closing price (USD)', 'Volume',
'Adjusted closing price (USD)']
return data
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding=
'utf-8')
template = ('# TSLA Stocks over time \n' +
"""# ---------------------------------------------------------------------
"""
+
'# Export of stock data of "Tesla Inc." for current year. The dataset\n'
+
"""# consists of selected key stock exchange figures on a daily basis.
"""
+
'# The data can be recreated at any time with the "load_data.py"-script.\n'
+
"""# The data record contains one record sorted per trading day.
"""
+ '#\n' +
'# The data is restricted to the NASDAQ symbol "TSLA" which represents \n'
+
"""# the company Tesla Inc. The stock information was limited to the period
"""
+ '# from 1st January to the current day of the year. \n' +
'#\n' +
"""# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/
"""
+ '# December, 26, 2018, Marco Romanutti \n' + '#\n' + '#\n' +
'{}')
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding=
'utf-8')))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class YahooHelper:
"""
Class to fetch Yahoo data
"""
def __init__(self):
"""
Default constructor which initiates object
"""
pass
def get_data(self, symbol):
"""
Function to collect Twitter data.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
"""
self.data = self.get_stock_data(symbol)
def get_stock_data(symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
start = date(date.today().year, 1, 1)
end = date.today()
data = pdr.get_data_yahoo(symbol, start=start, end=end)
data.columns = ['Highest price (USD)', 'Lowest price (USD)',
'Opening price (USD)', 'Closing price (USD)', 'Volume',
'Adjusted closing price (USD)']
return data
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding=
'utf-8')
template = ('# TSLA Stocks over time \n' +
"""# ---------------------------------------------------------------------
"""
+
'# Export of stock data of "Tesla Inc." for current year. The dataset\n'
+
"""# consists of selected key stock exchange figures on a daily basis.
"""
+
'# The data can be recreated at any time with the "load_data.py"-script.\n'
+
"""# The data record contains one record sorted per trading day.
"""
+ '#\n' +
'# The data is restricted to the NASDAQ symbol "TSLA" which represents \n'
+
"""# the company Tesla Inc. The stock information was limited to the period
"""
+ '# from 1st January to the current day of the year. \n' +
'#\n' +
"""# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/
"""
+ '# December, 26, 2018, Marco Romanutti \n' + '#\n' + '#\n' +
'{}')
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding=
'utf-8')))
<|reserved_special_token_1|>
from pandas_datareader import data as pdr
from datetime import date
class YahooHelper:
"""
Class to fetch Yahoo data
"""
def __init__(self):
"""
Default constructor which initiates object
"""
pass
def get_data(self, symbol):
"""
Function to collect Twitter data.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
"""
self.data = self.get_stock_data(symbol)
def get_stock_data(symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
start = date(date.today().year, 1, 1)
end = date.today()
data = pdr.get_data_yahoo(symbol, start=start, end=end)
data.columns = ['Highest price (USD)', 'Lowest price (USD)',
'Opening price (USD)', 'Closing price (USD)', 'Volume',
'Adjusted closing price (USD)']
return data
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding=
'utf-8')
template = ('# TSLA Stocks over time \n' +
"""# ---------------------------------------------------------------------
"""
+
'# Export of stock data of "Tesla Inc." for current year. The dataset\n'
+
"""# consists of selected key stock exchange figures on a daily basis.
"""
+
'# The data can be recreated at any time with the "load_data.py"-script.\n'
+
"""# The data record contains one record sorted per trading day.
"""
+ '#\n' +
'# The data is restricted to the NASDAQ symbol "TSLA" which represents \n'
+
"""# the company Tesla Inc. The stock information was limited to the period
"""
+ '# from 1st January to the current day of the year. \n' +
'#\n' +
"""# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/
"""
+ '# December, 26, 2018, Marco Romanutti \n' + '#\n' + '#\n' +
'{}')
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding=
'utf-8')))
<|reserved_special_token_1|>
from pandas_datareader import data as pdr
from datetime import date
class YahooHelper:
"""
Class to fetch Yahoo data
"""
def __init__(self):
"""
Default constructor which initiates object
"""
pass
def get_data(self, symbol):
"""
Function to collect Twitter data.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
"""
# Collect stock market data
self.data = self.get_stock_data(symbol)
# Symbol lookup:
def get_stock_data(symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
# Set current dates
start = date(date.today().year, 1, 1) # first of current year
end = date.today() # today
# Get yahoo Yahoo data
data = pdr.get_data_yahoo(symbol, start=start, end=end)
# Rename columns
data.columns = ["Highest price (USD)",
"Lowest price (USD)",
"Opening price (USD)",
"Closing price (USD)",
"Volume",
"Adjusted closing price (USD)"]
return data
# Export data to csv
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding='utf-8')
# Header information
template = "# TSLA Stocks over time \n" + \
"# --------------------------------------------------------------------- \n" + \
"# Export of stock data of \"Tesla Inc.\" for current year. The dataset\n" + \
"# consists of selected key stock exchange figures on a daily basis. \n" + \
"# The data can be recreated at any time with the \"load_data.py\"-script.\n" + \
"# The data record contains one record sorted per trading day. \n" + \
"#\n" + \
"# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \n" + \
"# the company Tesla Inc. The stock information was limited to the period \n" + \
"# from 1st January to the current day of the year. \n" + \
"#\n" + \
"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n" + \
"# December, 26, 2018, Marco Romanutti \n" + \
"#\n" + \
"#\n" + \
"{}"""
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding='utf-8')))
|
flexible
|
{
"blob_id": "b4b4dad5cf630dc1a627e323ea63577583d1e1c3",
"index": 1551,
"step-1": "<mask token>\n\n\nclass YahooHelper:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n <mask token>\n\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n start = date(date.today().year, 1, 1)\n end = date.today()\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n data.columns = ['Highest price (USD)', 'Lowest price (USD)',\n 'Opening price (USD)', 'Closing price (USD)', 'Volume',\n 'Adjusted closing price (USD)']\n return data\n\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding=\n 'utf-8')\n template = ('# TSLA Stocks over time \\n' +\n \"\"\"# --------------------------------------------------------------------- \n\"\"\"\n +\n '# Export of stock data of \"Tesla Inc.\" for current year. The dataset\\n'\n +\n \"\"\"# consists of selected key stock exchange figures on a daily basis. \n\"\"\"\n +\n '# The data can be recreated at any time with the \"load_data.py\"-script.\\n'\n +\n \"\"\"# The data record contains one record sorted per trading day. \n\"\"\"\n + '#\\n' +\n '# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \\n'\n +\n \"\"\"# the company Tesla Inc. The stock information was limited to the period \n\"\"\"\n + '# from 1st January to the current day of the year. \\n' +\n '#\\n' +\n \"\"\"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n\"\"\"\n + '# December, 26, 2018, Marco Romanutti \\n' + '#\\n' + '#\\n' +\n '{}')\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding=\n 'utf-8')))\n",
"step-2": "<mask token>\n\n\nclass YahooHelper:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n\n def get_data(self, symbol):\n \"\"\"\n Function to collect Twitter data.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n \"\"\"\n self.data = self.get_stock_data(symbol)\n\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n start = date(date.today().year, 1, 1)\n end = date.today()\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n data.columns = ['Highest price (USD)', 'Lowest price (USD)',\n 'Opening price (USD)', 'Closing price (USD)', 'Volume',\n 'Adjusted closing price (USD)']\n return data\n\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding=\n 'utf-8')\n template = ('# TSLA Stocks over time \\n' +\n \"\"\"# --------------------------------------------------------------------- \n\"\"\"\n +\n '# Export of stock data of \"Tesla Inc.\" for current year. The dataset\\n'\n +\n \"\"\"# consists of selected key stock exchange figures on a daily basis. \n\"\"\"\n +\n '# The data can be recreated at any time with the \"load_data.py\"-script.\\n'\n +\n \"\"\"# The data record contains one record sorted per trading day. \n\"\"\"\n + '#\\n' +\n '# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \\n'\n +\n \"\"\"# the company Tesla Inc. The stock information was limited to the period \n\"\"\"\n + '# from 1st January to the current day of the year. \\n' +\n '#\\n' +\n \"\"\"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n\"\"\"\n + '# December, 26, 2018, Marco Romanutti \\n' + '#\\n' + '#\\n' +\n '{}')\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding=\n 'utf-8')))\n",
"step-3": "<mask token>\n\n\nclass YahooHelper:\n \"\"\"\n Class to fetch Yahoo data\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n\n def get_data(self, symbol):\n \"\"\"\n Function to collect Twitter data.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n \"\"\"\n self.data = self.get_stock_data(symbol)\n\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n start = date(date.today().year, 1, 1)\n end = date.today()\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n data.columns = ['Highest price (USD)', 'Lowest price (USD)',\n 'Opening price (USD)', 'Closing price (USD)', 'Volume',\n 'Adjusted closing price (USD)']\n return data\n\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding=\n 'utf-8')\n template = ('# TSLA Stocks over time \\n' +\n \"\"\"# --------------------------------------------------------------------- \n\"\"\"\n +\n '# Export of stock data of \"Tesla Inc.\" for current year. The dataset\\n'\n +\n \"\"\"# consists of selected key stock exchange figures on a daily basis. \n\"\"\"\n +\n '# The data can be recreated at any time with the \"load_data.py\"-script.\\n'\n +\n \"\"\"# The data record contains one record sorted per trading day. \n\"\"\"\n + '#\\n' +\n '# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \\n'\n +\n \"\"\"# the company Tesla Inc. The stock information was limited to the period \n\"\"\"\n + '# from 1st January to the current day of the year. \\n' +\n '#\\n' +\n \"\"\"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n\"\"\"\n + '# December, 26, 2018, Marco Romanutti \\n' + '#\\n' + '#\\n' +\n '{}')\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding=\n 'utf-8')))\n",
"step-4": "from pandas_datareader import data as pdr\nfrom datetime import date\n\n\nclass YahooHelper:\n \"\"\"\n Class to fetch Yahoo data\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n\n def get_data(self, symbol):\n \"\"\"\n Function to collect Twitter data.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n \"\"\"\n self.data = self.get_stock_data(symbol)\n\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n start = date(date.today().year, 1, 1)\n end = date.today()\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n data.columns = ['Highest price (USD)', 'Lowest price (USD)',\n 'Opening price (USD)', 'Closing price (USD)', 'Volume',\n 'Adjusted closing price (USD)']\n return data\n\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding=\n 'utf-8')\n template = ('# TSLA Stocks over time \\n' +\n \"\"\"# --------------------------------------------------------------------- \n\"\"\"\n +\n '# Export of stock data of \"Tesla Inc.\" for current year. The dataset\\n'\n +\n \"\"\"# consists of selected key stock exchange figures on a daily basis. \n\"\"\"\n +\n '# The data can be recreated at any time with the \"load_data.py\"-script.\\n'\n +\n \"\"\"# The data record contains one record sorted per trading day. \n\"\"\"\n + '#\\n' +\n '# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \\n'\n +\n \"\"\"# the company Tesla Inc. The stock information was limited to the period \n\"\"\"\n + '# from 1st January to the current day of the year. \\n' +\n '#\\n' +\n \"\"\"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n\"\"\"\n + '# December, 26, 2018, Marco Romanutti \\n' + '#\\n' + '#\\n' +\n '{}')\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding=\n 'utf-8')))\n",
"step-5": "from pandas_datareader import data as pdr\nfrom datetime import date\n\n\nclass YahooHelper:\n \"\"\"\n Class to fetch Yahoo data\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n\n def get_data(self, symbol):\n \"\"\"\n Function to collect Twitter data.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n \"\"\"\n # Collect stock market data\n self.data = self.get_stock_data(symbol)\n\n # Symbol lookup:\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n # Set current dates\n start = date(date.today().year, 1, 1) # first of current year\n end = date.today() # today\n\n # Get yahoo Yahoo data\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n\n # Rename columns\n data.columns = [\"Highest price (USD)\",\n \"Lowest price (USD)\",\n \"Opening price (USD)\",\n \"Closing price (USD)\",\n \"Volume\",\n \"Adjusted closing price (USD)\"]\n\n return data\n\n # Export data to csv\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding='utf-8')\n # Header information\n template = \"# TSLA Stocks over time \\n\" + \\\n \"# --------------------------------------------------------------------- \\n\" + \\\n \"# Export of stock data of \\\"Tesla Inc.\\\" for current year. The dataset\\n\" + \\\n \"# consists of selected key stock exchange figures on a daily basis. \\n\" + \\\n \"# The data can be recreated at any time with the \\\"load_data.py\\\"-script.\\n\" + \\\n \"# The data record contains one record sorted per trading day. \\n\" + \\\n \"#\\n\" + \\\n \"# The data is restricted to the NASDAQ symbol \\\"TSLA\\\" which represents \\n\" + \\\n \"# the company Tesla Inc. The stock information was limited to the period \\n\" + \\\n \"# from 1st January to the current day of the year. \\n\" + \\\n \"#\\n\" + \\\n \"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \\n\" + \\\n \"# December, 26, 2018, Marco Romanutti \\n\" + \\\n \"#\\n\" + \\\n \"#\\n\" + \\\n \"{}\"\"\"\n\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding='utf-8')))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def save_location(ip_addr):
try:
existing_location = Location.query.filter_by(ip=ip_addr).first()
if existing_location:
location_data = existing_location.location
else:
location_data = get_location(ip_addr=ip_addr)
location = Location(ip=ip_addr, location=location_data)
save_changes(location)
except Exception as e:
if 'UNIQUE constraint failed: location.ip' not in str(e):
response_object = {'status': 'fail', 'message': e}
return response_object, 400
response_object = {'status': 'success', 'message':
'Successfully saved location.', 'location': location_data}
return response_object, 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_location(ip_addr):
try:
existing_location = Location.query.filter_by(ip=ip_addr).first()
if existing_location:
location_data = existing_location.location
else:
location_data = get_location(ip_addr=ip_addr)
location = Location(ip=ip_addr, location=location_data)
save_changes(location)
except Exception as e:
if 'UNIQUE constraint failed: location.ip' not in str(e):
response_object = {'status': 'fail', 'message': e}
return response_object, 400
response_object = {'status': 'success', 'message':
'Successfully saved location.', 'location': location_data}
return response_object, 200
def get_location(ip_addr):
r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(
ip=ip_addr, key=key))
return r.text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
key = 'a544aecdde85a1f52a56292f77ecde6e'
def save_location(ip_addr):
try:
existing_location = Location.query.filter_by(ip=ip_addr).first()
if existing_location:
location_data = existing_location.location
else:
location_data = get_location(ip_addr=ip_addr)
location = Location(ip=ip_addr, location=location_data)
save_changes(location)
except Exception as e:
if 'UNIQUE constraint failed: location.ip' not in str(e):
response_object = {'status': 'fail', 'message': e}
return response_object, 400
response_object = {'status': 'success', 'message':
'Successfully saved location.', 'location': location_data}
return response_object, 200
def get_location(ip_addr):
r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(
ip=ip_addr, key=key))
return r.text
<|reserved_special_token_1|>
import requests
from app.main.model.location import Location
from app.main.util.db_util import save_changes
key = 'a544aecdde85a1f52a56292f77ecde6e'
def save_location(ip_addr):
try:
existing_location = Location.query.filter_by(ip=ip_addr).first()
if existing_location:
location_data = existing_location.location
else:
location_data = get_location(ip_addr=ip_addr)
location = Location(ip=ip_addr, location=location_data)
save_changes(location)
except Exception as e:
if 'UNIQUE constraint failed: location.ip' not in str(e):
response_object = {'status': 'fail', 'message': e}
return response_object, 400
response_object = {'status': 'success', 'message':
'Successfully saved location.', 'location': location_data}
return response_object, 200
def get_location(ip_addr):
r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(
ip=ip_addr, key=key))
return r.text
|
flexible
|
{
"blob_id": "eb8aec947cc1eeeb56b3884286b46ec7468dcc23",
"index": 9035,
"step-1": "<mask token>\n\n\ndef save_location(ip_addr):\n try:\n existing_location = Location.query.filter_by(ip=ip_addr).first()\n if existing_location:\n location_data = existing_location.location\n else:\n location_data = get_location(ip_addr=ip_addr)\n location = Location(ip=ip_addr, location=location_data)\n save_changes(location)\n except Exception as e:\n if 'UNIQUE constraint failed: location.ip' not in str(e):\n response_object = {'status': 'fail', 'message': e}\n return response_object, 400\n response_object = {'status': 'success', 'message':\n 'Successfully saved location.', 'location': location_data}\n return response_object, 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_location(ip_addr):\n try:\n existing_location = Location.query.filter_by(ip=ip_addr).first()\n if existing_location:\n location_data = existing_location.location\n else:\n location_data = get_location(ip_addr=ip_addr)\n location = Location(ip=ip_addr, location=location_data)\n save_changes(location)\n except Exception as e:\n if 'UNIQUE constraint failed: location.ip' not in str(e):\n response_object = {'status': 'fail', 'message': e}\n return response_object, 400\n response_object = {'status': 'success', 'message':\n 'Successfully saved location.', 'location': location_data}\n return response_object, 200\n\n\ndef get_location(ip_addr):\n r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(\n ip=ip_addr, key=key))\n return r.text\n",
"step-3": "<mask token>\nkey = 'a544aecdde85a1f52a56292f77ecde6e'\n\n\ndef save_location(ip_addr):\n try:\n existing_location = Location.query.filter_by(ip=ip_addr).first()\n if existing_location:\n location_data = existing_location.location\n else:\n location_data = get_location(ip_addr=ip_addr)\n location = Location(ip=ip_addr, location=location_data)\n save_changes(location)\n except Exception as e:\n if 'UNIQUE constraint failed: location.ip' not in str(e):\n response_object = {'status': 'fail', 'message': e}\n return response_object, 400\n response_object = {'status': 'success', 'message':\n 'Successfully saved location.', 'location': location_data}\n return response_object, 200\n\n\ndef get_location(ip_addr):\n r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(\n ip=ip_addr, key=key))\n return r.text\n",
"step-4": "import requests\nfrom app.main.model.location import Location\nfrom app.main.util.db_util import save_changes\nkey = 'a544aecdde85a1f52a56292f77ecde6e'\n\n\ndef save_location(ip_addr):\n try:\n existing_location = Location.query.filter_by(ip=ip_addr).first()\n if existing_location:\n location_data = existing_location.location\n else:\n location_data = get_location(ip_addr=ip_addr)\n location = Location(ip=ip_addr, location=location_data)\n save_changes(location)\n except Exception as e:\n if 'UNIQUE constraint failed: location.ip' not in str(e):\n response_object = {'status': 'fail', 'message': e}\n return response_object, 400\n response_object = {'status': 'success', 'message':\n 'Successfully saved location.', 'location': location_data}\n return response_object, 200\n\n\ndef get_location(ip_addr):\n r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(\n ip=ip_addr, key=key))\n return r.text\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect
from pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat
from pythongame.core.game_data import UiIconSprite, register_buff_text
from pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState
from pythongame.core.item_effects import AbstractItemEffect
from pythongame.core.item_inventory import ItemEquipmentCategory
from pythongame.game_data.items.register_items_util import register_custom_effect_item
BUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND
HEALTH_REGEN_BONUS = 1
BUFF_DURATION = Millis(5000)
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS})
def register_healing_wand_item():
item_type = ItemType.HEALING_WAND
register_custom_effect_item(
item_type=item_type,
item_level=4,
ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND,
sprite=Sprite.ITEM_HEALING_WAND,
image_file_path="resources/graphics/item_healing_wand.png",
item_equipment_category=ItemEquipmentCategory.MAIN_HAND,
name="Healing wand",
custom_description=["When you damage an enemy, gain +" + str(HEALTH_REGEN_BONUS) + " health regen for " +
"{:.0f}".format(BUFF_DURATION / 1000) + "s"],
stat_modifier_intervals=[],
custom_effect=ItemEffect()
)
register_buff_effect(BUFF_TYPE, BuffedByHealingWand)
register_buff_text(BUFF_TYPE, "Healing wand")
|
normal
|
{
"blob_id": "61454a3d6b5b17bff871ededc6ddfe8384043884",
"index": 59,
"step-1": "<mask token>\n\n\nclass ItemEffect(AbstractItemEffect):\n <mask token>\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\nBUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(item_type=item_type, item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.\n ITEM_HEALING_WAND, image_file_path=\n 'resources/graphics/item_healing_wand.png', item_equipment_category\n =ItemEquipmentCategory.MAIN_HAND, name='Healing wand',\n custom_description=['When you damage an enemy, gain +' + str(\n HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(\n BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],\n custom_effect=ItemEffect())\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, 'Healing wand')\n",
"step-4": "from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect\nfrom pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat\nfrom pythongame.core.game_data import UiIconSprite, register_buff_text\nfrom pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState\nfrom pythongame.core.item_effects import AbstractItemEffect\nfrom pythongame.core.item_inventory import ItemEquipmentCategory\nfrom pythongame.game_data.items.register_items_util import register_custom_effect_item\nBUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(item_type=item_type, item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.\n ITEM_HEALING_WAND, image_file_path=\n 'resources/graphics/item_healing_wand.png', item_equipment_category\n =ItemEquipmentCategory.MAIN_HAND, name='Healing wand',\n custom_description=['When you damage an enemy, gain +' + str(\n HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(\n BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],\n custom_effect=ItemEffect())\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, 'Healing wand')\n",
"step-5": "from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect\nfrom pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat\nfrom pythongame.core.game_data import UiIconSprite, register_buff_text\nfrom pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState\nfrom pythongame.core.item_effects import AbstractItemEffect\nfrom pythongame.core.item_inventory import ItemEquipmentCategory\nfrom pythongame.game_data.items.register_items_util import register_custom_effect_item\n\nBUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS})\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(\n item_type=item_type,\n item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND,\n sprite=Sprite.ITEM_HEALING_WAND,\n image_file_path=\"resources/graphics/item_healing_wand.png\",\n item_equipment_category=ItemEquipmentCategory.MAIN_HAND,\n name=\"Healing wand\",\n custom_description=[\"When you damage an enemy, gain +\" + str(HEALTH_REGEN_BONUS) + \" health regen for \" +\n \"{:.0f}\".format(BUFF_DURATION / 1000) + \"s\"],\n stat_modifier_intervals=[],\n custom_effect=ItemEffect()\n )\n\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, \"Healing wand\")\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
from django.shortcuts import render
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from django.conf import settings
import subprocess
import os
import json
class HookView(APIView):
def post(self, request, *args, **kwargs):
SCRIPT_PATH = os.path.join(settings.BASE_DIR, 'deploy/hooks.sh')
# payload from webhook
payload = json.loads(request.data['payload'])
ref = payload['ref']
if ref == 'refs/heads/deploy':
output = subprocess.run(['bash', SCRIPT_PATH]).stdout
return Response(status=status.HTTP_200_OK, data=output)
return Response(status=status.HTTP_400_BAD_REQUEST)
|
normal
|
{
"blob_id": "6f5bca8c1afcd9d9971a64300a576ca2b2f6ef70",
"index": 1694,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass HookView(APIView):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass HookView(APIView):\n\n def post(self, request, *args, **kwargs):\n SCRIPT_PATH = os.path.join(settings.BASE_DIR, 'deploy/hooks.sh')\n payload = json.loads(request.data['payload'])\n ref = payload['ref']\n if ref == 'refs/heads/deploy':\n output = subprocess.run(['bash', SCRIPT_PATH]).stdout\n return Response(status=status.HTTP_200_OK, data=output)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n",
"step-4": "from django.shortcuts import render\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.conf import settings\nimport subprocess\nimport os\nimport json\n\n\nclass HookView(APIView):\n\n def post(self, request, *args, **kwargs):\n SCRIPT_PATH = os.path.join(settings.BASE_DIR, 'deploy/hooks.sh')\n payload = json.loads(request.data['payload'])\n ref = payload['ref']\n if ref == 'refs/heads/deploy':\n output = subprocess.run(['bash', SCRIPT_PATH]).stdout\n return Response(status=status.HTTP_200_OK, data=output)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n",
"step-5": "from django.shortcuts import render\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.conf import settings\nimport subprocess\nimport os\nimport json\n\n\nclass HookView(APIView):\n def post(self, request, *args, **kwargs):\n SCRIPT_PATH = os.path.join(settings.BASE_DIR, 'deploy/hooks.sh')\n # payload from webhook\n payload = json.loads(request.data['payload'])\n ref = payload['ref']\n if ref == 'refs/heads/deploy':\n output = subprocess.run(['bash', SCRIPT_PATH]).stdout\n return Response(status=status.HTTP_200_OK, data=output)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
cardlist = []
card = []
for j in range(1,5):
for k in range(1,14):
if j == 1:
cardlist.append(["S", "{}".format(k)])
elif j == 2:
cardlist.append(["H", "{}".format(k)])
elif j == 3:
cardlist.append(["C", "{}".format(k)])
elif j == 4:
cardlist.append(["D", "{}".format(k)])
num = int(input())
for i in range(num):
card.append(input().split())
for i in range(num):
cardlist.remove(card[i])
for i in range(52-num):
print("{0} {1}".format(cardlist[i][0], cardlist[i][1]))
|
normal
|
{
"blob_id": "937a101cf5c7e943fc62d18b77357eea151fdfaf",
"index": 7789,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor j in range(1, 5):\n for k in range(1, 14):\n if j == 1:\n cardlist.append(['S', '{}'.format(k)])\n elif j == 2:\n cardlist.append(['H', '{}'.format(k)])\n elif j == 3:\n cardlist.append(['C', '{}'.format(k)])\n elif j == 4:\n cardlist.append(['D', '{}'.format(k)])\n<mask token>\nfor i in range(num):\n card.append(input().split())\nfor i in range(num):\n cardlist.remove(card[i])\nfor i in range(52 - num):\n print('{0} {1}'.format(cardlist[i][0], cardlist[i][1]))\n",
"step-3": "cardlist = []\ncard = []\nfor j in range(1, 5):\n for k in range(1, 14):\n if j == 1:\n cardlist.append(['S', '{}'.format(k)])\n elif j == 2:\n cardlist.append(['H', '{}'.format(k)])\n elif j == 3:\n cardlist.append(['C', '{}'.format(k)])\n elif j == 4:\n cardlist.append(['D', '{}'.format(k)])\nnum = int(input())\nfor i in range(num):\n card.append(input().split())\nfor i in range(num):\n cardlist.remove(card[i])\nfor i in range(52 - num):\n print('{0} {1}'.format(cardlist[i][0], cardlist[i][1]))\n",
"step-4": "cardlist = []\ncard = []\n\nfor j in range(1,5):\n for k in range(1,14):\n if j == 1:\n cardlist.append([\"S\", \"{}\".format(k)])\n elif j == 2:\n cardlist.append([\"H\", \"{}\".format(k)])\n elif j == 3:\n cardlist.append([\"C\", \"{}\".format(k)])\n elif j == 4:\n cardlist.append([\"D\", \"{}\".format(k)])\n\nnum = int(input())\n\nfor i in range(num):\n card.append(input().split())\n\nfor i in range(num):\n cardlist.remove(card[i])\n\nfor i in range(52-num):\n print(\"{0} {1}\".format(cardlist[i][0], cardlist[i][1]))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Cell(Enum):
BasicRNN = 1
BasicLSTM = 2
LSTMCellPeephole = 3
GRU = 4
<|reserved_special_token_0|>
def normalize_data(df):
min_max_scaler = sklearn.preprocessing.MinMaxScaler()
df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))
df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))
df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))
df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-
1, 1))
df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape
(-1, 1))
return df
def load_data(stock, seq_len):
data_raw = stock.values
data = []
for index in range(len(data_raw) - seq_len):
data.append(data_raw[index:index + seq_len])
data = np.array(data)
valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.
shape[0]))
test_set_size = int(np.round(test_set_size_percentage / 100 * data.
shape[0]))
train_set_size = data.shape[0] - (valid_set_size + test_set_size)
x_train = data[:train_set_size, :-1, :]
y_train = data[:train_set_size, -1, :]
x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]
y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]
x_test = data[train_set_size + valid_set_size:, :-1, :]
y_test = data[train_set_size + valid_set_size:, -1, :]
return [x_train, y_train, x_valid, y_valid, x_test, y_test]
def show_predictions(ft, y_test_pred):
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',
label='test target')
plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=
'green', label='test prediction')
plt.title('future stock prices')
plt.xlabel('time [days]')
plt.ylabel('normalized price')
plt.legend(loc='best')
x = 0
error_percent = 5
for index in range(0, len(y_test)):
if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test
[:, ft][index]) * 100 < error_percent:
x += 1
    print('Percent of predictions whose error is less than {}% = {}%'.
format(error_percent, x / len(y_test) * 100))
z = 0
distance = 10
for index in range(distance, len(y_test)):
if y_test[:, ft][index - distance] <= y_test[:, ft][index
] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][
index] or y_test[:, ft][index - distance] >= y_test[:, ft][index
] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][
index]:
z += 1
    print('Percent of correctly predicted directions = {}%'.format(z / len(
y_test) * 100))
plt.show()
<|reserved_special_token_0|>
def get_next_batch(batch_size):
global index_in_epoch, x_train, perm_array
start = index_in_epoch
index_in_epoch += batch_size
if index_in_epoch > x_train.shape[0]:
np.random.shuffle(perm_array)
start = 0
index_in_epoch = batch_size
end = index_in_epoch
return x_train[perm_array[start:end]], y_train[perm_array[start:end]]
<|reserved_special_token_0|>
def train_data(model_name):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for iteration in range(int(n_epochs * train_set_size / batch_size)):
x_batch, y_batch = get_next_batch(batch_size)
sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
if iteration % int(5 * train_set_size / batch_size) == 0:
mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (
iteration * batch_size / train_set_size, mse_train,
mse_valid))
saver.save(sess, 'train_models/' + model_name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cell(Enum):
BasicRNN = 1
BasicLSTM = 2
LSTMCellPeephole = 3
GRU = 4
<|reserved_special_token_0|>
def normalize_data(df):
min_max_scaler = sklearn.preprocessing.MinMaxScaler()
df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))
df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))
df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))
df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-
1, 1))
df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape
(-1, 1))
return df
def load_data(stock, seq_len):
data_raw = stock.values
data = []
for index in range(len(data_raw) - seq_len):
data.append(data_raw[index:index + seq_len])
data = np.array(data)
valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.
shape[0]))
test_set_size = int(np.round(test_set_size_percentage / 100 * data.
shape[0]))
train_set_size = data.shape[0] - (valid_set_size + test_set_size)
x_train = data[:train_set_size, :-1, :]
y_train = data[:train_set_size, -1, :]
x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]
y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]
x_test = data[train_set_size + valid_set_size:, :-1, :]
y_test = data[train_set_size + valid_set_size:, -1, :]
return [x_train, y_train, x_valid, y_valid, x_test, y_test]
def show_predictions(ft, y_test_pred):
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',
label='test target')
plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=
'green', label='test prediction')
plt.title('future stock prices')
plt.xlabel('time [days]')
plt.ylabel('normalized price')
plt.legend(loc='best')
x = 0
error_percent = 5
for index in range(0, len(y_test)):
if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test
[:, ft][index]) * 100 < error_percent:
x += 1
    print('Percent of predictions whose error is less than {}% = {}%'.
format(error_percent, x / len(y_test) * 100))
z = 0
distance = 10
for index in range(distance, len(y_test)):
if y_test[:, ft][index - distance] <= y_test[:, ft][index
] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][
index] or y_test[:, ft][index - distance] >= y_test[:, ft][index
] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][
index]:
z += 1
    print('Percent of correctly predicted directions = {}%'.format(z / len(
y_test) * 100))
plt.show()
<|reserved_special_token_0|>
def get_next_batch(batch_size):
global index_in_epoch, x_train, perm_array
start = index_in_epoch
index_in_epoch += batch_size
if index_in_epoch > x_train.shape[0]:
np.random.shuffle(perm_array)
start = 0
index_in_epoch = batch_size
end = index_in_epoch
return x_train[perm_array[start:end]], y_train[perm_array[start:end]]
<|reserved_special_token_0|>
def train_data(model_name):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for iteration in range(int(n_epochs * train_set_size / batch_size)):
x_batch, y_batch = get_next_batch(batch_size)
sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
if iteration % int(5 * train_set_size / batch_size) == 0:
mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (
iteration * batch_size / train_set_size, mse_train,
mse_valid))
saver.save(sess, 'train_models/' + model_name)
def test(model_name):
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'train_models/' + model_name)
y_test_pred = sess.run(outputs, feed_dict={X: x_test})
show_predictions(1, y_test_pred)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cell(Enum):
BasicRNN = 1
BasicLSTM = 2
LSTMCellPeephole = 3
GRU = 4
<|reserved_special_token_0|>
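# sort the rows chronologically so the sliding windows in load_data follow time order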
df.sort_values('Date', inplace=True)
def normalize_data(df):
min_max_scaler = sklearn.preprocessing.MinMaxScaler()
df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))
df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))
df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))
df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-
1, 1))
df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape
(-1, 1))
return df
def load_data(stock, seq_len):
data_raw = stock.values
data = []
for index in range(len(data_raw) - seq_len):
data.append(data_raw[index:index + seq_len])
data = np.array(data)
valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.
shape[0]))
test_set_size = int(np.round(test_set_size_percentage / 100 * data.
shape[0]))
train_set_size = data.shape[0] - (valid_set_size + test_set_size)
x_train = data[:train_set_size, :-1, :]
y_train = data[:train_set_size, -1, :]
x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]
y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]
x_test = data[train_set_size + valid_set_size:, :-1, :]
y_test = data[train_set_size + valid_set_size:, -1, :]
return [x_train, y_train, x_valid, y_valid, x_test, y_test]
def show_predictions(ft, y_test_pred):
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',
label='test target')
plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=
'green', label='test prediction')
plt.title('future stock prices')
plt.xlabel('time [days]')
plt.ylabel('normalized price')
plt.legend(loc='best')
x = 0
error_percent = 5
for index in range(0, len(y_test)):
if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test
[:, ft][index]) * 100 < error_percent:
x += 1
    print('Percent of predictions which error is less than {}% = {}%'.
format(error_percent, x / len(y_test) * 100))
z = 0
distance = 10
for index in range(distance, len(y_test)):
if y_test[:, ft][index - distance] <= y_test[:, ft][index
] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][
index] or y_test[:, ft][index - distance] >= y_test[:, ft][index
] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][
index]:
z += 1
    print('Percent of correctly predicted directions = {}%'.format(z / len(
y_test) * 100))
plt.show()
<|reserved_special_token_0|>
df_stock.drop(['Date'], axis=1, inplace=True)
<|reserved_special_token_0|>
np.random.shuffle(perm_array)
def get_next_batch(batch_size):
global index_in_epoch, x_train, perm_array
start = index_in_epoch
index_in_epoch += batch_size
if index_in_epoch > x_train.shape[0]:
np.random.shuffle(perm_array)
start = 0
index_in_epoch = batch_size
end = index_in_epoch
return x_train[perm_array[start:end]], y_train[perm_array[start:end]]
<|reserved_special_token_0|>
tf.reset_default_graph()
<|reserved_special_token_0|>
if CellType == Cell.BasicRNN:
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=
tf.nn.elu) for layer in range(n_layers)]
elif CellType == Cell.BasicLSTM:
layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=
tf.nn.elu) for layer in range(n_layers)]
elif CellType == Cell.LSTMCellPeephole:
layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons, activation=tf.nn
.leaky_relu, use_peepholes=True) for layer in range(n_layers)]
elif CellType == Cell.GRU:
layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.
leaky_relu) for layer in range(n_layers)]
<|reserved_special_token_0|>
def train_data(model_name):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for iteration in range(int(n_epochs * train_set_size / batch_size)):
x_batch, y_batch = get_next_batch(batch_size)
sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
if iteration % int(5 * train_set_size / batch_size) == 0:
mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (
iteration * batch_size / train_set_size, mse_train,
mse_valid))
saver.save(sess, 'train_models/' + model_name)
def test(model_name):
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'train_models/' + model_name)
y_test_pred = sess.run(outputs, feed_dict={X: x_test})
show_predictions(1, y_test_pred)
<|reserved_special_token_0|>
for i in y_test:
y_new.append(i[1] * 10000)
<|reserved_special_token_0|>
print(macd.calculate())
macd.validate()
<|reserved_special_token_0|>
print(tt.calculate())
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')
plt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label=
    'EMA(10)')
plt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red',
    label='MACD signal line')
plt.legend(loc='best')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cell(Enum):
BasicRNN = 1
BasicLSTM = 2
LSTMCellPeephole = 3
GRU = 4
valid_set_size_percentage = 10
test_set_size_percentage = 10
df = pd.read_csv('data_2019-01-06.csv')
df.sort_values('Date', inplace=True)
def normalize_data(df):
min_max_scaler = sklearn.preprocessing.MinMaxScaler()
df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))
df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))
df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))
df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-
1, 1))
df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape
(-1, 1))
return df
def load_data(stock, seq_len):
data_raw = stock.values
data = []
for index in range(len(data_raw) - seq_len):
data.append(data_raw[index:index + seq_len])
data = np.array(data)
valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.
shape[0]))
test_set_size = int(np.round(test_set_size_percentage / 100 * data.
shape[0]))
train_set_size = data.shape[0] - (valid_set_size + test_set_size)
x_train = data[:train_set_size, :-1, :]
y_train = data[:train_set_size, -1, :]
x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]
y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]
x_test = data[train_set_size + valid_set_size:, :-1, :]
y_test = data[train_set_size + valid_set_size:, -1, :]
return [x_train, y_train, x_valid, y_valid, x_test, y_test]
def show_predictions(ft, y_test_pred):
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',
label='test target')
plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=
'green', label='test prediction')
plt.title('future stock prices')
plt.xlabel('time [days]')
plt.ylabel('normalized price')
plt.legend(loc='best')
x = 0
error_percent = 5
for index in range(0, len(y_test)):
if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test
[:, ft][index]) * 100 < error_percent:
x += 1
    print('Percent of predictions which error is less than {}% = {}%'.
format(error_percent, x / len(y_test) * 100))
z = 0
distance = 10
for index in range(distance, len(y_test)):
if y_test[:, ft][index - distance] <= y_test[:, ft][index
] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][
index] or y_test[:, ft][index - distance] >= y_test[:, ft][index
] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][
index]:
z += 1
    print('Percent of correctly predicted directions = {}%'.format(z / len(
y_test) * 100))
plt.show()
df_stock = df.copy()
df_stock.drop(['Date'], axis=1, inplace=True)
cols = list(df_stock.columns.values)
df_stock_norm = df_stock.copy()
df_stock_norm = normalize_data(df_stock_norm)
seq_len = 50
x_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm,
seq_len)
index_in_epoch = 0
perm_array = np.arange(x_train.shape[0])
np.random.shuffle(perm_array)
def get_next_batch(batch_size):
global index_in_epoch, x_train, perm_array
start = index_in_epoch
index_in_epoch += batch_size
if index_in_epoch > x_train.shape[0]:
np.random.shuffle(perm_array)
start = 0
index_in_epoch = batch_size
end = index_in_epoch
return x_train[perm_array[start:end]], y_train[perm_array[start:end]]
CellType = Cell.BasicRNN
n_steps = seq_len - 1
n_inputs = 5
n_neurons = 200
n_outputs = 5
n_layers = 2
learning_rate = 0.001
batch_size = 50
n_epochs = 10
train_set_size = x_train.shape[0]
test_set_size = x_test.shape[0]
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_outputs])
if CellType == Cell.BasicRNN:
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=
tf.nn.elu) for layer in range(n_layers)]
elif CellType == Cell.BasicLSTM:
layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=
tf.nn.elu) for layer in range(n_layers)]
elif CellType == Cell.LSTMCellPeephole:
layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons, activation=tf.nn
.leaky_relu, use_peepholes=True) for layer in range(n_layers)]
elif CellType == Cell.GRU:
layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.
leaky_relu) for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
outputs = outputs[:, n_steps - 1, :]
loss = tf.reduce_mean(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
def train_data(model_name):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for iteration in range(int(n_epochs * train_set_size / batch_size)):
x_batch, y_batch = get_next_batch(batch_size)
sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
if iteration % int(5 * train_set_size / batch_size) == 0:
mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (
iteration * batch_size / train_set_size, mse_train,
mse_valid))
saver.save(sess, 'train_models/' + model_name)
def test(model_name):
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'train_models/' + model_name)
y_test_pred = sess.run(outputs, feed_dict={X: x_test})
show_predictions(1, y_test_pred)
model = ['train_model', 'train_model_LSTM', 'train_model_with_batch_500',
'train_model_with_layers_4', 'train_model_with_volume',
'model_seq_len_100', 'model_GRU', 'model_LSTM_pipehole']
y_new = []
for i in y_test:
y_new.append(i[1] * 10000)
macd = trend.MovingAverageConvergenceDivergence(y_new)
print(macd.calculate())
macd.validate()
tt = trend.ExponentialMovingAverage(y_new, 10)
print(tt.calculate())
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')
plt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label=
    'EMA(10)')
plt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red',
    label='MACD signal line')
plt.legend(loc='best')
plt.show()
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import sklearn
import sklearn.preprocessing
import matplotlib.pyplot as plt
import tensorflow as tf
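# NOTE: this script targets TensorFlow 1.x (tf.contrib, tf.placeholder, tf.Session);
# it will not run unchanged on TF 2.x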
from enum import Enum
from pytalib.indicators import trend
from pytalib.indicators import base
class Cell(Enum):
BasicRNN = 1
BasicLSTM = 2
LSTMCellPeephole = 3
GRU = 4
valid_set_size_percentage = 10
test_set_size_percentage = 10
df = pd.read_csv('data_2019-01-06.csv')
df.sort_values('Date', inplace=True)
# function for min-max normalization of stock
def normalize_data(df):
min_max_scaler = sklearn.preprocessing.MinMaxScaler()
df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))
df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))
df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))
df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-1, 1))
df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape(-1, 1))
return df
# function to create train, validation, test data given stock data and sequence length
def load_data(stock, seq_len):
data_raw = stock.values # convert to numpy array
data = []
# create all possible sequences of length seq_len
for index in range(len(data_raw) - seq_len):
data.append(data_raw[index: index + seq_len])
data = np.array(data)
valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.shape[0]))
test_set_size = int(np.round(test_set_size_percentage / 100 * data.shape[0]))
train_set_size = data.shape[0] - (valid_set_size + test_set_size)
x_train = data[:train_set_size, :-1, :]
y_train = data[:train_set_size, -1, :]
x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]
y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]
x_test = data[train_set_size + valid_set_size:, :-1, :]
y_test = data[train_set_size + valid_set_size:, -1, :]
return [x_train, y_train, x_valid, y_valid, x_test, y_test]
# show predictions: 0 = open, 1 = close, 2 = highest, 3 = lowest, 4 = volume
def show_predictions(ft, y_test_pred):
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(y_test.shape[0]),
y_test[:, ft], color='black', label='test target')
plt.plot(np.arange(y_test_pred.shape[0]),
y_test_pred[:, ft], color='green', label='test prediction')
plt.title('future stock prices')
plt.xlabel('time [days]')
plt.ylabel('normalized price')
plt.legend(loc='best')
x = 0
error_percent = 5
for index in range(0, len(y_test)):
if (abs((y_test_pred[:, ft][index] - y_test[:, ft][index])) / abs(y_test[:, ft][index]) * 100) < error_percent:
x += 1
print("Percent of predictions which error is less then {}% = {}%".format(error_percent, x / len(y_test) * 100))
# Calculating the direction between 2 points using true values and predicted values
z = 0
distance = 10
for index in range(distance, len(y_test)):
if (y_test[:, ft][index - distance] <= y_test[:, ft][index] and y_test_pred[:, ft][index - distance] <=
y_test_pred[:, ft]
[index]) or (
y_test[:, ft][index - distance] >= y_test[:, ft][index] and y_test_pred[:, ft][index - distance]
>= y_test_pred[:, ft][index]):
z += 1
print("Percent of correct predicted direction = {}%".format(z / len(y_test) * 100))
plt.show()
# choose one stock
df_stock = df.copy()
df_stock.drop(['Date'], axis=1, inplace=True)
cols = list(df_stock.columns.values)
# normalize stock
df_stock_norm = df_stock.copy()
df_stock_norm = normalize_data(df_stock_norm)
# create train, test data
seq_len = 50 # choose sequence length
x_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm, seq_len)
index_in_epoch = 0
perm_array = np.arange(x_train.shape[0])
np.random.shuffle(perm_array)
# function to get the next batch
def get_next_batch(batch_size):
global index_in_epoch, x_train, perm_array
start = index_in_epoch
index_in_epoch += batch_size
if index_in_epoch > x_train.shape[0]:
np.random.shuffle(perm_array) # shuffle permutation array
start = 0 # start next epoch
index_in_epoch = batch_size
end = index_in_epoch
return x_train[perm_array[start:end]], y_train[perm_array[start:end]]
# parameters
CellType = Cell.BasicRNN
n_steps = seq_len - 1
n_inputs = 5
n_neurons = 200
n_outputs = 5
n_layers = 2
learning_rate = 0.001
batch_size = 50
n_epochs = 10
train_set_size = x_train.shape[0]
test_set_size = x_test.shape[0]
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_outputs])
if CellType == Cell.BasicRNN:
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.elu)
for layer in range(n_layers)]
elif CellType == Cell.BasicLSTM:
layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=tf.nn.elu)
for layer in range(n_layers)]
elif CellType == Cell.LSTMCellPeephole:
layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons,
activation=tf.nn.leaky_relu, use_peepholes=True)
for layer in range(n_layers)]
elif CellType == Cell.GRU:
layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.leaky_relu)
for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
outputs = outputs[:, n_steps - 1, :] # keep only last output of sequence
loss = tf.reduce_mean(tf.square(outputs - y)) # loss function = mean squared error
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
# run graph
def train_data(model_name):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for iteration in range(int(n_epochs * train_set_size / batch_size)):
x_batch, y_batch = get_next_batch(batch_size) # fetch the next training batch
sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
if iteration % int(5 * train_set_size / batch_size) == 0:
mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (
iteration * batch_size / train_set_size, mse_train, mse_valid))
saver.save(sess, 'train_models/' + model_name)
def test(model_name):
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'train_models/' + model_name)
y_test_pred = sess.run(outputs, feed_dict={X: x_test})
show_predictions(1, y_test_pred)
model = ['train_model', 'train_model_LSTM', 'train_model_with_batch_500', 'train_model_with_layers_4',
'train_model_with_volume', 'model_seq_len_100', "model_GRU", 'model_LSTM_pipehole']
# train_data(model[0])
# test(model[0])
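# Rescale the normalized test column (x10000) before feeding it to pytalib's
# MACD/EMA indicators -- presumably just to work with readable magnitudes (assumption)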
y_new = []
for i in y_test:
y_new.append(i[1] * 10000)
macd = trend.MovingAverageConvergenceDivergence(y_new)
print(macd.calculate())
macd.validate()
tt = trend.ExponentialMovingAverage(y_new, 10)
print(tt.calculate())
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')
plt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label='EMA(10)')
plt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red', label='MACD signal line')
plt.legend(loc='best')
plt.show()
|
flexible
|
{
"blob_id": "4379d89c2ada89822acbf523d2e364599f996f8c",
"index": 5456,
"step-1": "<mask token>\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\n<mask token>\n\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-\n 1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape\n (-1, 1))\n return df\n\n\ndef load_data(stock, seq_len):\n data_raw = stock.values\n data = []\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index:index + seq_len])\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.\n shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.\n shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',\n label='test target')\n plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=\n 'green', label='test prediction')\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test\n [:, ft][index]) * 100 < error_percent:\n x += 1\n print('Percent of predictions which error is less then {}% = {}%'.\n format(error_percent, x / len(y_test) * 100))\n z = 0\n distance = 10\n for index in range(distance, len(y_test)):\n if y_test[:, ft][index - distance] <= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][\n index] or y_test[:, ft][index - distance] >= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][\n index]:\n z += 1\n print('Percent of correct predicted direction = {}%'.format(z / len(\n y_test) * 100))\n plt.show()\n\n\n<mask token>\n\n\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array)\n start = 0\n index_in_epoch = batch_size\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\n<mask token>\n\n\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size)\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = 
%.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train,\n mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\n<mask token>\n\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-\n 1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape\n (-1, 1))\n return df\n\n\ndef load_data(stock, seq_len):\n data_raw = stock.values\n data = []\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index:index + seq_len])\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.\n shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.\n shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',\n label='test target')\n plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=\n 'green', label='test prediction')\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test\n [:, ft][index]) * 100 < error_percent:\n x += 1\n print('Percent of predictions which error is less then {}% = {}%'.\n format(error_percent, x / len(y_test) * 100))\n z = 0\n distance = 10\n for index in range(distance, len(y_test)):\n if y_test[:, ft][index - distance] <= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][\n index] or y_test[:, ft][index - distance] >= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][\n index]:\n z += 1\n print('Percent of correct predicted direction = {}%'.format(z / len(\n y_test) * 100))\n plt.show()\n\n\n<mask token>\n\n\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array)\n start = 0\n index_in_epoch = batch_size\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\n<mask token>\n\n\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size)\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = 
%.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train,\n mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\ndef test(model_name):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, 'train_models/' + model_name)\n y_test_pred = sess.run(outputs, feed_dict={X: x_test})\n show_predictions(1, y_test_pred)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\n<mask token>\ndf.sort_values('Date')\n\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-\n 1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape\n (-1, 1))\n return df\n\n\ndef load_data(stock, seq_len):\n data_raw = stock.values\n data = []\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index:index + seq_len])\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.\n shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.\n shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',\n label='test target')\n plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=\n 'green', label='test prediction')\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test\n [:, ft][index]) * 100 < error_percent:\n x += 1\n print('Percent of predictions which error is less then {}% = {}%'.\n format(error_percent, x / len(y_test) * 100))\n z = 0\n distance = 10\n for index in range(distance, len(y_test)):\n if y_test[:, ft][index - distance] <= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][\n index] or y_test[:, ft][index - distance] >= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][\n index]:\n z += 1\n print('Percent of correct predicted direction = {}%'.format(z / len(\n y_test) * 100))\n plt.show()\n\n\n<mask token>\ndf_stock.drop(['Date'], 1, inplace=True)\n<mask token>\nnp.random.shuffle(perm_array)\n\n\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array)\n start = 0\n index_in_epoch = batch_size\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\n<mask token>\ntf.reset_default_graph()\n<mask token>\nif CellType == Cell.BasicRNN:\n layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=\n tf.nn.elu) for layer in range(n_layers)]\nelif CellType == Cell.BasicLSTM:\n layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=\n tf.nn.elu) for layer in range(n_layers)]\nelif CellType == Cell.LSTMCellPeephole:\n layers = 
[tf.contrib.rnn.LSTMCell(num_units=n_neurons, activation=tf.nn\n .leaky_relu, use_peepholes=True) for layer in range(n_layers)]\nelif CellType == Cell.GRU:\n layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.\n leaky_relu) for layer in range(n_layers)]\n<mask token>\n\n\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size)\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train,\n mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\ndef test(model_name):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, 'train_models/' + model_name)\n y_test_pred = sess.run(outputs, feed_dict={X: x_test})\n show_predictions(1, y_test_pred)\n\n\n<mask token>\nfor i in y_test:\n y_new.append(i[1] * 10000)\n<mask token>\nprint(macd.calculate())\nmacd.validate()\n<mask token>\nprint(tt.calculate())\nplt.figure(figsize=(15, 5))\nplt.subplot(1, 1, 1)\nplt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')\nplt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label=\n 'test prediction')\nplt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red',\n label='test prediction')\nplt.show()\n",
"step-4": "<mask token>\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\nvalid_set_size_percentage = 10\ntest_set_size_percentage = 10\ndf = pd.read_csv('data_2019-01-06.csv')\ndf.sort_values('Date')\n\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-\n 1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape\n (-1, 1))\n return df\n\n\ndef load_data(stock, seq_len):\n data_raw = stock.values\n data = []\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index:index + seq_len])\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.\n shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.\n shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',\n label='test target')\n plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=\n 'green', label='test prediction')\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test\n [:, ft][index]) * 100 < error_percent:\n x += 1\n print('Percent of predictions which error is less then {}% = {}%'.\n format(error_percent, x / len(y_test) * 100))\n z = 0\n distance = 10\n for index in range(distance, len(y_test)):\n if y_test[:, ft][index - distance] <= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][\n index] or y_test[:, ft][index - distance] >= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][\n index]:\n z += 1\n print('Percent of correct predicted direction = {}%'.format(z / len(\n y_test) * 100))\n plt.show()\n\n\ndf_stock = df.copy()\ndf_stock.drop(['Date'], 1, inplace=True)\ncols = list(df_stock.columns.values)\ndf_stock_norm = df_stock.copy()\ndf_stock_norm = normalize_data(df_stock_norm)\nseq_len = 50\nx_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm,\n seq_len)\nindex_in_epoch = 0\nperm_array = np.arange(x_train.shape[0])\nnp.random.shuffle(perm_array)\n\n\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array)\n start = 0\n index_in_epoch = batch_size\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\nCellType = Cell.BasicRNN\nn_steps = seq_len - 1\nn_inputs = 
5\nn_neurons = 200\nn_outputs = 5\nn_layers = 2\nlearning_rate = 0.001\nbatch_size = 50\nn_epochs = 10\ntrain_set_size = x_train.shape[0]\ntest_set_size = x_test.shape[0]\ntf.reset_default_graph()\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_outputs])\nif CellType == Cell.BasicRNN:\n layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=\n tf.nn.elu) for layer in range(n_layers)]\nelif CellType == Cell.BasicLSTM:\n layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=\n tf.nn.elu) for layer in range(n_layers)]\nelif CellType == Cell.LSTMCellPeephole:\n layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons, activation=tf.nn\n .leaky_relu, use_peepholes=True) for layer in range(n_layers)]\nelif CellType == Cell.GRU:\n layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.\n leaky_relu) for layer in range(n_layers)]\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)\nrnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\noutputs = outputs[:, n_steps - 1, :]\nloss = tf.reduce_mean(tf.square(outputs - y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\n\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size)\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train,\n mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\ndef test(model_name):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, 'train_models/' + model_name)\n y_test_pred = sess.run(outputs, feed_dict={X: x_test})\n show_predictions(1, y_test_pred)\n\n\nmodel = ['train_model', 'train_model_LSTM', 'train_model_with_batch_500',\n 'train_model_with_layers_4', 'train_model_with_volume',\n 'model_seq_len_100', 'model_GRU', 'model_LSTM_pipehole']\ny_new = []\nfor i in y_test:\n y_new.append(i[1] * 10000)\nmacd = trend.MovingAverageConvergenceDivergence(y_new)\nprint(macd.calculate())\nmacd.validate()\ntt = trend.ExponentialMovingAverage(y_new, 10)\nprint(tt.calculate())\nplt.figure(figsize=(15, 5))\nplt.subplot(1, 1, 1)\nplt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')\nplt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label=\n 'test prediction')\nplt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red',\n label='test prediction')\nplt.show()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport sklearn\nimport sklearn.preprocessing\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom enum import Enum\nfrom pytalib.indicators import trend\nfrom pytalib.indicators import base\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\nvalid_set_size_percentage = 10\ntest_set_size_percentage = 10\n\ndf = pd.read_csv('data_2019-01-06.csv')\ndf.sort_values('Date')\n\n\n# function for min-max normalization of stock\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape(-1, 1))\n return df\n\n\n# function to create train, validation, test data given stock data and sequence length\ndef load_data(stock, seq_len):\n data_raw = stock.values # convert to numpy array\n data = []\n\n # create all possible sequences of length seq_len\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index: index + seq_len])\n\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\n# show predictions: 0 = open, 1 = close, 2 = highest, 3 = lowest, 4 = volume\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]),\n y_test[:, ft], color='black', label='test target')\n\n plt.plot(np.arange(y_test_pred.shape[0]),\n y_test_pred[:, ft], color='green', label='test prediction')\n\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if (abs((y_test_pred[:, ft][index] - y_test[:, ft][index])) / abs(y_test[:, ft][index]) * 100) < error_percent:\n x += 1\n print(\"Percent of predictions which error is less then {}% = {}%\".format(error_percent, x / len(y_test) * 100))\n\n # Calculating the direction between 2 points using true values and predicted values\n z = 0\n distance = 10\n for index in range(distance, len(y_test)):\n if (y_test[:, ft][index - distance] <= y_test[:, ft][index] and y_test_pred[:, ft][index - distance] <=\n y_test_pred[:, ft]\n [index]) or (\n y_test[:, ft][index - distance] >= y_test[:, ft][index] and y_test_pred[:, ft][index - distance]\n >= y_test_pred[:, ft][index]):\n z += 1\n print(\"Percent of correct predicted direction = {}%\".format(z / len(y_test) * 100))\n\n plt.show()\n\n\n# choose one stock\ndf_stock = df.copy()\ndf_stock.drop(['Date'], 1, inplace=True)\ncols = list(df_stock.columns.values)\n\n# normalize stock\ndf_stock_norm = 
df_stock.copy()\ndf_stock_norm = normalize_data(df_stock_norm)\n\n# create train, test data\nseq_len = 50 # choose sequence length\nx_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm, seq_len)\n\nindex_in_epoch = 0\nperm_array = np.arange(x_train.shape[0])\nnp.random.shuffle(perm_array)\n\n\n# function to get the next batch\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array) # shuffle permutation array\n start = 0 # start next epoch\n index_in_epoch = batch_size\n\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\n# parameters\nCellType = Cell.BasicRNN\nn_steps = seq_len - 1\nn_inputs = 5\nn_neurons = 200\nn_outputs = 5\nn_layers = 2\nlearning_rate = 0.001\nbatch_size = 50\nn_epochs = 10\ntrain_set_size = x_train.shape[0]\ntest_set_size = x_test.shape[0]\n\ntf.reset_default_graph()\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_outputs])\n\nif CellType == Cell.BasicRNN:\n layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.elu)\n for layer in range(n_layers)]\nelif CellType == Cell.BasicLSTM:\n layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=tf.nn.elu)\n for layer in range(n_layers)]\nelif CellType == Cell.LSTMCellPeephole:\n layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons,\n activation=tf.nn.leaky_relu, use_peepholes=True)\n for layer in range(n_layers)]\nelif CellType == Cell.GRU:\n layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.leaky_relu)\n for layer in range(n_layers)]\n\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)\nrnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\noutputs = outputs[:, n_steps - 1, :] # keep only last output of sequence\n\nloss = tf.reduce_mean(tf.square(outputs - y)) # loss function = mean squared error\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\n\n# run graph\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size) # fetch the next training batch\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train, mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\ndef test(model_name):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, 'train_models/' + model_name)\n y_test_pred = sess.run(outputs, feed_dict={X: x_test})\n show_predictions(1, y_test_pred)\n\n\nmodel = ['train_model', 'train_model_LSTM', 'train_model_with_batch_500', 'train_model_with_layers_4',\n 'train_model_with_volume', 'model_seq_len_100', \"model_GRU\", 'model_LSTM_pipehole']\n\n# train_data(model[0])\n# test(model[0])\ny_new = []\nfor 
i in y_test:\n y_new.append(i[1] * 10000)\nmacd = trend.MovingAverageConvergenceDivergence(y_new)\nprint(macd.calculate())\nmacd.validate()\n\ntt = trend.ExponentialMovingAverage(y_new, 10)\nprint(tt.calculate())\n\nplt.figure(figsize=(15, 5))\nplt.subplot(1, 1, 1)\nplt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')\nplt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label='test prediction')\nplt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red', label='test prediction')\nplt.show()\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
# coding=utf-8
import pyautogui
from xpinyin import Pinyin
rubbish_dic=1
if rubbish_dic==0:
chinese_rubbish=(
u"草泥马",
u"你妈死了",
u"你是不是",
u"低能",
u"人话都听不懂",
u"没家教的狗东西",
)
elif rubbish_dic==1:
rubbish_file=open("rubbish_dic.txt")
chinese_rubbish=rubbish_file.read().splitlines()
rubbish_set=[] #final pinyin key sequences to type
p=Pinyin() #used for pinyin conversion
#switch focus to the target window by clicking, then press enter
def trans_screen():
pyautogui.doubleClick(492,974)
pyautogui.typewrite(['enter'],0.01)
#convert the Chinese text to pinyin
def trans_chinese():
for c_rubbish in chinese_rubbish:
pin=p.get_pinyin(c_rubbish,'')
pin_list=list(pin)
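        # append "1" so that (presumably) the IME picks its first candidate when the pinyin is typed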
pin_list.append("1")
rubbish_set.append(pin_list)
#type and send the text
def send_rubbish():
for p_rubbish in rubbish_set:
pyautogui.typewrite(p_rubbish,0.01)
pyautogui.typewrite(['enter'],0.01)
#debug helper: print the current contents of rubbish_set
def chk_rubbish():
for p_dirty in rubbish_set:
print(p_dirty)
if __name__ == "__main__":
trans_chinese()
#chk_rubbish()
trans_screen()
send_rubbish()
|
normal
|
{
"blob_id": "23e673909b2f1eb9a265ce84ad63464e20e99c6a",
"index": 3449,
"step-1": "<mask token>\n\n\ndef trans_screen():\n pyautogui.doubleClick(492, 974)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin = p.get_pinyin(c_rubbish, '')\n pin_list = list(pin)\n pin_list.append('1')\n rubbish_set.append(pin_list)\n\n\ndef send_rubbish():\n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish, 0.01)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\n\n<mask token>\n",
"step-2": "<mask token>\nif rubbish_dic == 0:\n chinese_rubbish = u'草泥马', u'你妈死了', u'你是不是', u'低能', u'人话都听不懂', u'没家教的狗东西'\nelif rubbish_dic == 1:\n rubbish_file = open('rubbish_dic.txt')\n chinese_rubbish = rubbish_file.read().splitlines()\n<mask token>\n\n\ndef trans_screen():\n pyautogui.doubleClick(492, 974)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin = p.get_pinyin(c_rubbish, '')\n pin_list = list(pin)\n pin_list.append('1')\n rubbish_set.append(pin_list)\n\n\ndef send_rubbish():\n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish, 0.01)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\n\nif __name__ == '__main__':\n trans_chinese()\n trans_screen()\n send_rubbish()\n",
"step-3": "<mask token>\nrubbish_dic = 1\nif rubbish_dic == 0:\n chinese_rubbish = u'草泥马', u'你妈死了', u'你是不是', u'低能', u'人话都听不懂', u'没家教的狗东西'\nelif rubbish_dic == 1:\n rubbish_file = open('rubbish_dic.txt')\n chinese_rubbish = rubbish_file.read().splitlines()\nrubbish_set = []\np = Pinyin()\n\n\ndef trans_screen():\n pyautogui.doubleClick(492, 974)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin = p.get_pinyin(c_rubbish, '')\n pin_list = list(pin)\n pin_list.append('1')\n rubbish_set.append(pin_list)\n\n\ndef send_rubbish():\n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish, 0.01)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\n\nif __name__ == '__main__':\n trans_chinese()\n trans_screen()\n send_rubbish()\n",
"step-4": "import pyautogui\nfrom xpinyin import Pinyin\nrubbish_dic = 1\nif rubbish_dic == 0:\n chinese_rubbish = u'草泥马', u'你妈死了', u'你是不是', u'低能', u'人话都听不懂', u'没家教的狗东西'\nelif rubbish_dic == 1:\n rubbish_file = open('rubbish_dic.txt')\n chinese_rubbish = rubbish_file.read().splitlines()\nrubbish_set = []\np = Pinyin()\n\n\ndef trans_screen():\n pyautogui.doubleClick(492, 974)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin = p.get_pinyin(c_rubbish, '')\n pin_list = list(pin)\n pin_list.append('1')\n rubbish_set.append(pin_list)\n\n\ndef send_rubbish():\n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish, 0.01)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\n\nif __name__ == '__main__':\n trans_chinese()\n trans_screen()\n send_rubbish()\n",
"step-5": "# coding=utf-8\nimport pyautogui\nfrom xpinyin import Pinyin\n\nrubbish_dic=1\n\nif rubbish_dic==0:\n chinese_rubbish=(\n u\"草泥马\",\n u\"你妈死了\",\n u\"你是不是\",\n u\"低能\",\n u\"人话都听不懂\",\n u\"没家教的狗东西\", \n )\nelif rubbish_dic==1:\n rubbish_file=open(\"rubbish_dic.txt\")\n chinese_rubbish=rubbish_file.read().splitlines()\n\n\nrubbish_set=[] #最终的拼音方式\np=Pinyin() #用于转换拼音\n\n#通过点击的方式切屏 \ndef trans_screen():\n pyautogui.doubleClick(492,974)\n pyautogui.typewrite(['enter'],0.01)\n\n#将中文转化成拼音\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin=p.get_pinyin(c_rubbish,'')\n pin_list=list(pin)\n pin_list.append(\"1\")\n rubbish_set.append(pin_list)\n\n#发送text\ndef send_rubbish(): \n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish,0.01)\n pyautogui.typewrite(['enter'],0.01)\n\n#查看当前的rubbish_set内容\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\nif __name__ == \"__main__\":\n trans_chinese()\n #chk_rubbish()\n trans_screen()\n send_rubbish()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django import forms
from . import models
class PhotoForm(forms.Form):
image = forms.ImageField()
|
normal
|
{
"blob_id": "3983f8dfb9c7b7e664af05857a0f6fe380154424",
"index": 3684,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PhotoForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PhotoForm(forms.Form):\n image = forms.ImageField()\n",
"step-4": "from django import forms\nfrom . import models\n\n\nclass PhotoForm(forms.Form):\n image = forms.ImageField()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
IS_ZERO = lambda x: x == 0
ONE = 1
SUB1 = lambda x: x - 1
MULT = lambda x: lambda y: x * y
IF = lambda cond: lambda t_func: lambda f_func: t_func(None) if cond else f_func(None)
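# factorial via self-application: each lambda receives itself as `myself`
# and recurses by calling myself(myself), so no explicit recursion is needed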
print(
(
lambda myself: (
lambda n: (
IF(
IS_ZERO(n)
)(
lambda _: ONE
)(
lambda _: MULT(n)( myself(myself)(SUB1(n)) )
)
)
)
)(
lambda myself: (
lambda n: (
IF(
IS_ZERO(n)
)(
lambda _: ONE
)(
lambda _: MULT(n)( myself(myself)(SUB1(n)) )
)
)
)
)
(6)
)
|
normal
|
{
"blob_id": "f8601ed7ba7c2b8d2dd8d5f74f7b5ae8e99dad78",
"index": 186,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:\n MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(\n n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))\n",
"step-3": "IS_ZERO = lambda x: x == 0\nONE = 1\nSUB1 = lambda x: x - 1\nMULT = lambda x: lambda y: x * y\nIF = lambda cond: lambda t_func: lambda f_func: t_func(None\n ) if cond else f_func(None)\nprint((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:\n MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(\n n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))\n",
"step-4": "IS_ZERO = lambda x: x == 0\nONE = 1\nSUB1 = lambda x: x - 1\nMULT = lambda x: lambda y: x * y\nIF = lambda cond: lambda t_func: lambda f_func: t_func(None) if cond else f_func(None)\n\nprint(\n (\n lambda myself: (\n lambda n: (\n IF(\n IS_ZERO(n)\n )(\n lambda _: ONE\n )(\n lambda _: MULT(n)( myself(myself)(SUB1(n)) )\n )\n )\n )\n )(\n lambda myself: (\n lambda n: (\n IF(\n IS_ZERO(n)\n )(\n lambda _: ONE\n )(\n lambda _: MULT(n)( myself(myself)(SUB1(n)) )\n )\n )\n )\n )\n (6)\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
n= int(sys.stdin.readline())
dp = {1:'SK', 2: 'CY', 3:'SK', 4:'SK', 5:'SK',6:'SK'}
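# dp[i] = winner with i stones on the table (moves: take 1, 3 or 4 stones;
# whoever takes the last stone wins, and 'SK' moves first)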
def sol(k):
if k in dp:
return dp[k]
else:
for i in range(7, k+1):
if dp[i-3]=='SK' and dp[i-1]=='SK' and dp[i-4]=='SK':
dp[i] = 'CY'
else:
dp[i] = 'SK'
return dp[k]
print(sol(n))
|
normal
|
{
"blob_id": "4b85479af7d65d208fab08c10afbf66086877329",
"index": 8981,
"step-1": "<mask token>\n\n\ndef sol(k):\n if k in dp:\n return dp[k]\n else:\n for i in range(7, k + 1):\n if dp[i - 3] == 'SK' and dp[i - 1] == 'SK' and dp[i - 4] == 'SK':\n dp[i] = 'CY'\n else:\n dp[i] = 'SK'\n return dp[k]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sol(k):\n if k in dp:\n return dp[k]\n else:\n for i in range(7, k + 1):\n if dp[i - 3] == 'SK' and dp[i - 1] == 'SK' and dp[i - 4] == 'SK':\n dp[i] = 'CY'\n else:\n dp[i] = 'SK'\n return dp[k]\n\n\nprint(sol(n))\n",
"step-3": "<mask token>\nn = int(sys.stdin.readline())\ndp = {(1): 'SK', (2): 'CY', (3): 'SK', (4): 'SK', (5): 'SK', (6): 'SK'}\n\n\ndef sol(k):\n if k in dp:\n return dp[k]\n else:\n for i in range(7, k + 1):\n if dp[i - 3] == 'SK' and dp[i - 1] == 'SK' and dp[i - 4] == 'SK':\n dp[i] = 'CY'\n else:\n dp[i] = 'SK'\n return dp[k]\n\n\nprint(sol(n))\n",
"step-4": "import sys\nn = int(sys.stdin.readline())\ndp = {(1): 'SK', (2): 'CY', (3): 'SK', (4): 'SK', (5): 'SK', (6): 'SK'}\n\n\ndef sol(k):\n if k in dp:\n return dp[k]\n else:\n for i in range(7, k + 1):\n if dp[i - 3] == 'SK' and dp[i - 1] == 'SK' and dp[i - 4] == 'SK':\n dp[i] = 'CY'\n else:\n dp[i] = 'SK'\n return dp[k]\n\n\nprint(sol(n))\n",
"step-5": "import sys\n\nn= int(sys.stdin.readline())\n\ndp = {1:'SK', 2: 'CY', 3:'SK', 4:'SK', 5:'SK',6:'SK'}\n\ndef sol(k):\n if k in dp:\n return dp[k]\n else:\n for i in range(7, k+1):\n if dp[i-3]=='SK' and dp[i-1]=='SK' and dp[i-4]=='SK':\n dp[i] = 'CY'\n else:\n dp[i] = 'SK'\n return dp[k]\n\nprint(sol(n))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class FinalLevel(BaseLevel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FinalLevel(BaseLevel):
def __init__(self):
lvl_map = DefinedMap('levels/demon_lair.xp')
super().__init__(lvl_map.width, lvl_map.height)
self.map = lvl_map
self.set_entrance(50, 29)
boss = Daemon(8, 27, 10)
self.add_entity(boss)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FinalLevel(BaseLevel):
def __init__(self):
lvl_map = DefinedMap('levels/demon_lair.xp')
super().__init__(lvl_map.width, lvl_map.height)
self.map = lvl_map
self.set_entrance(50, 29)
boss = Daemon(8, 27, 10)
self.add_entity(boss)
def add_player(self, player):
super().add_player(player)
self.player.fov = 100
self.player.weapon = Axe()
<|reserved_special_token_1|>
from .base import BaseLevel
from map_objects import DefinedMap
from entity.monster import Daemon
from entity.weapons import Axe
class FinalLevel(BaseLevel):
def __init__(self):
lvl_map = DefinedMap('levels/demon_lair.xp')
super().__init__(lvl_map.width, lvl_map.height)
self.map = lvl_map
self.set_entrance(50, 29)
boss = Daemon(8, 27, 10)
self.add_entity(boss)
def add_player(self, player):
super().add_player(player)
self.player.fov = 100
self.player.weapon = Axe()
|
flexible
|
{
"blob_id": "7ba8f0bd962413f6ff825df27330447b11360f10",
"index": 6089,
"step-1": "<mask token>\n\n\nclass FinalLevel(BaseLevel):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FinalLevel(BaseLevel):\n\n def __init__(self):\n lvl_map = DefinedMap('levels/demon_lair.xp')\n super().__init__(lvl_map.width, lvl_map.height)\n self.map = lvl_map\n self.set_entrance(50, 29)\n boss = Daemon(8, 27, 10)\n self.add_entity(boss)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FinalLevel(BaseLevel):\n\n def __init__(self):\n lvl_map = DefinedMap('levels/demon_lair.xp')\n super().__init__(lvl_map.width, lvl_map.height)\n self.map = lvl_map\n self.set_entrance(50, 29)\n boss = Daemon(8, 27, 10)\n self.add_entity(boss)\n\n def add_player(self, player):\n super().add_player(player)\n self.player.fov = 100\n self.player.weapon = Axe()\n",
"step-4": "from .base import BaseLevel\nfrom map_objects import DefinedMap\nfrom entity.monster import Daemon\nfrom entity.weapons import Axe\n\n\nclass FinalLevel(BaseLevel):\n\n def __init__(self):\n lvl_map = DefinedMap('levels/demon_lair.xp')\n super().__init__(lvl_map.width, lvl_map.height)\n self.map = lvl_map\n self.set_entrance(50, 29)\n boss = Daemon(8, 27, 10)\n self.add_entity(boss)\n\n def add_player(self, player):\n super().add_player(player)\n self.player.fov = 100\n self.player.weapon = Axe()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# My Godzilla Hat Code - @alt_bier
from adafruit_circuitplayground.express import cpx
import random
#cpx.pixels.brightness = 0.5 # 50 pct
cpx.pixels.fill((0, 0, 0)) # Turn off the NeoPixels if they're on!
# Function to give us a nice color swirl on the built-in NeoPixels; returns an (R,G,B) tuple
def wheeln(pos, sft):
if (pos + sft) > 255:
pos = (pos + sft) - 256
else:
pos = (pos + sft)
if (pos < 0) or (pos > 255):
return (0, 0, 0)
if pos < 85:
return (int(255 - pos*3), int(pos*3), 0)
elif pos < 170:
pos -= 85
return (0, int(255 - (pos*3)), int(pos*3))
else:
pos -= 170
return (int(pos*3), 0, int(255 - pos*3))
# Function to flash random colors
def randcolor():
randgr = randrd = randbl = 0
# determine if all colors off
if (random.randint(0,14) == 1):
# if on then determine if each color is off and return an intensity value if on
if (random.randint(0,1) == 1):
randgr = random.randint(1,255)
if (random.randint(0,1) == 1):
randrd = random.randint(1,255)
if (random.randint(0,1) == 1):
randbl = random.randint(1,255)
return (randgr, randrd, randbl)
# Function to simulate a flame effect on built in NeoPixel (R,G,B)
def flame(pos, clr, sft):
# pos = position, sft = shift
if (pos + sft) > 255:
pos = (pos + sft) - 256
else:
pos = (pos + sft)
#
# RETURN VALUES
if pos < 32:
# OFF
rval = 0
elif (pos > 31) and (pos < 64):
# Low-High
rval = int((pos*8) - 249)
elif (pos > 63) and (pos < 96):
# High-Low
rval = int(767 - (pos*8))
elif (pos > 95) and (pos < 128):
# OFF
rval = 0
elif (pos > 127) and (pos < 160):
# Low-High
rval = int((pos*8) - 1017)
elif (pos > 159) and (pos < 192):
# High-Low
rval = int(1535 - (pos*8))
elif (pos > 191) and (pos < 224):
# OFF
rval = 0
elif (pos > 223):
# OFF
rval = 0
#
# RETURN COLOR
if (clr == 0):
# Red
return (rval, 0, 0)
elif (clr == 1):
# Red & Green
return (rval, rval, 0)
elif (clr == 2):
# Green
return (0, rval, 0)
elif (clr == 3):
# Green & Blue
return (0, rval, rval)
elif (clr == 4):
# Blue
return (0, rval, rval)
elif (clr == 5):
# Blue & Red
return (rval, 0, rval)
else:
return (0, 0, 0)
# Function to turn off all the built in NeoPixels
def alloff():
cpx.pixels.fill((0, 0, 0))
mode = 1
pusha = 0
pushb = 0
clr = 0
i = 0
while True:
# NeoPixels are cpx.pixels[0-9]
if (mode == 1):
cpx.pixels[0] = flame(i, clr, 32)
cpx.pixels[1] = flame(i, clr, 24)
cpx.pixels[2] = flame(i, clr, 16)
cpx.pixels[3] = flame(i, clr, 8)
cpx.pixels[4] = flame(i, clr, 0)
cpx.pixels[5] = flame(i, clr, 0)
cpx.pixels[6] = flame(i, clr, 8)
cpx.pixels[7] = flame(i, clr, 16)
cpx.pixels[8] = flame(i, clr, 24)
cpx.pixels[9] = flame(i, clr, 32)
elif (mode == 2):
cpx.pixels[0] = wheeln(i, 0)
cpx.pixels[1] = wheeln(i, 24)
cpx.pixels[2] = wheeln(i, 48)
cpx.pixels[3] = wheeln(i, 72)
cpx.pixels[4] = wheeln(i, 96)
cpx.pixels[5] = wheeln(i, 120)
cpx.pixels[6] = wheeln(i, 144)
cpx.pixels[7] = wheeln(i, 168)
cpx.pixels[8] = wheeln(i, 192)
cpx.pixels[9] = wheeln(i, 216)
elif (mode == 3):
cpx.pixels[0] = randcolor()
cpx.pixels[1] = randcolor()
cpx.pixels[2] = randcolor()
cpx.pixels[3] = randcolor()
cpx.pixels[4] = randcolor()
cpx.pixels[5] = randcolor()
cpx.pixels[6] = randcolor()
cpx.pixels[7] = randcolor()
cpx.pixels[8] = randcolor()
cpx.pixels[9] = randcolor()
else:
# Mode = 0 so turn All Off
alloff()
# Button A is bottom button on hat
if cpx.button_a:
print("Button A on Bottom Pressed! Changing mode to ALL OFF.")
pusha = 1
# Button B is top button on hat
if cpx.button_b:
print("Button B on Top Pressed! Changing mode.")
pushb = 1
i = (i+1) % 256
#print (i)
if (i == 255):
clr = (clr+1) % 6
if ((i == 63) | (i == 127) | (i == 191) | (i >= 255)) and (pusha == 1):
mode = 0
pusha = 0
i = 0
if ((i == 63) | (i == 127) | (i == 191) | (i >= 255)) and (pushb == 1):
mode = (mode+1)
pushb = 0
i = 0
if (mode > 3):
mode = 1
|
normal
|
{
"blob_id": "1dd223854c10e69a397098511eab50b9ebd347c8",
"index": 6027,
"step-1": "<mask token>\n\n\ndef wheeln(pos, sft):\n if pos + sft > 255:\n pos = pos + sft - 256\n else:\n pos = pos + sft\n if pos < 0 or pos > 255:\n return 0, 0, 0\n if pos < 85:\n return int(255 - pos * 3), int(pos * 3), 0\n elif pos < 170:\n pos -= 85\n return 0, int(255 - pos * 3), int(pos * 3)\n else:\n pos -= 170\n return int(pos * 3), 0, int(255 - pos * 3)\n\n\ndef randcolor():\n randgr = randrd = randbl = 0\n if random.randint(0, 14) == 1:\n if random.randint(0, 1) == 1:\n randgr = random.randint(1, 255)\n if random.randint(0, 1) == 1:\n randrd = random.randint(1, 255)\n if random.randint(0, 1) == 1:\n randbl = random.randint(1, 255)\n return randgr, randrd, randbl\n\n\ndef flame(pos, clr, sft):\n if pos + sft > 255:\n pos = pos + sft - 256\n else:\n pos = pos + sft\n if pos < 32:\n rval = 0\n elif pos > 31 and pos < 64:\n rval = int(pos * 8 - 249)\n elif pos > 63 and pos < 96:\n rval = int(767 - pos * 8)\n elif pos > 95 and pos < 128:\n rval = 0\n elif pos > 127 and pos < 160:\n rval = int(pos * 8 - 1017)\n elif pos > 159 and pos < 192:\n rval = int(1535 - pos * 8)\n elif pos > 191 and pos < 224:\n rval = 0\n elif pos > 223:\n rval = 0\n if clr == 0:\n return rval, 0, 0\n elif clr == 1:\n return rval, rval, 0\n elif clr == 2:\n return 0, rval, 0\n elif clr == 3:\n return 0, rval, rval\n elif clr == 4:\n return 0, rval, rval\n elif clr == 5:\n return rval, 0, rval\n else:\n return 0, 0, 0\n\n\ndef alloff():\n cpx.pixels.fill((0, 0, 0))\n\n\n<mask token>\n",
"step-2": "<mask token>\ncpx.pixels.fill((0, 0, 0))\n\n\ndef wheeln(pos, sft):\n if pos + sft > 255:\n pos = pos + sft - 256\n else:\n pos = pos + sft\n if pos < 0 or pos > 255:\n return 0, 0, 0\n if pos < 85:\n return int(255 - pos * 3), int(pos * 3), 0\n elif pos < 170:\n pos -= 85\n return 0, int(255 - pos * 3), int(pos * 3)\n else:\n pos -= 170\n return int(pos * 3), 0, int(255 - pos * 3)\n\n\ndef randcolor():\n randgr = randrd = randbl = 0\n if random.randint(0, 14) == 1:\n if random.randint(0, 1) == 1:\n randgr = random.randint(1, 255)\n if random.randint(0, 1) == 1:\n randrd = random.randint(1, 255)\n if random.randint(0, 1) == 1:\n randbl = random.randint(1, 255)\n return randgr, randrd, randbl\n\n\ndef flame(pos, clr, sft):\n if pos + sft > 255:\n pos = pos + sft - 256\n else:\n pos = pos + sft\n if pos < 32:\n rval = 0\n elif pos > 31 and pos < 64:\n rval = int(pos * 8 - 249)\n elif pos > 63 and pos < 96:\n rval = int(767 - pos * 8)\n elif pos > 95 and pos < 128:\n rval = 0\n elif pos > 127 and pos < 160:\n rval = int(pos * 8 - 1017)\n elif pos > 159 and pos < 192:\n rval = int(1535 - pos * 8)\n elif pos > 191 and pos < 224:\n rval = 0\n elif pos > 223:\n rval = 0\n if clr == 0:\n return rval, 0, 0\n elif clr == 1:\n return rval, rval, 0\n elif clr == 2:\n return 0, rval, 0\n elif clr == 3:\n return 0, rval, rval\n elif clr == 4:\n return 0, rval, rval\n elif clr == 5:\n return rval, 0, rval\n else:\n return 0, 0, 0\n\n\ndef alloff():\n cpx.pixels.fill((0, 0, 0))\n\n\n<mask token>\nwhile True:\n if mode == 1:\n cpx.pixels[0] = flame(i, clr, 32)\n cpx.pixels[1] = flame(i, clr, 24)\n cpx.pixels[2] = flame(i, clr, 16)\n cpx.pixels[3] = flame(i, clr, 8)\n cpx.pixels[4] = flame(i, clr, 0)\n cpx.pixels[5] = flame(i, clr, 0)\n cpx.pixels[6] = flame(i, clr, 8)\n cpx.pixels[7] = flame(i, clr, 16)\n cpx.pixels[8] = flame(i, clr, 24)\n cpx.pixels[9] = flame(i, clr, 32)\n elif mode == 2:\n cpx.pixels[0] = wheeln(i, 0)\n cpx.pixels[1] = wheeln(i, 24)\n cpx.pixels[2] = wheeln(i, 48)\n cpx.pixels[3] = wheeln(i, 72)\n cpx.pixels[4] = wheeln(i, 96)\n cpx.pixels[5] = wheeln(i, 120)\n cpx.pixels[6] = wheeln(i, 144)\n cpx.pixels[7] = wheeln(i, 168)\n cpx.pixels[8] = wheeln(i, 192)\n cpx.pixels[9] = wheeln(i, 216)\n elif mode == 3:\n cpx.pixels[0] = randcolor()\n cpx.pixels[1] = randcolor()\n cpx.pixels[2] = randcolor()\n cpx.pixels[3] = randcolor()\n cpx.pixels[4] = randcolor()\n cpx.pixels[5] = randcolor()\n cpx.pixels[6] = randcolor()\n cpx.pixels[7] = randcolor()\n cpx.pixels[8] = randcolor()\n cpx.pixels[9] = randcolor()\n else:\n alloff()\n if cpx.button_a:\n print('Button A on Bottom Pressed! Changing mode to ALL OFF.')\n pusha = 1\n if cpx.button_b:\n print('Button B on Top Pressed! Changing mode.')\n pushb = 1\n i = (i + 1) % 256\n if i == 255:\n clr = (clr + 1) % 6\n if (i == 63) | (i == 127) | (i == 191) | (i >= 255) and pusha == 1:\n mode = 0\n pusha = 0\n i = 0\n if (i == 63) | (i == 127) | (i == 191) | (i >= 255) and pushb == 1:\n mode = mode + 1\n pushb = 0\n i = 0\n if mode > 3:\n mode = 1\n",
"step-3": "<mask token>\ncpx.pixels.fill((0, 0, 0))\n\n\ndef wheeln(pos, sft):\n if pos + sft > 255:\n pos = pos + sft - 256\n else:\n pos = pos + sft\n if pos < 0 or pos > 255:\n return 0, 0, 0\n if pos < 85:\n return int(255 - pos * 3), int(pos * 3), 0\n elif pos < 170:\n pos -= 85\n return 0, int(255 - pos * 3), int(pos * 3)\n else:\n pos -= 170\n return int(pos * 3), 0, int(255 - pos * 3)\n\n\ndef randcolor():\n randgr = randrd = randbl = 0\n if random.randint(0, 14) == 1:\n if random.randint(0, 1) == 1:\n randgr = random.randint(1, 255)\n if random.randint(0, 1) == 1:\n randrd = random.randint(1, 255)\n if random.randint(0, 1) == 1:\n randbl = random.randint(1, 255)\n return randgr, randrd, randbl\n\n\ndef flame(pos, clr, sft):\n if pos + sft > 255:\n pos = pos + sft - 256\n else:\n pos = pos + sft\n if pos < 32:\n rval = 0\n elif pos > 31 and pos < 64:\n rval = int(pos * 8 - 249)\n elif pos > 63 and pos < 96:\n rval = int(767 - pos * 8)\n elif pos > 95 and pos < 128:\n rval = 0\n elif pos > 127 and pos < 160:\n rval = int(pos * 8 - 1017)\n elif pos > 159 and pos < 192:\n rval = int(1535 - pos * 8)\n elif pos > 191 and pos < 224:\n rval = 0\n elif pos > 223:\n rval = 0\n if clr == 0:\n return rval, 0, 0\n elif clr == 1:\n return rval, rval, 0\n elif clr == 2:\n return 0, rval, 0\n elif clr == 3:\n return 0, rval, rval\n elif clr == 4:\n return 0, rval, rval\n elif clr == 5:\n return rval, 0, rval\n else:\n return 0, 0, 0\n\n\ndef alloff():\n cpx.pixels.fill((0, 0, 0))\n\n\nmode = 1\npusha = 0\npushb = 0\nclr = 0\ni = 0\nwhile True:\n if mode == 1:\n cpx.pixels[0] = flame(i, clr, 32)\n cpx.pixels[1] = flame(i, clr, 24)\n cpx.pixels[2] = flame(i, clr, 16)\n cpx.pixels[3] = flame(i, clr, 8)\n cpx.pixels[4] = flame(i, clr, 0)\n cpx.pixels[5] = flame(i, clr, 0)\n cpx.pixels[6] = flame(i, clr, 8)\n cpx.pixels[7] = flame(i, clr, 16)\n cpx.pixels[8] = flame(i, clr, 24)\n cpx.pixels[9] = flame(i, clr, 32)\n elif mode == 2:\n cpx.pixels[0] = wheeln(i, 0)\n cpx.pixels[1] = wheeln(i, 24)\n cpx.pixels[2] = wheeln(i, 48)\n cpx.pixels[3] = wheeln(i, 72)\n cpx.pixels[4] = wheeln(i, 96)\n cpx.pixels[5] = wheeln(i, 120)\n cpx.pixels[6] = wheeln(i, 144)\n cpx.pixels[7] = wheeln(i, 168)\n cpx.pixels[8] = wheeln(i, 192)\n cpx.pixels[9] = wheeln(i, 216)\n elif mode == 3:\n cpx.pixels[0] = randcolor()\n cpx.pixels[1] = randcolor()\n cpx.pixels[2] = randcolor()\n cpx.pixels[3] = randcolor()\n cpx.pixels[4] = randcolor()\n cpx.pixels[5] = randcolor()\n cpx.pixels[6] = randcolor()\n cpx.pixels[7] = randcolor()\n cpx.pixels[8] = randcolor()\n cpx.pixels[9] = randcolor()\n else:\n alloff()\n if cpx.button_a:\n print('Button A on Bottom Pressed! Changing mode to ALL OFF.')\n pusha = 1\n if cpx.button_b:\n print('Button B on Top Pressed! Changing mode.')\n pushb = 1\n i = (i + 1) % 256\n if i == 255:\n clr = (clr + 1) % 6\n if (i == 63) | (i == 127) | (i == 191) | (i >= 255) and pusha == 1:\n mode = 0\n pusha = 0\n i = 0\n if (i == 63) | (i == 127) | (i == 191) | (i >= 255) and pushb == 1:\n mode = mode + 1\n pushb = 0\n i = 0\n if mode > 3:\n mode = 1\n",
"step-4": "from adafruit_circuitplayground.express import cpx\nimport random\ncpx.pixels.fill((0, 0, 0))\n\n\ndef wheeln(pos, sft):\n if pos + sft > 255:\n pos = pos + sft - 256\n else:\n pos = pos + sft\n if pos < 0 or pos > 255:\n return 0, 0, 0\n if pos < 85:\n return int(255 - pos * 3), int(pos * 3), 0\n elif pos < 170:\n pos -= 85\n return 0, int(255 - pos * 3), int(pos * 3)\n else:\n pos -= 170\n return int(pos * 3), 0, int(255 - pos * 3)\n\n\ndef randcolor():\n randgr = randrd = randbl = 0\n if random.randint(0, 14) == 1:\n if random.randint(0, 1) == 1:\n randgr = random.randint(1, 255)\n if random.randint(0, 1) == 1:\n randrd = random.randint(1, 255)\n if random.randint(0, 1) == 1:\n randbl = random.randint(1, 255)\n return randgr, randrd, randbl\n\n\ndef flame(pos, clr, sft):\n if pos + sft > 255:\n pos = pos + sft - 256\n else:\n pos = pos + sft\n if pos < 32:\n rval = 0\n elif pos > 31 and pos < 64:\n rval = int(pos * 8 - 249)\n elif pos > 63 and pos < 96:\n rval = int(767 - pos * 8)\n elif pos > 95 and pos < 128:\n rval = 0\n elif pos > 127 and pos < 160:\n rval = int(pos * 8 - 1017)\n elif pos > 159 and pos < 192:\n rval = int(1535 - pos * 8)\n elif pos > 191 and pos < 224:\n rval = 0\n elif pos > 223:\n rval = 0\n if clr == 0:\n return rval, 0, 0\n elif clr == 1:\n return rval, rval, 0\n elif clr == 2:\n return 0, rval, 0\n elif clr == 3:\n return 0, rval, rval\n elif clr == 4:\n return 0, rval, rval\n elif clr == 5:\n return rval, 0, rval\n else:\n return 0, 0, 0\n\n\ndef alloff():\n cpx.pixels.fill((0, 0, 0))\n\n\nmode = 1\npusha = 0\npushb = 0\nclr = 0\ni = 0\nwhile True:\n if mode == 1:\n cpx.pixels[0] = flame(i, clr, 32)\n cpx.pixels[1] = flame(i, clr, 24)\n cpx.pixels[2] = flame(i, clr, 16)\n cpx.pixels[3] = flame(i, clr, 8)\n cpx.pixels[4] = flame(i, clr, 0)\n cpx.pixels[5] = flame(i, clr, 0)\n cpx.pixels[6] = flame(i, clr, 8)\n cpx.pixels[7] = flame(i, clr, 16)\n cpx.pixels[8] = flame(i, clr, 24)\n cpx.pixels[9] = flame(i, clr, 32)\n elif mode == 2:\n cpx.pixels[0] = wheeln(i, 0)\n cpx.pixels[1] = wheeln(i, 24)\n cpx.pixels[2] = wheeln(i, 48)\n cpx.pixels[3] = wheeln(i, 72)\n cpx.pixels[4] = wheeln(i, 96)\n cpx.pixels[5] = wheeln(i, 120)\n cpx.pixels[6] = wheeln(i, 144)\n cpx.pixels[7] = wheeln(i, 168)\n cpx.pixels[8] = wheeln(i, 192)\n cpx.pixels[9] = wheeln(i, 216)\n elif mode == 3:\n cpx.pixels[0] = randcolor()\n cpx.pixels[1] = randcolor()\n cpx.pixels[2] = randcolor()\n cpx.pixels[3] = randcolor()\n cpx.pixels[4] = randcolor()\n cpx.pixels[5] = randcolor()\n cpx.pixels[6] = randcolor()\n cpx.pixels[7] = randcolor()\n cpx.pixels[8] = randcolor()\n cpx.pixels[9] = randcolor()\n else:\n alloff()\n if cpx.button_a:\n print('Button A on Bottom Pressed! Changing mode to ALL OFF.')\n pusha = 1\n if cpx.button_b:\n print('Button B on Top Pressed! Changing mode.')\n pushb = 1\n i = (i + 1) % 256\n if i == 255:\n clr = (clr + 1) % 6\n if (i == 63) | (i == 127) | (i == 191) | (i >= 255) and pusha == 1:\n mode = 0\n pusha = 0\n i = 0\n if (i == 63) | (i == 127) | (i == 191) | (i >= 255) and pushb == 1:\n mode = mode + 1\n pushb = 0\n i = 0\n if mode > 3:\n mode = 1\n",
"step-5": "# My Godzilla Hat Code - @alt_bier\nfrom adafruit_circuitplayground.express import cpx\nimport random\n\n#cpx.pixels.brightness = 0.5 # 50 pct\ncpx.pixels.fill((0, 0, 0)) # Turn off the NeoPixels if they're on!\n\n# Function to give us a nice color swirl on the built in NeoPixel (R,G,B)\ndef wheeln(pos, sft):\n if (pos + sft) > 255:\n pos = (pos + sft) - 256\n else:\n pos = (pos + sft)\n if (pos < 0) or (pos > 255):\n return (0, 0, 0)\n if pos < 85:\n return (int(255 - pos*3), int(pos*3), 0)\n elif pos < 170:\n pos -= 85\n return (0, int(255 - (pos*3)), int(pos*3))\n else:\n pos -= 170\n return (int(pos*3), 0, int(255 - pos*3))\n\n# Function to flash random colors\ndef randcolor():\n randgr = randrd = randbl = 0\n # determine if all colors off\n if (random.randint(0,14) == 1):\n # if on then determine if each color is off and return an intensity value if on\n if (random.randint(0,1) == 1):\n randgr = random.randint(1,255)\n if (random.randint(0,1) == 1):\n randrd = random.randint(1,255)\n if (random.randint(0,1) == 1):\n randbl = random.randint(1,255)\n return (randgr, randrd, randbl)\n\n# Function to simulate a flame effect on built in NeoPixel (R,G,B)\ndef flame(pos, clr, sft):\n # pos = position, sft = shift\n if (pos + sft) > 255:\n pos = (pos + sft) - 256\n else:\n pos = (pos + sft)\n #\n # RETURN VALUES\n if pos < 32:\n # OFF\n rval = 0\n elif (pos > 31) and (pos < 64):\n # Low-High\n rval = int((pos*8) - 249)\n elif (pos > 63) and (pos < 96):\n # High-Low\n rval = int(767 - (pos*8))\n elif (pos > 95) and (pos < 128):\n # OFF\n rval = 0\n elif (pos > 127) and (pos < 160):\n # Low-High\n rval = int((pos*8) - 1017)\n elif (pos > 159) and (pos < 192):\n # High-Low\n rval = int(1535 - (pos*8))\n elif (pos > 191) and (pos < 224):\n # OFF\n rval = 0\n elif (pos > 223):\n # OFF\n rval = 0\n #\n # RETURN COLOR\n if (clr == 0):\n # Red\n return (rval, 0, 0)\n elif (clr == 1):\n # Red & Green\n return (rval, rval, 0)\n elif (clr == 2):\n # Green\n return (0, rval, 0)\n elif (clr == 3):\n # Green & Blue\n return (0, rval, rval)\n elif (clr == 4):\n # Blue\n return (0, rval, rval)\n elif (clr == 5):\n # Blue & Red\n return (rval, 0, rval)\n else:\n return (0, 0, 0)\n\n# Function to turn off all the built in NeoPixels\ndef alloff():\n cpx.pixels.fill((0, 0, 0))\n\nmode = 1\npusha = 0\npushb = 0\nclr = 0\ni = 0\nwhile True:\n # NeoPixels are cpx.pixels[0-9]\n\n if (mode == 1):\n cpx.pixels[0] = flame(i, clr, 32)\n cpx.pixels[1] = flame(i, clr, 24)\n cpx.pixels[2] = flame(i, clr, 16)\n cpx.pixels[3] = flame(i, clr, 8)\n cpx.pixels[4] = flame(i, clr, 0)\n cpx.pixels[5] = flame(i, clr, 0)\n cpx.pixels[6] = flame(i, clr, 8)\n cpx.pixels[7] = flame(i, clr, 16)\n cpx.pixels[8] = flame(i, clr, 24)\n cpx.pixels[9] = flame(i, clr, 32)\n elif (mode == 2):\n cpx.pixels[0] = wheeln(i, 0)\n cpx.pixels[1] = wheeln(i, 24)\n cpx.pixels[2] = wheeln(i, 48)\n cpx.pixels[3] = wheeln(i, 72)\n cpx.pixels[4] = wheeln(i, 96)\n cpx.pixels[5] = wheeln(i, 120)\n cpx.pixels[6] = wheeln(i, 144)\n cpx.pixels[7] = wheeln(i, 168)\n cpx.pixels[8] = wheeln(i, 192)\n cpx.pixels[9] = wheeln(i, 216)\n elif (mode == 3):\n cpx.pixels[0] = randcolor()\n cpx.pixels[1] = randcolor()\n cpx.pixels[2] = randcolor()\n cpx.pixels[3] = randcolor()\n cpx.pixels[4] = randcolor()\n cpx.pixels[5] = randcolor()\n cpx.pixels[6] = randcolor()\n cpx.pixels[7] = randcolor()\n cpx.pixels[8] = randcolor()\n cpx.pixels[9] = randcolor()\n else:\n # Mode = 0 so turn All Off\n alloff()\n\n # Button A is bottom button on hat\n if cpx.button_a:\n 
print(\"Button A on Bottom Pressed! Changing mode to ALL OFF.\")\n pusha = 1\n # Button B is top button on hat\n if cpx.button_b:\n print(\"Button B on Top Pressed! Changing mode.\")\n pushb = 1\n\n i = (i+1) % 256\n #print (i)\n if (i == 255):\n clr = (clr+1) % 6\n\n if ((i == 63) | (i == 127) | (i == 191) | (i >= 255)) and (pusha == 1):\n mode = 0\n pusha = 0\n i = 0\n if ((i == 63) | (i == 127) | (i == 191) | (i >= 255)) and (pushb == 1):\n mode = (mode+1)\n pushb = 0\n i = 0\n if (mode > 3):\n mode = 1\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import discord
from discord.ext import commands
class TestCommands(commands.Cog, description="Unstable test commands", command_attrs=dict(hidden=True, description="Can only be used by an Owner")):
def __init__(self, bot):
self.bot = bot
self.hidden = True
print("Loaded", __name__)
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
def setup(bot):
if getattr(bot, "debug", False):
bot.add_cog(TestCommands(bot))
|
normal
|
{
"blob_id": "d5a5c6f9d483b2998cd0d9e47b37ab4499fa1c2a",
"index": 6279,
"step-1": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n <mask token>\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, 'debug', False):\n bot.add_cog(TestCommands(bot))\n",
"step-4": "import discord\nfrom discord.ext import commands\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, 'debug', False):\n bot.add_cog(TestCommands(bot))\n",
"step-5": "import discord\nfrom discord.ext import commands\n\n\nclass TestCommands(commands.Cog, description=\"Unstable test commands\", command_attrs=dict(hidden=True, description=\"Can only be used by an Owner\")):\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print(\"Loaded\", __name__)\n\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, \"debug\", False):\n bot.add_cog(TestCommands(bot))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def str2int(strtime: str):
hh, mm, ss = strtime.split(':')
return 3600 * int(hh) + 60 * int(mm) + int(ss)
def int2str(inttime: int):
hh = inttime // 3600
mm = inttime % 3600 // 60
ss = inttime % 60
return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(play_time, adv_time, logs):
"""
Strategy :
    adv_start_time: from log start time up to < 995959 - adv time
sliding window
Step 1.
String time -> integer time
Step 2. pseudo code : Two pointer algorithm
max time = 0
return max time
"""
MAX = str2int(play_time)
max_view = 0
ans_time = 0
adv_time = str2int(adv_time)
logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for
log in logs]
view_list = [0] * (MAX + 1)
for start_time, end_time in logs:
view_list[start_time] += 1
view_list[end_time] -= 1
for i in range(1, MAX + 1):
view_list[i] = view_list[i] + view_list[i - 1]
for i in range(1, MAX + 1):
view_list[i] = view_list[i] + view_list[i - 1]
for start_time in range(MAX - adv_time + 1):
end_time = start_time + adv_time
temp_view = view_list[end_time] - view_list[start_time]
if temp_view > max_view:
max_view = temp_view
ans_time = start_time
if ans_time != 0:
ans_time += 1
return int2str(ans_time)
def str2int(strtime: str):
hh, mm, ss = strtime.split(':')
return 3600 * int(hh) + 60 * int(mm) + int(ss)
def int2str(inttime: int):
hh = inttime // 3600
mm = inttime % 3600 // 60
ss = inttime % 60
return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(play_time, adv_time, logs):
"""
Strategy :
    adv_start_time: from log start time up to < 995959 - adv time
sliding window
Step 1.
String time -> integer time
Step 2. pseudo code : Two pointer algorithm
max time = 0
return max time
"""
MAX = str2int(play_time)
max_view = 0
ans_time = 0
adv_time = str2int(adv_time)
logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for
log in logs]
view_list = [0] * (MAX + 1)
for start_time, end_time in logs:
view_list[start_time] += 1
view_list[end_time] -= 1
for i in range(1, MAX + 1):
view_list[i] = view_list[i] + view_list[i - 1]
for i in range(1, MAX + 1):
view_list[i] = view_list[i] + view_list[i - 1]
for start_time in range(MAX - adv_time + 1):
end_time = start_time + adv_time
temp_view = view_list[end_time] - view_list[start_time]
if temp_view > max_view:
max_view = temp_view
ans_time = start_time
if ans_time != 0:
ans_time += 1
return int2str(ans_time)
def str2int(strtime: str):
hh, mm, ss = strtime.split(':')
return 3600 * int(hh) + 60 * int(mm) + int(ss)
def int2str(inttime: int):
hh = inttime // 3600
mm = inttime % 3600 // 60
ss = inttime % 60
return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)
if __name__ == '__main__':
play_time = '02:03:55'
adv_time = '00:14:15'
logs = ['01:20:15-01:45:14', '00:25:50-00:48:29', '00:40:31-01:00:00',
'01:37:44-02:02:30', '01:30:59-01:53:29']
result = '01:30:59'
print(solution(play_time, adv_time, logs))
print(result)
play_time = '99:59:59'
adv_time = '25:00:00'
logs = ['69:59:59-89:59:59', '01:00:00-21:00:00', '79:59:59-99:59:59',
'11:00:00-31:00:00']
result = '01:00:00'
print(solution(play_time, adv_time, logs))
print(result)
play_time = '50:00:00'
adv_time = '50:00:00'
logs = ['15:36:51-38:21:49', '10:14:18-15:36:51', '38:21:49-42:51:45']
result = '00:00:00'
print(solution(play_time, adv_time, logs))
print(result)
<|reserved_special_token_1|>
from collections import deque
def solution(play_time, adv_time, logs):
"""
Strategy :
    adv_start_time: from log start time up to < 995959 - adv time
sliding window
Step 1.
String time -> integer time
Step 2. pseudo code : Two pointer algorithm
max time = 0
return max time
"""
MAX = str2int(play_time)
max_view = 0
ans_time = 0
adv_time = str2int(adv_time)
logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for
log in logs]
view_list = [0] * (MAX + 1)
for start_time, end_time in logs:
view_list[start_time] += 1
view_list[end_time] -= 1
for i in range(1, MAX + 1):
view_list[i] = view_list[i] + view_list[i - 1]
for i in range(1, MAX + 1):
view_list[i] = view_list[i] + view_list[i - 1]
for start_time in range(MAX - adv_time + 1):
end_time = start_time + adv_time
temp_view = view_list[end_time] - view_list[start_time]
if temp_view > max_view:
max_view = temp_view
ans_time = start_time
if ans_time != 0:
ans_time += 1
return int2str(ans_time)
def str2int(strtime: str):
hh, mm, ss = strtime.split(':')
return 3600 * int(hh) + 60 * int(mm) + int(ss)
def int2str(inttime: int):
hh = inttime // 3600
mm = inttime % 3600 // 60
ss = inttime % 60
return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)
if __name__ == '__main__':
play_time = '02:03:55'
adv_time = '00:14:15'
logs = ['01:20:15-01:45:14', '00:25:50-00:48:29', '00:40:31-01:00:00',
'01:37:44-02:02:30', '01:30:59-01:53:29']
result = '01:30:59'
print(solution(play_time, adv_time, logs))
print(result)
play_time = '99:59:59'
adv_time = '25:00:00'
logs = ['69:59:59-89:59:59', '01:00:00-21:00:00', '79:59:59-99:59:59',
'11:00:00-31:00:00']
result = '01:00:00'
print(solution(play_time, adv_time, logs))
print(result)
play_time = '50:00:00'
adv_time = '50:00:00'
logs = ['15:36:51-38:21:49', '10:14:18-15:36:51', '38:21:49-42:51:45']
result = '00:00:00'
print(solution(play_time, adv_time, logs))
print(result)
<|reserved_special_token_1|>
from collections import deque
def solution(play_time, adv_time, logs):
'''
Strategy :
    adv_start_time: from log start time up to < 995959 - adv time
sliding window
Step 1.
String time -> integer time
Step 2. pseudo code : Two pointer algorithm
max time = 0
return max time
'''
## Step 1.
MAX = str2int(play_time)
max_view = 0
ans_time = 0
adv_time = str2int(adv_time)
logs = [[str2int(log.split("-")[0]),str2int(log.split("-")[1])] for log in logs]
view_list = [0] * (MAX+1)
## Step 2.
    ## derivative (difference array)
for start_time,end_time in logs:
view_list[start_time] += 1
view_list[end_time] -= 1
    ## function (integrate once)
for i in range(1,MAX+1):
view_list[i] = view_list[i]+view_list[i-1]
    ## cumulative sum (integrate again)
for i in range(1,MAX+1):
view_list[i] = view_list[i]+view_list[i-1]
for start_time in range(MAX-adv_time+1):
## start time 0,1,2,... MAX-adv_time
## end time adv_time, ... MAX
end_time = start_time + adv_time
temp_view = view_list[end_time] - view_list[start_time]
if temp_view > max_view:
max_view = temp_view
ans_time = start_time
if ans_time != 0:
ans_time += 1
return int2str(ans_time)
def str2int(strtime:str):
hh,mm,ss = strtime.split(":")
return 3600*int(hh)+60*int(mm)+int(ss)
def int2str(inttime:int):
hh = inttime//3600
mm = (inttime%3600)//60
ss = inttime%60
return str(hh).zfill(2)+":"+str(mm).zfill(2)+":"+str(ss).zfill(2)
if __name__ == "__main__":
play_time = "02:03:55"
adv_time = "00:14:15"
logs = ["01:20:15-01:45:14", "00:25:50-00:48:29", "00:40:31-01:00:00", "01:37:44-02:02:30", "01:30:59-01:53:29"]
result = "01:30:59"
print(solution(play_time, adv_time, logs))
print(result)
play_time = "99:59:59"
adv_time = "25:00:00"
logs = ["69:59:59-89:59:59", "01:00:00-21:00:00", "79:59:59-99:59:59", "11:00:00-31:00:00"]
result = "01:00:00"
print(solution(play_time, adv_time, logs))
print(result)
play_time = "50:00:00"
adv_time = "50:00:00"
logs = ["15:36:51-38:21:49", "10:14:18-15:36:51", "38:21:49-42:51:45"]
result = "00:00:00"
print(solution(play_time, adv_time, logs))
print(result)
|
flexible
|
{
"blob_id": "cb50a5352b0ad7b04dee9393c50da54fdf507376",
"index": 2018,
"step-1": "<mask token>\n\n\ndef str2int(strtime: str):\n hh, mm, ss = strtime.split(':')\n return 3600 * int(hh) + 60 * int(mm) + int(ss)\n\n\ndef int2str(inttime: int):\n hh = inttime // 3600\n mm = inttime % 3600 // 60\n ss = inttime % 60\n return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(play_time, adv_time, logs):\n \"\"\"\n Strategy : \n adv_start_time을 log start time 부터 < 995959 - adv time\n sliding window \n\n Step 1. \n String time -> integer time\n\n Step 2. pseudo code : Two pointer algorithm\n max time = 0\n \n return max time\n \"\"\"\n MAX = str2int(play_time)\n max_view = 0\n ans_time = 0\n adv_time = str2int(adv_time)\n logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for\n log in logs]\n view_list = [0] * (MAX + 1)\n for start_time, end_time in logs:\n view_list[start_time] += 1\n view_list[end_time] -= 1\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for start_time in range(MAX - adv_time + 1):\n end_time = start_time + adv_time\n temp_view = view_list[end_time] - view_list[start_time]\n if temp_view > max_view:\n max_view = temp_view\n ans_time = start_time\n if ans_time != 0:\n ans_time += 1\n return int2str(ans_time)\n\n\ndef str2int(strtime: str):\n hh, mm, ss = strtime.split(':')\n return 3600 * int(hh) + 60 * int(mm) + int(ss)\n\n\ndef int2str(inttime: int):\n hh = inttime // 3600\n mm = inttime % 3600 // 60\n ss = inttime % 60\n return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef solution(play_time, adv_time, logs):\n \"\"\"\n Strategy : \n adv_start_time을 log start time 부터 < 995959 - adv time\n sliding window \n\n Step 1. \n String time -> integer time\n\n Step 2. pseudo code : Two pointer algorithm\n max time = 0\n \n return max time\n \"\"\"\n MAX = str2int(play_time)\n max_view = 0\n ans_time = 0\n adv_time = str2int(adv_time)\n logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for\n log in logs]\n view_list = [0] * (MAX + 1)\n for start_time, end_time in logs:\n view_list[start_time] += 1\n view_list[end_time] -= 1\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for start_time in range(MAX - adv_time + 1):\n end_time = start_time + adv_time\n temp_view = view_list[end_time] - view_list[start_time]\n if temp_view > max_view:\n max_view = temp_view\n ans_time = start_time\n if ans_time != 0:\n ans_time += 1\n return int2str(ans_time)\n\n\ndef str2int(strtime: str):\n hh, mm, ss = strtime.split(':')\n return 3600 * int(hh) + 60 * int(mm) + int(ss)\n\n\ndef int2str(inttime: int):\n hh = inttime // 3600\n mm = inttime % 3600 // 60\n ss = inttime % 60\n return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)\n\n\nif __name__ == '__main__':\n play_time = '02:03:55'\n adv_time = '00:14:15'\n logs = ['01:20:15-01:45:14', '00:25:50-00:48:29', '00:40:31-01:00:00',\n '01:37:44-02:02:30', '01:30:59-01:53:29']\n result = '01:30:59'\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = '99:59:59'\n adv_time = '25:00:00'\n logs = ['69:59:59-89:59:59', '01:00:00-21:00:00', '79:59:59-99:59:59',\n '11:00:00-31:00:00']\n result = '01:00:00'\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = '50:00:00'\n adv_time = '50:00:00'\n logs = ['15:36:51-38:21:49', '10:14:18-15:36:51', '38:21:49-42:51:45']\n result = '00:00:00'\n print(solution(play_time, adv_time, logs))\n print(result)\n",
"step-4": "from collections import deque\n\n\ndef solution(play_time, adv_time, logs):\n \"\"\"\n Strategy : \n adv_start_time을 log start time 부터 < 995959 - adv time\n sliding window \n\n Step 1. \n String time -> integer time\n\n Step 2. pseudo code : Two pointer algorithm\n max time = 0\n \n return max time\n \"\"\"\n MAX = str2int(play_time)\n max_view = 0\n ans_time = 0\n adv_time = str2int(adv_time)\n logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for\n log in logs]\n view_list = [0] * (MAX + 1)\n for start_time, end_time in logs:\n view_list[start_time] += 1\n view_list[end_time] -= 1\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for start_time in range(MAX - adv_time + 1):\n end_time = start_time + adv_time\n temp_view = view_list[end_time] - view_list[start_time]\n if temp_view > max_view:\n max_view = temp_view\n ans_time = start_time\n if ans_time != 0:\n ans_time += 1\n return int2str(ans_time)\n\n\ndef str2int(strtime: str):\n hh, mm, ss = strtime.split(':')\n return 3600 * int(hh) + 60 * int(mm) + int(ss)\n\n\ndef int2str(inttime: int):\n hh = inttime // 3600\n mm = inttime % 3600 // 60\n ss = inttime % 60\n return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)\n\n\nif __name__ == '__main__':\n play_time = '02:03:55'\n adv_time = '00:14:15'\n logs = ['01:20:15-01:45:14', '00:25:50-00:48:29', '00:40:31-01:00:00',\n '01:37:44-02:02:30', '01:30:59-01:53:29']\n result = '01:30:59'\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = '99:59:59'\n adv_time = '25:00:00'\n logs = ['69:59:59-89:59:59', '01:00:00-21:00:00', '79:59:59-99:59:59',\n '11:00:00-31:00:00']\n result = '01:00:00'\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = '50:00:00'\n adv_time = '50:00:00'\n logs = ['15:36:51-38:21:49', '10:14:18-15:36:51', '38:21:49-42:51:45']\n result = '00:00:00'\n print(solution(play_time, adv_time, logs))\n print(result)\n",
"step-5": "from collections import deque\ndef solution(play_time, adv_time, logs):\n\n '''\n Strategy : \n adv_start_time을 log start time 부터 < 995959 - adv time\n sliding window \n\n Step 1. \n String time -> integer time\n\n Step 2. pseudo code : Two pointer algorithm\n max time = 0\n \n return max time\n '''\n ## Step 1.\n MAX = str2int(play_time)\n max_view = 0\n ans_time = 0\n adv_time = str2int(adv_time)\n logs = [[str2int(log.split(\"-\")[0]),str2int(log.split(\"-\")[1])] for log in logs]\n view_list = [0] * (MAX+1)\n ## Step 2.\n ## 도함수\n for start_time,end_time in logs:\n view_list[start_time] += 1\n view_list[end_time] -= 1\n\n ## 함수\n for i in range(1,MAX+1):\n view_list[i] = view_list[i]+view_list[i-1]\n\n ## 누적 합\n for i in range(1,MAX+1):\n view_list[i] = view_list[i]+view_list[i-1]\n \n\n for start_time in range(MAX-adv_time+1):\n ## start time 0,1,2,... MAX-adv_time\n ## end time adv_time, ... MAX\n end_time = start_time + adv_time\n temp_view = view_list[end_time] - view_list[start_time]\n if temp_view > max_view:\n max_view = temp_view\n ans_time = start_time\n if ans_time != 0:\n ans_time += 1\n return int2str(ans_time)\n\ndef str2int(strtime:str):\n hh,mm,ss = strtime.split(\":\")\n return 3600*int(hh)+60*int(mm)+int(ss)\n\ndef int2str(inttime:int):\n hh = inttime//3600\n mm = (inttime%3600)//60\n ss = inttime%60\n return str(hh).zfill(2)+\":\"+str(mm).zfill(2)+\":\"+str(ss).zfill(2)\n\n\nif __name__ == \"__main__\":\n play_time = \"02:03:55\"\n adv_time = \"00:14:15\"\n logs = [\"01:20:15-01:45:14\", \"00:25:50-00:48:29\", \"00:40:31-01:00:00\", \"01:37:44-02:02:30\", \"01:30:59-01:53:29\"]\n result = \"01:30:59\"\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = \"99:59:59\"\n adv_time = \"25:00:00\"\n logs = [\"69:59:59-89:59:59\", \"01:00:00-21:00:00\", \"79:59:59-99:59:59\", \"11:00:00-31:00:00\"]\n result = \"01:00:00\"\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = \"50:00:00\"\n adv_time = \"50:00:00\"\n logs = [\"15:36:51-38:21:49\", \"10:14:18-15:36:51\", \"38:21:49-42:51:45\"]\n result = \"00:00:00\"\n print(solution(play_time, adv_time, logs))\n print(result)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('hello')
print('===================================================')
print('Nama Lengkap : Agung Dharmawan')
print('Kelas : Teknik Informatika 2018 A')
print('Kampus : Universitas Nahdlatul Ulama Sidoarjo')
print('===================================================')
<|reserved_special_token_1|>
print ("hello")
print ("===================================================")
print ("Nama Lengkap : Agung Dharmawan")
print ("Kelas : Teknik Informatika 2018 A")
print ("Kampus : Universitas Nahdlatul Ulama Sidoarjo")
print ("===================================================")
|
flexible
|
{
"blob_id": "4e10bc876797d0939c91cff5eff497b36af35dcb",
"index": 1932,
"step-1": "<mask token>\n",
"step-2": "print('hello')\nprint('===================================================')\nprint('Nama Lengkap : Agung Dharmawan')\nprint('Kelas : Teknik Informatika 2018 A')\nprint('Kampus : Universitas Nahdlatul Ulama Sidoarjo')\nprint('===================================================')\n",
"step-3": "print (\"hello\")\nprint (\"===================================================\")\nprint (\"Nama Lengkap : Agung Dharmawan\")\nprint (\"Kelas : Teknik Informatika 2018 A\")\nprint (\"Kampus : Universitas Nahdlatul Ulama Sidoarjo\")\nprint (\"===================================================\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from diseas import Disease
from parse import analyzing
from config import FILE_NAME
from random import randint
if __name__ == '__main__':
"""
Main module that runs the program.
"""
def working_with_user(disea):
print('Choose what you want to know about that disease:\naverage_value(will return the average value\
of deaths for the certain period of time)\naverage_changing(will return the average annual changing for the death rate)\n\
graphic(will show you a plot for the death rates)\n\
predicting(will make a prediction for the year, that you type)\n\
min_value and max_value')
new1_command = input()
if new1_command in ['average_value', 'average_changing', 'max_value', 'min_value']:
print(eval(f'Disease(disea).{new1_command}()'))
elif new1_command == 'graphic':
value1 = input("Do you want to have the prediction on your graphic?\
Type 2018 in this case. Otherwise type nothing\n")
Disease(disea).graphic(int(value1))
elif new1_command == 'predicting':
value1 = input("Type the year, which value have to be predicted(int bigger than 2018)")
Disease(disea).graphic(value1)
else:
print('Something went wrong')
while True:
print('Hello, now you are using the program, that can acknowledge you with data about death rates')
print('Here you can use following commands:\nshow - to show the list of the death causes\n\
leave - to go out of the program')
command = input()
if command == 'show':
for index, illness in enumerate(analyzing(FILE_NAME).keys()):
print(index, illness)
new_command = input("Now, choose the number of the disease or type randomly\
if you don't want to read a lot\n")
if new_command == 'randomly':
value = randint(0, 55)
for index1, illness1 in enumerate(analyzing(FILE_NAME).keys()):
if index1 == value:
print(illness1)
working_with_user(illness1)
elif '0' <= new_command <= '55':
for index2, illness2 in enumerate(analyzing(FILE_NAME).keys()):
if index2 == int(new_command):
working_with_user(illness2)
elif command == 'leave':
break
|
normal
|
{
"blob_id": "b33af7aff0f3fde6499d5e24fc036d5bd74b6e47",
"index": 3550,
"step-1": "rom diseas import Disease\nfrom parse import analyzing\nfrom config import FILE_NAME\nfrom random import randint\n\nif __name__ == '__main__':\n \"\"\"\n Main module that runs the program.\n \"\"\"\n def working_with_user(disea):\n print('Choose what you want to know about that disease:\\naverage_value(will return the average value\\\nof deaths for the certain period of time)\\naverage_changing(will return the average annual changing for the death rate)\\n\\\ngraphic(will show you a plot for the death rates)\\n\\\npredicting(will make a prediction for the year, that you type)\\n\\\nmin_value and max_value')\n new1_command = input()\n if new1_command in ['average_value', 'average_changing', 'max_value', 'min_value']:\n print(eval(f'Disease(disea).{new1_command}()'))\n elif new1_command == 'graphic':\n value1 = input(\"Do you want to have the prediction on your graphic?\\\n Type 2018 in this case. Otherwise type nothing\\n\")\n Disease(disea).graphic(int(value1))\n elif new1_command == 'predicting':\n value1 = input(\"Type the year, which value have to be predicted(int bigger than 2018)\")\n Disease(disea).graphic(value1)\n else:\n print('Something went wrong')\n\n\n while True:\n print('Hello, now you are using the program, that can acknowledge you with data about death rates')\n print('Here you can use following commands:\\nshow - to show the list of the death causes\\n\\\nleave - to go out of the program')\n command = input()\n if command == 'show':\n for index, illness in enumerate(analyzing(FILE_NAME).keys()):\n print(index, illness)\n new_command = input(\"Now, choose the number of the disease or type randomly\\\nif you don't want to read a lot\\n\")\n\n if new_command == 'randomly':\n value = randint(0, 55)\n for index1, illness1 in enumerate(analyzing(FILE_NAME).keys()):\n if index1 == value:\n print(illness1)\n working_with_user(illness1)\n\n elif '0' <= new_command <= '55':\n for index2, illness2 in enumerate(analyzing(FILE_NAME).keys()):\n if index2 == int(new_command):\n working_with_user(illness2)\n\n elif command == 'leave':\n break\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class State:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class State:
def __init__(self, id):
self.id = id
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class State:
def __init__(self, id):
self.id = id
def NotinClosed(problem, node):
NotVisited = 1
for tuple in problem.closed:
if node.state.id == tuple[0].id and node.depth >= tuple[1]:
NotVisited = 0
return NotVisited
<|reserved_special_token_1|>
class State:
def __init__(self, id):
self.id = id
def NotinClosed(problem, node): #returns 1 if the state has not been visited yet (net of the depth checks) and therefore has to be added
NotVisited = 1
for tuple in problem.closed:
if node.state.id == tuple[0].id and node.depth >= tuple[1]:
            NotVisited = 0 #present in visited, but selected_node has greater/equal depth
return NotVisited
|
flexible
|
{
"blob_id": "200deda300e39b07e0e558277a340b7ad01c7dee",
"index": 2216,
"step-1": "<mask token>\n",
"step-2": "class State:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class State:\n\n def __init__(self, id):\n self.id = id\n\n\n<mask token>\n",
"step-4": "class State:\n\n def __init__(self, id):\n self.id = id\n\n\ndef NotinClosed(problem, node):\n NotVisited = 1\n for tuple in problem.closed:\n if node.state.id == tuple[0].id and node.depth >= tuple[1]:\n NotVisited = 0\n return NotVisited\n",
"step-5": "\nclass State:\n def __init__(self, id):\n self.id = id\n\n\ndef NotinClosed(problem, node): #restituisce 1 se lo stato non è stato già visitato (al netto di controlli sulla depth) è quindi bisogna aggiungerlo\n NotVisited = 1\n for tuple in problem.closed:\n if node.state.id == tuple[0].id and node.depth >= tuple[1]:\n NotVisited = 0 #presente nei visited ma selected_node ha maggiore/uguale depth\n return NotVisited",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ChatService:
@staticmethod
def is_room_exists(room_id: int) ->bool:
return RoomGroup.objects.filter(id=room_id).exists()
@staticmethod
def create_users_room(**data) ->RoomGroup:
room = RoomGroup.objects.create(room_id=data.get('room_id'))
room.add_users([data.get('asker_id'), data.get('expert_id')])
return room
@staticmethod
def get_group_by_id(room_id: int):
return get_object_or_404(RoomGroup, room_id=room_id)
@staticmethod
def socket_chat_created(data: dict) ->None:
message = f"<div><b>Question:</b>{data.get('message')}</div>"
author = MainService.get_user_client(data.get('asker_id'))
room = ChatService.get_group_by_id(data.get('room_id'))
ChatService.save_chat_message(user=author, message=message, room=
room, is_system=True)
@staticmethod
def save_chat_message(message: str, user: UserClient, room: RoomGroup,
is_system: bool) ->Message:
return Message.objects.create(author=user, room_group=room, message
=message, is_system=is_system)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AsyncChatService:
<|reserved_special_token_0|>
@staticmethod
@database_sync_to_async
def is_room_open(room_id: int):
try:
return RoomGroup.objects.get(room_id=room_id).status
except RoomGroup.DoesNotExist:
return None
<|reserved_special_token_0|>
@staticmethod
@database_sync_to_async
def save_chat_message(message, user, room):
return Message.objects.create(author=user, room_group=room, message
=message)
class ChatService:
@staticmethod
def is_room_exists(room_id: int) ->bool:
return RoomGroup.objects.filter(id=room_id).exists()
@staticmethod
def create_users_room(**data) ->RoomGroup:
room = RoomGroup.objects.create(room_id=data.get('room_id'))
room.add_users([data.get('asker_id'), data.get('expert_id')])
return room
@staticmethod
def get_group_by_id(room_id: int):
return get_object_or_404(RoomGroup, room_id=room_id)
@staticmethod
def socket_chat_created(data: dict) ->None:
message = f"<div><b>Question:</b>{data.get('message')}</div>"
author = MainService.get_user_client(data.get('asker_id'))
room = ChatService.get_group_by_id(data.get('room_id'))
ChatService.save_chat_message(user=author, message=message, room=
room, is_system=True)
@staticmethod
def save_chat_message(message: str, user: UserClient, room: RoomGroup,
is_system: bool) ->Message:
return Message.objects.create(author=user, room_group=room, message
=message, is_system=is_system)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AsyncChatService:
@staticmethod
@database_sync_to_async
def get_group_by_id(room_id):
try:
return RoomGroup.objects.get(room_id=room_id)
except RoomGroup.DoesNotExist:
return None
@staticmethod
@database_sync_to_async
def is_room_open(room_id: int):
try:
return RoomGroup.objects.get(room_id=room_id).status
except RoomGroup.DoesNotExist:
return None
@staticmethod
@database_sync_to_async
def is_user_in_room(room_id, user):
return UsersRoomGroup.objects.filter(Q(user=user) & Q(
room_group__room_id=room_id)).exists()
@staticmethod
@database_sync_to_async
def save_chat_message(message, user, room):
return Message.objects.create(author=user, room_group=room, message
=message)
class ChatService:
@staticmethod
def is_room_exists(room_id: int) ->bool:
return RoomGroup.objects.filter(id=room_id).exists()
@staticmethod
def create_users_room(**data) ->RoomGroup:
room = RoomGroup.objects.create(room_id=data.get('room_id'))
room.add_users([data.get('asker_id'), data.get('expert_id')])
return room
@staticmethod
def get_group_by_id(room_id: int):
return get_object_or_404(RoomGroup, room_id=room_id)
@staticmethod
def socket_chat_created(data: dict) ->None:
message = f"<div><b>Question:</b>{data.get('message')}</div>"
author = MainService.get_user_client(data.get('asker_id'))
room = ChatService.get_group_by_id(data.get('room_id'))
ChatService.save_chat_message(user=author, message=message, room=
room, is_system=True)
@staticmethod
def save_chat_message(message: str, user: UserClient, room: RoomGroup,
is_system: bool) ->Message:
return Message.objects.create(author=user, room_group=room, message
=message, is_system=is_system)
<|reserved_special_token_1|>
from channels.db import database_sync_to_async
from django.db.models import Q
from rest_framework.generics import get_object_or_404
from main.models import UserClient
from main.services import MainService
from .models import Message, RoomGroup, UsersRoomGroup
class AsyncChatService:
@staticmethod
@database_sync_to_async
def get_group_by_id(room_id):
try:
return RoomGroup.objects.get(room_id=room_id)
except RoomGroup.DoesNotExist:
return None
@staticmethod
@database_sync_to_async
def is_room_open(room_id: int):
try:
return RoomGroup.objects.get(room_id=room_id).status
except RoomGroup.DoesNotExist:
return None
@staticmethod
@database_sync_to_async
def is_user_in_room(room_id, user):
return UsersRoomGroup.objects.filter(Q(user=user) & Q(
room_group__room_id=room_id)).exists()
@staticmethod
@database_sync_to_async
def save_chat_message(message, user, room):
return Message.objects.create(author=user, room_group=room, message
=message)
class ChatService:
@staticmethod
def is_room_exists(room_id: int) ->bool:
return RoomGroup.objects.filter(id=room_id).exists()
@staticmethod
def create_users_room(**data) ->RoomGroup:
room = RoomGroup.objects.create(room_id=data.get('room_id'))
room.add_users([data.get('asker_id'), data.get('expert_id')])
return room
@staticmethod
def get_group_by_id(room_id: int):
return get_object_or_404(RoomGroup, room_id=room_id)
@staticmethod
def socket_chat_created(data: dict) ->None:
message = f"<div><b>Question:</b>{data.get('message')}</div>"
author = MainService.get_user_client(data.get('asker_id'))
room = ChatService.get_group_by_id(data.get('room_id'))
ChatService.save_chat_message(user=author, message=message, room=
room, is_system=True)
@staticmethod
def save_chat_message(message: str, user: UserClient, room: RoomGroup,
is_system: bool) ->Message:
return Message.objects.create(author=user, room_group=room, message
=message, is_system=is_system)
<|reserved_special_token_1|>
from channels.db import database_sync_to_async
from django.db.models import Q
from rest_framework.generics import get_object_or_404
from main.models import UserClient
from main.services import MainService
from .models import Message, RoomGroup, UsersRoomGroup
class AsyncChatService:
@staticmethod
@database_sync_to_async
def get_group_by_id(room_id):
try:
return RoomGroup.objects.get(room_id=room_id)
except RoomGroup.DoesNotExist:
return None
@staticmethod
@database_sync_to_async
def is_room_open(room_id: int):
try:
return RoomGroup.objects.get(room_id=room_id).status
except RoomGroup.DoesNotExist:
return None
@staticmethod
@database_sync_to_async
def is_user_in_room(room_id, user):
return UsersRoomGroup.objects.filter(Q(user=user) & Q(room_group__room_id=room_id)).exists()
@staticmethod
@database_sync_to_async
def save_chat_message(message, user, room):
return Message.objects.create(author=user, room_group=room, message=message)
class ChatService:
@staticmethod
def is_room_exists(room_id: int) -> bool:
return RoomGroup.objects.filter(id=room_id).exists()
@staticmethod
def create_users_room(**data) -> RoomGroup:
room = RoomGroup.objects.create(room_id=data.get('room_id'))
room.add_users([data.get('asker_id'), data.get('expert_id')])
return room
@staticmethod
def get_group_by_id(room_id: int):
return get_object_or_404(RoomGroup, room_id=room_id)
@staticmethod
def socket_chat_created(data: dict) -> None:
message = f"""<div><b>Question:</b>{data.get('message')}</div>"""
author = MainService.get_user_client(data.get('asker_id'))
room = ChatService.get_group_by_id(data.get('room_id'))
ChatService.save_chat_message(user=author, message=message, room=room, is_system=True)
@staticmethod
def save_chat_message(message: str, user: UserClient, room: RoomGroup, is_system: bool) -> Message:
return Message.objects.create(author=user, room_group=room, message=message, is_system=is_system)
|
flexible
|
{
"blob_id": "d71ffd022d87aa547b2a379f4c92d767b91212fd",
"index": 3827,
"step-1": "<mask token>\n\n\nclass ChatService:\n\n @staticmethod\n def is_room_exists(room_id: int) ->bool:\n return RoomGroup.objects.filter(id=room_id).exists()\n\n @staticmethod\n def create_users_room(**data) ->RoomGroup:\n room = RoomGroup.objects.create(room_id=data.get('room_id'))\n room.add_users([data.get('asker_id'), data.get('expert_id')])\n return room\n\n @staticmethod\n def get_group_by_id(room_id: int):\n return get_object_or_404(RoomGroup, room_id=room_id)\n\n @staticmethod\n def socket_chat_created(data: dict) ->None:\n message = f\"<div><b>Question:</b>{data.get('message')}</div>\"\n author = MainService.get_user_client(data.get('asker_id'))\n room = ChatService.get_group_by_id(data.get('room_id'))\n ChatService.save_chat_message(user=author, message=message, room=\n room, is_system=True)\n\n @staticmethod\n def save_chat_message(message: str, user: UserClient, room: RoomGroup,\n is_system: bool) ->Message:\n return Message.objects.create(author=user, room_group=room, message\n =message, is_system=is_system)\n",
"step-2": "<mask token>\n\n\nclass AsyncChatService:\n <mask token>\n\n @staticmethod\n @database_sync_to_async\n def is_room_open(room_id: int):\n try:\n return RoomGroup.objects.get(room_id=room_id).status\n except RoomGroup.DoesNotExist:\n return None\n <mask token>\n\n @staticmethod\n @database_sync_to_async\n def save_chat_message(message, user, room):\n return Message.objects.create(author=user, room_group=room, message\n =message)\n\n\nclass ChatService:\n\n @staticmethod\n def is_room_exists(room_id: int) ->bool:\n return RoomGroup.objects.filter(id=room_id).exists()\n\n @staticmethod\n def create_users_room(**data) ->RoomGroup:\n room = RoomGroup.objects.create(room_id=data.get('room_id'))\n room.add_users([data.get('asker_id'), data.get('expert_id')])\n return room\n\n @staticmethod\n def get_group_by_id(room_id: int):\n return get_object_or_404(RoomGroup, room_id=room_id)\n\n @staticmethod\n def socket_chat_created(data: dict) ->None:\n message = f\"<div><b>Question:</b>{data.get('message')}</div>\"\n author = MainService.get_user_client(data.get('asker_id'))\n room = ChatService.get_group_by_id(data.get('room_id'))\n ChatService.save_chat_message(user=author, message=message, room=\n room, is_system=True)\n\n @staticmethod\n def save_chat_message(message: str, user: UserClient, room: RoomGroup,\n is_system: bool) ->Message:\n return Message.objects.create(author=user, room_group=room, message\n =message, is_system=is_system)\n",
"step-3": "<mask token>\n\n\nclass AsyncChatService:\n\n @staticmethod\n @database_sync_to_async\n def get_group_by_id(room_id):\n try:\n return RoomGroup.objects.get(room_id=room_id)\n except RoomGroup.DoesNotExist:\n return None\n\n @staticmethod\n @database_sync_to_async\n def is_room_open(room_id: int):\n try:\n return RoomGroup.objects.get(room_id=room_id).status\n except RoomGroup.DoesNotExist:\n return None\n\n @staticmethod\n @database_sync_to_async\n def is_user_in_room(room_id, user):\n return UsersRoomGroup.objects.filter(Q(user=user) & Q(\n room_group__room_id=room_id)).exists()\n\n @staticmethod\n @database_sync_to_async\n def save_chat_message(message, user, room):\n return Message.objects.create(author=user, room_group=room, message\n =message)\n\n\nclass ChatService:\n\n @staticmethod\n def is_room_exists(room_id: int) ->bool:\n return RoomGroup.objects.filter(id=room_id).exists()\n\n @staticmethod\n def create_users_room(**data) ->RoomGroup:\n room = RoomGroup.objects.create(room_id=data.get('room_id'))\n room.add_users([data.get('asker_id'), data.get('expert_id')])\n return room\n\n @staticmethod\n def get_group_by_id(room_id: int):\n return get_object_or_404(RoomGroup, room_id=room_id)\n\n @staticmethod\n def socket_chat_created(data: dict) ->None:\n message = f\"<div><b>Question:</b>{data.get('message')}</div>\"\n author = MainService.get_user_client(data.get('asker_id'))\n room = ChatService.get_group_by_id(data.get('room_id'))\n ChatService.save_chat_message(user=author, message=message, room=\n room, is_system=True)\n\n @staticmethod\n def save_chat_message(message: str, user: UserClient, room: RoomGroup,\n is_system: bool) ->Message:\n return Message.objects.create(author=user, room_group=room, message\n =message, is_system=is_system)\n",
"step-4": "from channels.db import database_sync_to_async\nfrom django.db.models import Q\nfrom rest_framework.generics import get_object_or_404\nfrom main.models import UserClient\nfrom main.services import MainService\nfrom .models import Message, RoomGroup, UsersRoomGroup\n\n\nclass AsyncChatService:\n\n @staticmethod\n @database_sync_to_async\n def get_group_by_id(room_id):\n try:\n return RoomGroup.objects.get(room_id=room_id)\n except RoomGroup.DoesNotExist:\n return None\n\n @staticmethod\n @database_sync_to_async\n def is_room_open(room_id: int):\n try:\n return RoomGroup.objects.get(room_id=room_id).status\n except RoomGroup.DoesNotExist:\n return None\n\n @staticmethod\n @database_sync_to_async\n def is_user_in_room(room_id, user):\n return UsersRoomGroup.objects.filter(Q(user=user) & Q(\n room_group__room_id=room_id)).exists()\n\n @staticmethod\n @database_sync_to_async\n def save_chat_message(message, user, room):\n return Message.objects.create(author=user, room_group=room, message\n =message)\n\n\nclass ChatService:\n\n @staticmethod\n def is_room_exists(room_id: int) ->bool:\n return RoomGroup.objects.filter(id=room_id).exists()\n\n @staticmethod\n def create_users_room(**data) ->RoomGroup:\n room = RoomGroup.objects.create(room_id=data.get('room_id'))\n room.add_users([data.get('asker_id'), data.get('expert_id')])\n return room\n\n @staticmethod\n def get_group_by_id(room_id: int):\n return get_object_or_404(RoomGroup, room_id=room_id)\n\n @staticmethod\n def socket_chat_created(data: dict) ->None:\n message = f\"<div><b>Question:</b>{data.get('message')}</div>\"\n author = MainService.get_user_client(data.get('asker_id'))\n room = ChatService.get_group_by_id(data.get('room_id'))\n ChatService.save_chat_message(user=author, message=message, room=\n room, is_system=True)\n\n @staticmethod\n def save_chat_message(message: str, user: UserClient, room: RoomGroup,\n is_system: bool) ->Message:\n return Message.objects.create(author=user, room_group=room, message\n =message, is_system=is_system)\n",
"step-5": "from channels.db import database_sync_to_async\nfrom django.db.models import Q\nfrom rest_framework.generics import get_object_or_404\n\nfrom main.models import UserClient\nfrom main.services import MainService\nfrom .models import Message, RoomGroup, UsersRoomGroup\n\n\nclass AsyncChatService:\n @staticmethod\n @database_sync_to_async\n def get_group_by_id(room_id):\n try:\n return RoomGroup.objects.get(room_id=room_id)\n except RoomGroup.DoesNotExist:\n return None\n\n @staticmethod\n @database_sync_to_async\n def is_room_open(room_id: int):\n try:\n return RoomGroup.objects.get(room_id=room_id).status\n except RoomGroup.DoesNotExist:\n return None\n\n @staticmethod\n @database_sync_to_async\n def is_user_in_room(room_id, user):\n return UsersRoomGroup.objects.filter(Q(user=user) & Q(room_group__room_id=room_id)).exists()\n\n @staticmethod\n @database_sync_to_async\n def save_chat_message(message, user, room):\n return Message.objects.create(author=user, room_group=room, message=message)\n\n\nclass ChatService:\n @staticmethod\n def is_room_exists(room_id: int) -> bool:\n return RoomGroup.objects.filter(id=room_id).exists()\n\n @staticmethod\n def create_users_room(**data) -> RoomGroup:\n room = RoomGroup.objects.create(room_id=data.get('room_id'))\n room.add_users([data.get('asker_id'), data.get('expert_id')])\n return room\n\n @staticmethod\n def get_group_by_id(room_id: int):\n return get_object_or_404(RoomGroup, room_id=room_id)\n\n @staticmethod\n def socket_chat_created(data: dict) -> None:\n message = f\"\"\"<div><b>Question:</b>{data.get('message')}</div>\"\"\"\n author = MainService.get_user_client(data.get('asker_id'))\n room = ChatService.get_group_by_id(data.get('room_id'))\n ChatService.save_chat_message(user=author, message=message, room=room, is_system=True)\n\n @staticmethod\n def save_chat_message(message: str, user: UserClient, room: RoomGroup, is_system: bool) -> Message:\n return Message.objects.create(author=user, room_group=room, message=message, is_system=is_system)\n",
"step-ids": [
6,
9,
11,
12,
13
]
}
|
[
6,
9,
11,
12,
13
] |
import torch,cv2,os,time
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# GPU usage (selects CUDA device 0)
device=torch.device(0)
class NET(nn.Module):
def __init__(self):
        super().__init__()
self.conv1=nn.Conv2d(1,64,5)
self.conv2=nn.Conv2d(64,128,5)
self.conv3=nn.Conv2d(128,64,5)
x=torch.randn(86,86).view(-1,1,86,86)
        self.boyut=None # flattened conv-feature size (boyut = "size" in Turkish); set by uzunluk()
self.uzunluk(x)
self.fkl1=nn.Linear(self.boyut,512)
self.fkl2=nn.Linear(512,3)
    def uzunluk(self,x): # runs the conv/pool stack and caches the flattened size (uzunluk = "length" in Turkish)
x=F.max_pool2d(F.relu(self.conv1(x)),(2,2))
x=F.max_pool2d(F.relu(self.conv2(x)),(2,2))
x=F.max_pool2d(F.relu(self.conv3(x)),(2,2))
if self.boyut is None:
self.boyut=x[0].shape[0]*x[0].shape[1]*x[0].shape[2]
return x
def forward(self,x):
x=self.uzunluk(x)
x=x.view(-1,self.boyut)
x=F.relu(self.fkl1(x))
        x=F.softmax(self.fkl2(x),dim=1)
return x
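
# A minimal usage sketch, assuming a CUDA device is available (as
# torch.device(0) above implies) and 86x86 single-channel inputs with the
# 3 output classes defined by fkl2; the batch size of 4 is an assumption.
if __name__=='__main__':
    net=NET().to(device)
    dummy=torch.randn(4,1,86,86).to(device) # batch of 4 fake images
    out=net(dummy)
    print(out.shape) # expected: torch.Size([4, 3])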
|
normal
|
{
"blob_id": "ad63beedc460b3d64a51d0b1f81f8e44cb559749",
"index": 1655,
"step-1": "<mask token>\n\n\nclass NET(nn.Module):\n <mask token>\n\n def uzunluk(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n if self.boyut is None:\n self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]\n return x\n\n def forward(self, x):\n x = self.uzunluk(x)\n x = x.view(-1, self.boyut)\n x = F.relu(self.fkl1(x))\n x = F.softmax(self.fkl2(x))\n return x\n",
"step-2": "<mask token>\n\n\nclass NET(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, 5)\n self.conv2 = nn.Conv2d(64, 128, 5)\n self.conv3 = nn.Conv2d(128, 64, 5)\n x = torch.randn(86, 86).view(-1, 1, 86, 86)\n self.boyut = None\n self.uzunluk(x)\n self.fkl1 = nn.Linear(self.boyut, 512)\n self.fkl2 = nn.Linear(512, 3)\n\n def uzunluk(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n if self.boyut is None:\n self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]\n return x\n\n def forward(self, x):\n x = self.uzunluk(x)\n x = x.view(-1, self.boyut)\n x = F.relu(self.fkl1(x))\n x = F.softmax(self.fkl2(x))\n return x\n",
"step-3": "<mask token>\ndevice = torch.device(0)\n\n\nclass NET(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, 5)\n self.conv2 = nn.Conv2d(64, 128, 5)\n self.conv3 = nn.Conv2d(128, 64, 5)\n x = torch.randn(86, 86).view(-1, 1, 86, 86)\n self.boyut = None\n self.uzunluk(x)\n self.fkl1 = nn.Linear(self.boyut, 512)\n self.fkl2 = nn.Linear(512, 3)\n\n def uzunluk(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n if self.boyut is None:\n self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]\n return x\n\n def forward(self, x):\n x = self.uzunluk(x)\n x = x.view(-1, self.boyut)\n x = F.relu(self.fkl1(x))\n x = F.softmax(self.fkl2(x))\n return x\n",
"step-4": "import torch, cv2, os, time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\ndevice = torch.device(0)\n\n\nclass NET(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, 5)\n self.conv2 = nn.Conv2d(64, 128, 5)\n self.conv3 = nn.Conv2d(128, 64, 5)\n x = torch.randn(86, 86).view(-1, 1, 86, 86)\n self.boyut = None\n self.uzunluk(x)\n self.fkl1 = nn.Linear(self.boyut, 512)\n self.fkl2 = nn.Linear(512, 3)\n\n def uzunluk(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n if self.boyut is None:\n self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]\n return x\n\n def forward(self, x):\n x = self.uzunluk(x)\n x = x.view(-1, self.boyut)\n x = F.relu(self.fkl1(x))\n x = F.softmax(self.fkl2(x))\n return x\n",
"step-5": "import torch,cv2,os,time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\n\r\n\r\n# GPU kullanımı\r\ndevice=torch.device(0)\r\n\r\n\r\nclass NET(nn.Module):\r\n def __init__(self):\r\n super(). __init__()\r\n self.conv1=nn.Conv2d(1,64,5)\r\n self.conv2=nn.Conv2d(64,128,5)\r\n self.conv3=nn.Conv2d(128,64,5)\r\n \r\n x=torch.randn(86,86).view(-1,1,86,86)\r\n \r\n self.boyut=None\r\n self.uzunluk(x)\r\n \r\n self.fkl1=nn.Linear(self.boyut,512)\r\n self.fkl2=nn.Linear(512,3)\r\n def uzunluk(self,x):\r\n \r\n x=F.max_pool2d(F.relu(self.conv1(x)),(2,2))\r\n x=F.max_pool2d(F.relu(self.conv2(x)),(2,2))\r\n x=F.max_pool2d(F.relu(self.conv3(x)),(2,2))\r\n \r\n if self.boyut is None:\r\n self.boyut=x[0].shape[0]*x[0].shape[1]*x[0].shape[2]\r\n return x\r\n def forward(self,x):\r\n x=self.uzunluk(x)\r\n x=x.view(-1,self.boyut)\r\n \r\n x=F.relu(self.fkl1(x))\r\n x=F.softmax(self.fkl2(x))\r\n \r\n return x\r\n\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for myfile in files:
if myfile[-4:] != 'xlsx':
continue
tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)
tg_sheet = tg_xlsx.active
for row in tg_sheet.iter_rows():
row_data = []
for cell in row:
row_data.append(cell.value)
result_sheet.append(row_data)
result_xlsx.save(f'{CUR_PATH}/result.xlsx')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROOT_PATH = os.getcwd()
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
path = f'{ROOT_PATH}/xlsx_files'
files = listdir(path)
result_xlsx = Workbook()
result_sheet = result_xlsx.active
for myfile in files:
if myfile[-4:] != 'xlsx':
continue
tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)
tg_sheet = tg_xlsx.active
for row in tg_sheet.iter_rows():
row_data = []
for cell in row:
row_data.append(cell.value)
result_sheet.append(row_data)
result_xlsx.save(f'{CUR_PATH}/result.xlsx')
<|reserved_special_token_1|>
import os
from os import listdir
from openpyxl import load_workbook, Workbook
ROOT_PATH = os.getcwd()
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
path = f'{ROOT_PATH}/xlsx_files'
files = listdir(path)
result_xlsx = Workbook()
result_sheet = result_xlsx.active
for myfile in files:
if myfile[-4:] != 'xlsx':
continue
tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)
tg_sheet = tg_xlsx.active
for row in tg_sheet.iter_rows():
row_data = []
for cell in row:
row_data.append(cell.value)
result_sheet.append(row_data)
result_xlsx.save(f'{CUR_PATH}/result.xlsx')
<|reserved_special_token_1|>
import os
from os import listdir
from openpyxl import load_workbook, Workbook
ROOT_PATH = os.getcwd()
# print(f'ROOT_PATH : {ROOT_PATH}')
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
# print(f'CUR_PATH : {CUR_PATH}')
path = f'{ROOT_PATH}/xlsx_files'
files = listdir(path)
result_xlsx = Workbook()
result_sheet = result_xlsx.active
for myfile in files:
if myfile[-4:] != 'xlsx':
continue
tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)
tg_sheet = tg_xlsx.active
for row in tg_sheet.iter_rows():
row_data = []
for cell in row:
row_data.append(cell.value)
result_sheet.append(row_data)
result_xlsx.save(f'{CUR_PATH}/result.xlsx')
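
# A minimal verification sketch, assuming the merge above succeeded: reopen
# result.xlsx with openpyxl and report how many rows were collected.
merged = load_workbook(f'{CUR_PATH}/result.xlsx')
print(f'result.xlsx contains {merged.active.max_row} rows')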
|
flexible
|
{
"blob_id": "d23700f03e8498a5ff3d1d03d8808048ba79a56b",
"index": 9381,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor myfile in files:\n if myfile[-4:] != 'xlsx':\n continue\n tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)\n tg_sheet = tg_xlsx.active\n for row in tg_sheet.iter_rows():\n row_data = []\n for cell in row:\n row_data.append(cell.value)\n result_sheet.append(row_data)\nresult_xlsx.save(f'{CUR_PATH}/result.xlsx')\n",
"step-3": "<mask token>\nROOT_PATH = os.getcwd()\nCUR_PATH = os.path.dirname(os.path.abspath(__file__))\npath = f'{ROOT_PATH}/xlsx_files'\nfiles = listdir(path)\nresult_xlsx = Workbook()\nresult_sheet = result_xlsx.active\nfor myfile in files:\n if myfile[-4:] != 'xlsx':\n continue\n tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)\n tg_sheet = tg_xlsx.active\n for row in tg_sheet.iter_rows():\n row_data = []\n for cell in row:\n row_data.append(cell.value)\n result_sheet.append(row_data)\nresult_xlsx.save(f'{CUR_PATH}/result.xlsx')\n",
"step-4": "import os\nfrom os import listdir\nfrom openpyxl import load_workbook, Workbook\nROOT_PATH = os.getcwd()\nCUR_PATH = os.path.dirname(os.path.abspath(__file__))\npath = f'{ROOT_PATH}/xlsx_files'\nfiles = listdir(path)\nresult_xlsx = Workbook()\nresult_sheet = result_xlsx.active\nfor myfile in files:\n if myfile[-4:] != 'xlsx':\n continue\n tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)\n tg_sheet = tg_xlsx.active\n for row in tg_sheet.iter_rows():\n row_data = []\n for cell in row:\n row_data.append(cell.value)\n result_sheet.append(row_data)\nresult_xlsx.save(f'{CUR_PATH}/result.xlsx')\n",
"step-5": "import os\nfrom os import listdir\nfrom openpyxl import load_workbook, Workbook\n\nROOT_PATH = os.getcwd()\n# print(f'ROOT_PATH : {ROOT_PATH}')\nCUR_PATH = os.path.dirname(os.path.abspath(__file__))\n# print(f'CUR_PATH : {CUR_PATH}')\npath = f'{ROOT_PATH}/xlsx_files'\nfiles = listdir(path)\n\nresult_xlsx = Workbook()\nresult_sheet = result_xlsx.active\n\nfor myfile in files:\n if myfile[-4:] != 'xlsx':\n continue\n\n tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)\n tg_sheet = tg_xlsx.active\n\n for row in tg_sheet.iter_rows():\n row_data = []\n for cell in row:\n row_data.append(cell.value)\n\n result_sheet.append(row_data)\n\nresult_xlsx.save(f'{CUR_PATH}/result.xlsx')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from django.contrib.auth.models import User
from ServicePad.apps.account.models import UserProfile
import hashlib, random, datetime
from ServicePad.apps.registration.models import ActivationKey
MIN_PASSWORD_LENGTH=8
MAX_PASSWORD_LENGTH=30
class UserRegistrationForm(forms.Form):
first_name = forms.CharField(required=True,max_length=30)
last_name = forms.CharField(required=True,max_length=30)
email = forms.EmailField(required=True,max_length=30)
password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_VOLUNTEER)
def clean(self):
cleaned_data = self.cleaned_data
#Verify usernames
try:
User.objects.get(username__exact=cleaned_data.get('email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError("Email already exists")
#Verify Passwords
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError("Passwords do not match")
del cleaned_data['password']
del cleaned_data['confirm_password']
account_type = int(cleaned_data.get('form_type'))
if account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type != UserProfile.ACCOUNT_ORGANIZATION:
raise forms.ValidationError("Invalid account type")
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data['email'], self.cleaned_data['email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.is_active = False
new_user.save()
#create the activation key
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
key_obj = ActivationKey(user=new_user,activation_key=activation_key,key_expires=key_expires)
key_obj.save()
new_profile = UserProfile(user=new_user,account_type=UserProfile.ACCOUNT_VOLUNTEER)
new_profile.save()
return new_user
class OrganizationRegistrationForm(forms.Form):
business_name = forms.CharField(required=True,max_length=60)
primary_contact_first_name = forms.CharField(required=True,max_length=30)
primary_contact_last_name = forms.CharField(required=True,max_length=30)
primary_contact_phone = forms.CharField(required=True,max_length=30)
primary_contact_email = forms.EmailField(required=True,max_length=30)
password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_ORGANIZATION)
def clean(self):
cleaned_data = self.cleaned_data
#Verify usernames
try:
User.objects.get(username__exact=cleaned_data.get('primary_contact_email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError("Email already exists")
#Verify Passwords
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError("Passwords do not match")
del cleaned_data['password']
del cleaned_data['confirm_password']
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data['primary_contact_email'], self.cleaned_data['primary_contact_email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['primary_contact_first_name']
new_user.last_name = self.cleaned_data['primary_contact_last_name']
new_user.is_active = False
new_user.save()
        #create and persist the activation key
        salt = str(random.random())
        hash_salt = hashlib.sha224(salt).hexdigest()
        activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]
        key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
        key_obj = ActivationKey(user=new_user,activation_key=activation_key,key_expires=key_expires)
        key_obj.save()
new_profile = UserProfile(user=new_user,
account_type=UserProfile.ACCOUNT_ORGANIZATION,
business_name=self.cleaned_data['business_name']
)
new_profile.save()
return new_user
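
# A minimal view-wiring sketch, assuming standard Django request handling and
# a hypothetical 'registration/volunteer.html' template; it shows how the
# forms above are typically driven from a view.
from django.shortcuts import render

def register_volunteer(request):
    if request.method == 'POST':
        form = UserRegistrationForm(request.POST)
        if form.is_valid():
            new_user = form.save() # inactive user, activation key and profile
            # a real view would now email the activation link to new_user
    else:
        form = UserRegistrationForm()
    return render(request, 'registration/volunteer.html', {'form': form})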
|
normal
|
{
"blob_id": "5f680fb21fe1090dfb58f5b9260739b91ae04d99",
"index": 9922,
"step-1": "<mask token>\n\n\nclass UserRegistrationForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-2": "<mask token>\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 
'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-3": "<mask token>\nMIN_PASSWORD_LENGTH = 8\nMAX_PASSWORD_LENGTH = 30\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 
'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-4": "from django import forms\nfrom django.contrib.auth.models import User\nfrom ServicePad.apps.account.models import UserProfile\nimport hashlib, random, datetime\nfrom ServicePad.apps.registration.models import ActivationKey\nMIN_PASSWORD_LENGTH = 8\nMAX_PASSWORD_LENGTH = 30\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise 
forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-5": "from django import forms\nfrom django.contrib.auth.models import User\nfrom ServicePad.apps.account.models import UserProfile\nimport hashlib, random, datetime\nfrom ServicePad.apps.registration.models import ActivationKey\n\nMIN_PASSWORD_LENGTH=8\nMAX_PASSWORD_LENGTH=30\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True,max_length=30)\n last_name = forms.CharField(required=True,max_length=30)\n email = forms.EmailField(required=True,max_length=30)\n password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_VOLUNTEER)\n \n def clean(self):\n cleaned_data = self.cleaned_data\n \n #Verify usernames\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError(\"Email already exists\")\n \n #Verify Passwords\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError(\"Passwords do not match\")\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n \n account_type = int(cleaned_data.get('form_type'))\n if account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type != UserProfile.ACCOUNT_ORGANIZATION:\n raise forms.ValidationError(\"Invalid account type\")\n \n \n return cleaned_data\n \n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'], self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n \n #create the activation key\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n \n key_obj = ActivationKey(user=new_user,activation_key=activation_key,key_expires=key_expires)\n key_obj.save()\n \n new_profile = UserProfile(user=new_user,account_type=UserProfile.ACCOUNT_VOLUNTEER)\n \n new_profile.save()\n \n return new_user\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True,max_length=60)\n primary_contact_first_name = forms.CharField(required=True,max_length=30)\n primary_contact_last_name = forms.CharField(required=True,max_length=30)\n primary_contact_phone = forms.CharField(required=True,max_length=30)\n primary_contact_email = forms.EmailField(required=True,max_length=30)\n password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_ORGANIZATION)\n \n def clean(self):\n cleaned_data = self.cleaned_data\n \n #Verify usernames\n try:\n User.objects.get(username__exact=cleaned_data.get('primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError(\"Email already exists\")\n \n #Verify Passwords\n password = cleaned_data.get('password')\n confirm_password = 
cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError(\"Passwords do not match\")\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n \n \n return cleaned_data\n \n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['primary_contact_email'], self.cleaned_data['primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n \n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user,\n account_type=UserProfile.ACCOUNT_ORGANIZATION,\n business_name=self.cleaned_data['business_name']\n )\n \n new_profile.save()\n \n return new_user\n\n ",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,
200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,
500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(build_id, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(master_name, builder_name,
target_name, requested_commit_position))
<|reserved_special_token_0|>
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,
100, step_name, True, 1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_99, valid_build_98]
self.assertEqual(valid_build_98, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
<|reserved_special_token_0|>
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_
):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
<|reserved_special_token_0|>
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
<|reserved_special_token_0|>
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
<|reserved_special_token_0|>
@parameterized.expand([({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': None,
'expected_step_metadata': None},), ({'step_log_return': None,
'expected_step_metadata': None},)])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=
'log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,
mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',
None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
<|reserved_special_token_0|>
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')
)
@parameterized.expand([(True, 'step_name', 'view_url',
'view_url_partial_match'), (False, 'step_name', 'view_url', None)])
def testGetStepLogViewUrlPartialMatching(self, partial_match,
full_step_name, expected_url_in_build1, expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(expected_url_in_build1, step_util.
_GetStepLogViewUrl(mock_build1, full_step_name, 'log',
partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(expected_url_in_build2, step_util.
_GetStepLogViewUrl(mock_build2, full_step_name, 'log',
partial_match=partial_match))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
<|reserved_special_token_0|>
@parameterized.expand([({'isolate_target_name': 'isolate_target'},
'isolate_target'), (None, None), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata,
expected_isolate_target, mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target, step_util.
GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform, step_util.GetOS(123,
'builder_name', 'step_name'))
<|reserved_special_token_0|>
@mock.patch.object(step_util, 'GetStepMetadata', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time), step_util.
GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
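
    # Note on the decorator pattern used throughout this suite: with
    # @parameterized.expand stacked above @mock.patch.object, each test method
    # first receives the expanded parameters and then the mock object(s), as in
    # testGetOS(self, mock_fn_return, expected_platform, mock_fn) above.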
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,
200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,
500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(build_id, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(master_name, builder_name,
target_name, requested_commit_position))
<|reserved_special_token_0|>
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,
100, step_name, True, 1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_99, valid_build_98]
self.assertEqual(valid_build_98, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_
):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
<|reserved_special_token_0|>
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
<|reserved_special_token_0|>
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
<|reserved_special_token_0|>
@parameterized.expand([({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': None,
'expected_step_metadata': None},), ({'step_log_return': None,
'expected_step_metadata': None},)])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
<|reserved_special_token_0|>
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
<|reserved_special_token_0|>
@mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=
'log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
    def testGetStepLogNotJsonLoadable(self, mocked_log, *_):
self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
<mask token>
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,
mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',
None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
<mask token>
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')
)
@parameterized.expand([(True, 'step_name', 'view_url',
'view_url_partial_match'), (False, 'step_name', 'view_url', None)])
def testGetStepLogViewUrlPartialMatching(self, partial_match,
full_step_name, expected_url_in_build1, expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(expected_url_in_build1, step_util.
_GetStepLogViewUrl(mock_build1, full_step_name, 'log',
partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(expected_url_in_build2, step_util.
_GetStepLogViewUrl(mock_build2, full_step_name, 'log',
partial_match=partial_match))
<mask token>
<mask token>
<mask token>
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
self.assertEqual(None, step_util.GetStepMetadata(123,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
<mask token>
<mask token>
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
<mask token>
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
<mask token>
@parameterized.expand([({'isolate_target_name': 'isolate_target'},
'isolate_target'), (None, None), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata,
expected_isolate_target, mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target, step_util.
GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform, step_util.GetOS(123,
'builder_name', 'step_name'))
<mask token>
@mock.patch.object(step_util, 'GetStepMetadata', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time), step_util.
GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
<|reserved_special_token_1|>
<mask token>
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,
200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,
500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(build_id, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(master_name, builder_name,
target_name, requested_commit_position))
<mask token>
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,
100, step_name, True, 1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_99, valid_build_98]
self.assertEqual(valid_build_98, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
<mask token>
<mask token>
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_
):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):
upper_bound_build_number = 4
lower_bound_build_number = 1
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, upper_bound_build_number, 20)
self.assertEqual(20, lower_bound.commit_position)
self.assertEqual(20, upper_bound.commit_position)
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=False)
def testStepNotSupportedByFindit(self, _):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'step', 'm'))
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditGtests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(GtestTestResults(
None), 'browser_tests', 'm'))
@parameterized.expand([({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': None,
'expected_step_metadata': None},), ({'step_log_return': None,
'expected_step_metadata': None},)])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=
'log_stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=json.
dumps(wf_testcase.SAMPLE_STEP_METADATA))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
def testLegacyGetStepMetadata(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)
def testLegacyGetStepMetadataStreamNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=
'log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
    def testGetStepLogNotJsonLoadable(self, mocked_log, *_):
self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
@mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)
def testGetStepLogForLuciBuildError(self, _):
self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's',
None))
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,
mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',
None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
<mask token>
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')
)
@parameterized.expand([(True, 'step_name', 'view_url',
'view_url_partial_match'), (False, 'step_name', 'view_url', None)])
def testGetStepLogViewUrlPartialMatching(self, partial_match,
full_step_name, expected_url_in_build1, expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(expected_url_in_build1, step_util.
_GetStepLogViewUrl(mock_build1, full_step_name, 'log',
partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(expected_url_in_build2, step_util.
_GetStepLogViewUrl(mock_build2, full_step_name, 'log',
partial_match=partial_match))
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog', return_value=
{'canonical_step_name': 'unsupported_step1'})
def testStepIsSupportedForMaster(self, _):
master_name = 'master1'
builder_name = 'b'
build_number = 123
step_name = 'unsupported_step1 on master1'
self.assertFalse(step_util.StepIsSupportedForMaster(master_name,
builder_name, build_number, step_name))
<mask token>
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog')
def testLegacyGetStepMetadataCached(self, mock_fn):
mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]
self.assertEqual('invalid', step_util.LegacyGetStepMetadata('m',
'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
self.assertEqual(None, step_util.GetStepMetadata(123,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'canonical_step_name': 'step_name'})
def testLegacyGetCanonicalStep(self, _):
self.assertEqual('step_name', step_util.LegacyGetCanonicalStepName(
'm', 'b', 200, 'step_name on a platform'))
@parameterized.expand([({'canonical_step_name': 'step_name'},
'step_name'), (None, 'step_name'), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepName(self, step_metadata,
expected_canonical_step, mocked_get_step):
mocked_get_step.return_value = step_metadata
self.assertEqual(expected_canonical_step, step_util.
GetCanonicalStepName(123, 'step_name (with patch)'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
<mask token>
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'a': 'b'})
def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
@parameterized.expand([({'isolate_target_name': 'isolate_target'},
'isolate_target'), (None, None), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata,
expected_isolate_target, mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target, step_util.
GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform, step_util.GetOS(123,
'builder_name', 'step_name'))
<mask token>
@mock.patch.object(step_util, 'GetStepMetadata', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time), step_util.
GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
<|reserved_special_token_1|>
<mask token>
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,
200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,
500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(build_id, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(master_name, builder_name,
target_name, requested_commit_position))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertEqual(valid_build_102, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,
100, step_name, True, 1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_99, valid_build_98]
self.assertEqual(valid_build_98, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(self,
*_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertEqual(upper_bound_build_number, lower_bound.build_number)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_
):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):
upper_bound_build_number = 4
lower_bound_build_number = 1
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, upper_bound_build_number, 20)
self.assertEqual(20, lower_bound.commit_position)
self.assertEqual(20, upper_bound.commit_position)
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=False)
def testStepNotSupportedByFindit(self, _):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'step', 'm'))
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditGtests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(GtestTestResults(
None), 'browser_tests', 'm'))
@parameterized.expand([({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': None,
'expected_step_metadata': None},), ({'step_log_return': None,
'expected_step_metadata': None},)])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=
'log_stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=json.
dumps(wf_testcase.SAMPLE_STEP_METADATA))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
def testLegacyGetStepMetadata(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)
def testLegacyGetStepMetadataStreamNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=
'log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
    def testGetStepLogNotJsonLoadable(self, mocked_log, *_):
self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
@mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)
def testGetStepLogForLuciBuildError(self, _):
self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's',
None))
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,
mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',
None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
<mask token>
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')
)
@parameterized.expand([(True, 'step_name', 'view_url',
'view_url_partial_match'), (False, 'step_name', 'view_url', None)])
def testGetStepLogViewUrlPartialMatching(self, partial_match,
full_step_name, expected_url_in_build1, expected_url_in_build2):
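        # 'step_name' matches 'step_name_longer' only when partial matching is
        # enabled; build2 contains just the longer step, so it yields a URL
        # only in the partial-match case.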
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(expected_url_in_build1, step_util.
_GetStepLogViewUrl(mock_build1, full_step_name, 'log',
partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(expected_url_in_build2, step_util.
_GetStepLogViewUrl(mock_build2, full_step_name, 'log',
partial_match=partial_match))
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog', return_value=
{'canonical_step_name': 'unsupported_step1'})
def testStepIsSupportedForMaster(self, _):
master_name = 'master1'
builder_name = 'b'
build_number = 123
step_name = 'unsupported_step1 on master1'
self.assertFalse(step_util.StepIsSupportedForMaster(master_name,
builder_name, build_number, step_name))
def testStepIsSupportedForMasterCompile(self):
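        # No mocks needed here: the compile step is reported as supported
        # without consulting step logs or per-master configuration.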
master_name = 'm'
builder_name = 'b'
build_number = 123
step_name = 'compile'
self.assertTrue(step_util.StepIsSupportedForMaster(master_name,
builder_name, build_number, step_name))
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog')
def testLegacyGetStepMetadataCached(self, mock_fn):
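        # A non-dict result ('invalid') is not cached, so the second call
        # fetches again; the dict it returns is then cached and the call
        # count stays at 2.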
mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]
self.assertEqual('invalid', step_util.LegacyGetStepMetadata('m',
'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
self.assertEqual(None, step_util.GetStepMetadata(123,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'canonical_step_name': 'step_name'})
def testLegacyGetCanonicalStep(self, _):
self.assertEqual('step_name', step_util.LegacyGetCanonicalStepName(
'm', 'b', 200, 'step_name on a platform'))
@parameterized.expand([({'canonical_step_name': 'step_name'},
'step_name'), (None, 'step_name'), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepName(self, step_metadata,
expected_canonical_step, mocked_get_step):
mocked_get_step.return_value = step_metadata
self.assertEqual(expected_canonical_step, step_util.
GetCanonicalStepName(123, 'step_name (with patch)'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'isolate_target_name': 'browser_tests'})
def testLegacyGetIsolateTargetName(self, _):
self.assertEqual('browser_tests', step_util.
LegacyGetIsolateTargetName('m', 'b', 200,
'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'a': 'b'})
def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
@parameterized.expand([({'isolate_target_name': 'isolate_target'},
'isolate_target'), (None, None), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata,
expected_isolate_target, mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target, step_util.
GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform, step_util.GetOS(123,
'builder_name', 'step_name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOSPartialMatch(self, mock_get_step_metadata):
step_util.GetOS(123, 'builder_name', 'step_name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetOS(123, 'builder_name', 'step_name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(step_util, 'GetStepMetadata', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
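        # Times come from the matching Step proto; an unknown step name
        # yields (None, None).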
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time), step_util.
GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
<|reserved_special_token_1|>
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import logging
import mock
from parameterized import parameterized
from buildbucket_proto import common_pb2
from buildbucket_proto.build_pb2 import Build
from buildbucket_proto.step_pb2 import Step
from common.waterfall import buildbucket_client
from infra_api_clients import logdog_util
from libs.test_results.gtest_test_results import GtestTestResults
from libs.test_results.webkit_layout_test_results import WebkitLayoutTestResults
from model.isolated_target import IsolatedTarget
from model.wf_build import WfBuild
from services import step_util
from services import swarming
from waterfall import build_util
from waterfall import waterfall_config
from waterfall.build_info import BuildInfo
from waterfall.test import wf_testcase
class MockWaterfallBuild(object):
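  """Stand-in for the build entity returned by build_util.DownloadBuildData."""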
def __init__(self):
self.build_id = None
self.log_location = 'logdog://logs.chromium.org/chromium/buildbucket/path'
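# Canned replacement for build_util.GetBuildInfo: commit positions advance 10
# per build ((build_number + 1) * 10) and only builds numbered above 4
# succeeded, giving the bounding-build tests below known answers.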
def _MockedGetBuildInfo(master_name, builder_name, build_number):
build = BuildInfo(master_name, builder_name, build_number)
build.commit_position = (build_number + 1) * 10
build.result = (
common_pb2.SUCCESS if build_number > 4 else common_pb2.INFRA_FAILURE)
return build
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
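    # An explicit lower bound (5) is returned unchanged; with None the helper
    # derives one from the upper bound: 100 -> 50, and with a 500-build cap,
    # 600 -> 100.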
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100, 200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600, 500))
def testGetBoundingIsolatedTargets(self):
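    # Two isolated targets bracketing the requested commit position
    # (1000 <= 1005 <= 1010) are stored below; the query must return exactly
    # that pair.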
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(
build_id - 1, luci_name, bucket_name, master_name, builder_name,
gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(
build_id, luci_name, bucket_name, master_name, builder_name,
gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(
master_name, builder_name, target_name,
requested_commit_position))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info):
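    # Only build 102 has a commit position; an ascending search from build
    # 100 with a radius of 2 should therefore land on build 102.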
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_101,
valid_build_102,
]
self.assertEqual(
valid_build_102,
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
2))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info):
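    # Same setup, but the search may only advance one build past 100, so the
    # valid build at 102 is out of reach and None is expected.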
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_101,
valid_build_102,
]
self.assertIsNone(
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_99,
valid_build_98,
]
self.assertEqual(
valid_build_98,
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
2))
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
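    # With the mocked builds, commit position 30 is exactly build 2's, so
    # build 2 is the upper bound and its predecessor, build 1, the lower.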
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(
self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):
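    # Commit 10000 is newer than any mocked build, so the newest build (5)
    # becomes the lower bound and there is no upper bound.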
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 10000)
self.assertEqual(upper_bound_build_number, lower_bound.build_number)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):
upper_bound_build_number = 4
lower_bound_build_number = 1
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)
self.assertEqual(20, lower_bound.commit_position)
self.assertEqual(20, upper_bound.commit_position)
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=False)
def testStepNotSupportedByFindit(self, _):
self.assertFalse(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'step', 'm'))
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=True)
def testIsStepSupportedByFinditGtests(self, _):
self.assertTrue(
step_util.IsStepSupportedByFindit(
GtestTestResults(None), 'browser_tests', 'm'))
@parameterized.expand([
({
'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,
'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA
},),
({
'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,
'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA
},),
({
'step_log_return': None,
'expected_step_metadata': None
},),
({
'step_log_return': None,
'expected_step_metadata': None
},),
])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(
logdog_util, '_GetStreamForStep', return_value='log_stream')
@mock.patch.object(
logdog_util,
'GetStepLogLegacy',
return_value=json.dumps(wf_testcase.SAMPLE_STEP_METADATA))
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
def testLegacyGetStepMetadata(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
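
  # Malformed JSON (here just ':') in the ninja_info log yields None rather
  # than raising.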
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
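
  # If logdog has no annotations proto or no stream for the step, no
  # metadata can be retrieved.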
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)
def testLegacyGetStepMetadataStreamNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertIsNone(step_metadata)
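
  # A waterfall build that already carries a LUCI build_id is served through
  # GetStepLogForLuciBuild instead of the legacy logdog path.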
@mock.patch.object(
step_util,
'GetStepLogForLuciBuild',
return_value=wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
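
  # With no log name specified, the raw stdio log text is returned unparsed.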
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
  @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log1\nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual(
        'log1\nlog2',
step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None))
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
  def testGetStepLogNotJsonLoadable(self, mocked_log, *_):
self.assertIsNone(
step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
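
  # For LUCI builds, a build that buildbucket cannot return means no log.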
@mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)
def testGetStepLogForLuciBuildError(self, _):
self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's', None))
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build, mock_get_log,
_):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(
step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
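
  # Happy path: the matching log's view_url is fetched and its content is
  # parsed appropriately.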
@mock.patch.object(
step_util, '_ParseStepLogIfAppropriate', return_value='log')
@mock.patch.object(logdog_util, 'GetLogFromViewUrl', return_value='log')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuild(self, mock_get_build, mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertEqual(
'log',
step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))
mock_get_log.assert_called_once_with('view_url', None)
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(
Build(), 'full_step_name', 'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
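
  # _GetStepLogViewUrl only matches logs with the requested name on the
  # requested step.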
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log'))
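
  # With partial_match=True a step whose name merely starts with the
  # requested name may supply the view URL; an exact match always wins.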
@parameterized.expand([
(True, 'step_name', 'view_url', 'view_url_partial_match'),
(False, 'step_name', 'view_url', None),
])
def testGetStepLogViewUrlPartialMatching(self, partial_match, full_step_name,
expected_url_in_build1,
expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(
expected_url_in_build1,
step_util._GetStepLogViewUrl(
mock_build1, full_step_name, 'log', partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(
expected_url_in_build2,
step_util._GetStepLogViewUrl(
mock_build2, full_step_name, 'log', partial_match=partial_match))
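
  # Support for a step is decided by its canonical name from step_metadata.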
@mock.patch.object(
step_util,
'GetWaterfallBuildStepLog',
return_value={'canonical_step_name': 'unsupported_step1'})
def testStepIsSupportedForMaster(self, _):
master_name = 'master1'
builder_name = 'b'
build_number = 123
step_name = 'unsupported_step1 on master1'
self.assertFalse(
step_util.StepIsSupportedForMaster(master_name, builder_name,
build_number, step_name))
def testStepIsSupportedForMasterCompile(self):
master_name = 'm'
builder_name = 'b'
build_number = 123
step_name = 'compile'
self.assertTrue(
step_util.StepIsSupportedForMaster(master_name, builder_name,
build_number, step_name))
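
  # Caching: an invalid first result must not be cached, while the first
  # valid dict is cached and reused on later calls.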
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog')
def testLegacyGetStepMetadataCached(self, mock_fn):
mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]
    # Returns the invalid step_metadata but does not cache it.
self.assertEqual(
'invalid',
step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
    self.assertEqual(1, mock_fn.call_count)
    # Returns the valid step_metadata and caches it.
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
    self.assertEqual(2, mock_fn.call_count)
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
    self.assertEqual(2, mock_fn.call_count)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
    # Returns the invalid step_metadata but does not cache it.
    self.assertIsNone(
        step_util.GetStepMetadata(123, 'step_name on a platform'))
    self.assertEqual(1, mock_fn.call_count)
    # Returns the valid step_metadata and caches it.
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.GetStepMetadata(123, 'step_name on a platform'))
    self.assertEqual(2, mock_fn.call_count)
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.GetStepMetadata(123, 'step_name on a platform'))
    self.assertEqual(2, mock_fn.call_count)
@mock.patch.object(
step_util,
'LegacyGetStepMetadata',
return_value={'canonical_step_name': 'step_name'})
def testLegacyGetCanonicalStep(self, _):
self.assertEqual(
'step_name',
step_util.LegacyGetCanonicalStepName('m', 'b', 200,
'step_name on a platform'))
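
  # canonical_step_name is read from step_metadata; the test expects a
  # fallback to the name before ' (with patch)' when metadata is missing,
  # and None when the key is absent.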
@parameterized.expand([({
'canonical_step_name': 'step_name'
}, 'step_name'), (None, 'step_name'), ({
'a': 'b'
}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepName(self, step_metadata, expected_canonical_step,
mocked_get_step):
mocked_get_step.return_value = step_metadata
self.assertEqual(
expected_canonical_step,
step_util.GetCanonicalStepName(123, 'step_name (with patch)'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(
step_util,
'LegacyGetStepMetadata',
return_value={'isolate_target_name': 'browser_tests'})
def testLegacyGetIsolateTargetName(self, _):
self.assertEqual(
'browser_tests',
step_util.LegacyGetIsolateTargetName(
'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
    self.assertIsNone(
        step_util.LegacyGetIsolateTargetName(
            'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(
step_util, 'LegacyGetStepMetadata', return_value={'a': 'b'})
def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):
    self.assertIsNone(
        step_util.LegacyGetIsolateTargetName(
            'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
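
  # isolate_target_name mirrors the canonical-step lookup: missing metadata
  # or a missing key yields None.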
@parameterized.expand([({
'isolate_target_name': 'isolate_target'
}, 'isolate_target'), (None, None), ({
'a': 'b'
}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata, expected_isolate_target,
mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target,
step_util.GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform,
step_util.GetOS(123, 'builder_name', 'step_name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOSPartialMatch(self, mock_get_step_metadata):
step_util.GetOS(123, 'builder_name', 'step_name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetOS(123, 'builder_name', 'step_name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
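
  # GetOS results are cached; a second identical call must not invoke
  # GetStepMetadata again.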
@mock.patch.object(
step_util,
'GetStepMetadata',
return_value=wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform',
step_util.GetOS(123, 'builder_name', 'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform',
step_util.GetOS(123, 'builder_name', 'step_name'))
self.assertEqual(1, mock_fn.call_count)
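
  # Step start/end times come from the matching Step proto in the build;
  # an unknown step yields (None, None).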
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time),
step_util.GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
|
flexible
|
{
"blob_id": "325efe65030ad3488a7fc45c0d4a289eb0b17196",
"index": 1311,
"step-1": "<mask token>\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,\n 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,\n 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n upper_bound_target = IsolatedTarget.Create(build_id, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(master_name, builder_name,\n target_name, requested_commit_position))\n <mask token>\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,\n 100, step_name, True, 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_99, valid_build_98]\n self.assertEqual(valid_build_98, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n <mask token>\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n <mask token>\n <mask token>\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def 
testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_\n ):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 50)\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n <mask token>\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n <mask token>\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n <mask token>\n\n @parameterized.expand([({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': None,\n 'expected_step_metadata': None},), ({'step_log_return': None,\n 'expected_step_metadata': None},)])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=\n 'log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None))\n <mask token>\n <mask token>\n\n 
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,\n mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',\n None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n <mask token>\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')\n )\n\n @parameterized.expand([(True, 'step_name', 'view_url',\n 'view_url_partial_match'), (False, 'step_name', 'view_url', None)])\n def testGetStepLogViewUrlPartialMatching(self, partial_match,\n full_step_name, expected_url_in_build1, expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(expected_url_in_build1, step_util.\n _GetStepLogViewUrl(mock_build1, full_step_name, 'log',\n partial_match=partial_match))\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(expected_url_in_build2, step_util.\n _GetStepLogViewUrl(mock_build2, full_step_name, 'log',\n partial_match=partial_match))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(None, 
step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n <mask token>\n\n @parameterized.expand([({'isolate_target_name': 'isolate_target'},\n 'isolate_target'), (None, None), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata,\n expected_isolate_target, mocked_get_stepmeta):\n mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target, step_util.\n GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform, step_util.GetOS(123,\n 'builder_name', 'step_name'))\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepMetadata', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n build.steps.extend([step])\n self.assertEqual((start_time, end_time), step_util.\n GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-2": "<mask token>\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,\n 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,\n 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n upper_bound_target = IsolatedTarget.Create(build_id, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(master_name, builder_name,\n target_name, requested_commit_position))\n <mask token>\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,\n 100, step_name, True, 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_99, valid_build_98]\n self.assertEqual(valid_build_98, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepExactMatch(self, *_):\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', 0, 100, 30)\n self.assertEqual(1, lower_bound.build_number)\n self.assertEqual(2, upper_bound.build_number)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = 
step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n <mask token>\n <mask token>\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_\n ):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 50)\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n <mask token>\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n <mask token>\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n <mask token>\n\n @parameterized.expand([({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': None,\n 'expected_step_metadata': None},), ({'step_log_return': None,\n 'expected_step_metadata': None},)])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n <mask token>\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')\n def testMalformattedNinjaInfo(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'json.output[ninja_info]')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value=None)\n def testLegacyGetStepMetadataStepNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n 
self.assertIsNone(step_metadata)\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=\n 'log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None))\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')\n @mock.patch.object(logging, 'error')\n def testGetStepLogNotJosonLoadable(self, mocked_log, *_):\n self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata'))\n mocked_log.assert_called_with(\n 'Failed to json load data for step_metadata. Data is: log.')\n <mask token>\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,\n mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',\n None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n <mask token>\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = 
build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')\n )\n\n @parameterized.expand([(True, 'step_name', 'view_url',\n 'view_url_partial_match'), (False, 'step_name', 'view_url', None)])\n def testGetStepLogViewUrlPartialMatching(self, partial_match,\n full_step_name, expected_url_in_build1, expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(expected_url_in_build1, step_util.\n _GetStepLogViewUrl(mock_build1, full_step_name, 'log',\n partial_match=partial_match))\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(expected_url_in_build2, step_util.\n _GetStepLogViewUrl(mock_build2, full_step_name, 'log',\n partial_match=partial_match))\n <mask token>\n <mask token>\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataCached(self, mock_fn, *_):\n mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]\n self.assertEqual(None, step_util.GetStepMetadata(123,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n <mask token>\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):\n step_util.GetCanonicalStepName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetCanonicalStepName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n <mask token>\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n <mask token>\n\n @parameterized.expand([({'isolate_target_name': 'isolate_target'},\n 'isolate_target'), (None, None), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata,\n expected_isolate_target, mocked_get_stepmeta):\n mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target, step_util.\n GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, 
mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform, step_util.GetOS(123,\n 'builder_name', 'step_name'))\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepMetadata', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n build.steps.extend([step])\n self.assertEqual((start_time, end_time), step_util.\n GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-3": "<mask token>\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,\n 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,\n 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n upper_bound_target = IsolatedTarget.Create(build_id, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(master_name, builder_name,\n target_name, requested_commit_position))\n <mask token>\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,\n 100, step_name, True, 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_99, valid_build_98]\n self.assertEqual(valid_build_98, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepExactMatch(self, *_):\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', 0, 100, 30)\n self.assertEqual(1, lower_bound.build_number)\n self.assertEqual(2, upper_bound.build_number)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = 
step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n <mask token>\n <mask token>\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_\n ):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 50)\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):\n upper_bound_build_number = 4\n lower_bound_build_number = 1\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)\n self.assertEqual(20, lower_bound.commit_position)\n self.assertEqual(20, upper_bound.commit_position)\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=False)\n def testStepNotSupportedByFindit(self, _):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'step', 'm'))\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditGtests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(GtestTestResults(\n None), 'browser_tests', 'm'))\n\n @parameterized.expand([({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': None,\n 'expected_step_metadata': None},), ({'step_log_return': None,\n 'expected_step_metadata': None},)])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def 
testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=\n 'log_stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=json.\n dumps(wf_testcase.SAMPLE_STEP_METADATA))\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n def testLegacyGetStepMetadata(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')\n def testMalformattedNinjaInfo(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'json.output[ninja_info]')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value=None)\n def testLegacyGetStepMetadataStepNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)\n def testLegacyGetStepMetadataStreamNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=\n 'log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None))\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')\n @mock.patch.object(logging, 'error')\n def testGetStepLogNotJosonLoadable(self, mocked_log, *_):\n self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata'))\n mocked_log.assert_called_with(\n 'Failed to json load data for step_metadata. 
Data is: log.')\n\n @mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)\n def testGetStepLogForLuciBuildError(self, _):\n self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's',\n None))\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,\n mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',\n None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n <mask token>\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')\n )\n\n @parameterized.expand([(True, 'step_name', 'view_url',\n 'view_url_partial_match'), (False, 'step_name', 'view_url', None)])\n def testGetStepLogViewUrlPartialMatching(self, partial_match,\n full_step_name, expected_url_in_build1, expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(expected_url_in_build1, step_util.\n _GetStepLogViewUrl(mock_build1, full_step_name, 'log',\n partial_match=partial_match))\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(expected_url_in_build2, step_util.\n _GetStepLogViewUrl(mock_build2, full_step_name, 'log',\n partial_match=partial_match))\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog', return_value=\n {'canonical_step_name': 
'unsupported_step1'})\n def testStepIsSupportedForMaster(self, _):\n master_name = 'master1'\n builder_name = 'b'\n build_number = 123\n step_name = 'unsupported_step1 on master1'\n self.assertFalse(step_util.StepIsSupportedForMaster(master_name,\n builder_name, build_number, step_name))\n <mask token>\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog')\n def testLegacyGetStepMetadataCached(self, mock_fn):\n mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]\n self.assertEqual('invalid', step_util.LegacyGetStepMetadata('m',\n 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataCached(self, mock_fn, *_):\n mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]\n self.assertEqual(None, step_util.GetStepMetadata(123,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'canonical_step_name': 'step_name'})\n def testLegacyGetCanonicalStep(self, _):\n self.assertEqual('step_name', step_util.LegacyGetCanonicalStepName(\n 'm', 'b', 200, 'step_name on a platform'))\n\n @parameterized.expand([({'canonical_step_name': 'step_name'},\n 'step_name'), (None, 'step_name'), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepName(self, step_metadata,\n expected_canonical_step, mocked_get_step):\n mocked_get_step.return_value = step_metadata\n self.assertEqual(expected_canonical_step, step_util.\n GetCanonicalStepName(123, 'step_name (with patch)'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):\n step_util.GetCanonicalStepName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetCanonicalStepName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n <mask token>\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'a': 'b'})\n def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @parameterized.expand([({'isolate_target_name': 'isolate_target'},\n 'isolate_target'), (None, None), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata,\n expected_isolate_target, mocked_get_stepmeta):\n 
mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target, step_util.\n GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform, step_util.GetOS(123,\n 'builder_name', 'step_name'))\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepMetadata', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n build.steps.extend([step])\n self.assertEqual((start_time, end_time), step_util.\n GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-4": "<mask token>\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,\n 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,\n 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n upper_bound_target = IsolatedTarget.Create(build_id, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(master_name, builder_name,\n target_name, requested_commit_position))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertEqual(valid_build_102, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,\n 100, step_name, True, 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_99, valid_build_98]\n self.assertEqual(valid_build_98, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 
'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepExactMatch(self, *_):\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', 0, 100, 30)\n self.assertEqual(1, lower_bound.build_number)\n self.assertEqual(2, upper_bound.build_number)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(self,\n *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertEqual(upper_bound_build_number, lower_bound.build_number)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_\n ):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 50)\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):\n upper_bound_build_number = 4\n lower_bound_build_number = 1\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)\n self.assertEqual(20, lower_bound.commit_position)\n self.assertEqual(20, upper_bound.commit_position)\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=False)\n def testStepNotSupportedByFindit(self, _):\n 
self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'step', 'm'))\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditGtests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(GtestTestResults(\n None), 'browser_tests', 'm'))\n\n @parameterized.expand([({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': None,\n 'expected_step_metadata': None},), ({'step_log_return': None,\n 'expected_step_metadata': None},)])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=\n 'log_stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=json.\n dumps(wf_testcase.SAMPLE_STEP_METADATA))\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n def testLegacyGetStepMetadata(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')\n def testMalformattedNinjaInfo(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'json.output[ninja_info]')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value=None)\n def testLegacyGetStepMetadataStepNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)\n def testLegacyGetStepMetadataStreamNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n 
self.assertIsNone(step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=\n 'log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None))\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')\n @mock.patch.object(logging, 'error')\n def testGetStepLogNotJosonLoadable(self, mocked_log, *_):\n self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata'))\n mocked_log.assert_called_with(\n 'Failed to json load data for step_metadata. Data is: log.')\n\n @mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)\n def testGetStepLogForLuciBuildError(self, _):\n self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's',\n None))\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,\n mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',\n None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n <mask token>\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n 
mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')\n )\n\n @parameterized.expand([(True, 'step_name', 'view_url',\n 'view_url_partial_match'), (False, 'step_name', 'view_url', None)])\n def testGetStepLogViewUrlPartialMatching(self, partial_match,\n full_step_name, expected_url_in_build1, expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(expected_url_in_build1, step_util.\n _GetStepLogViewUrl(mock_build1, full_step_name, 'log',\n partial_match=partial_match))\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(expected_url_in_build2, step_util.\n _GetStepLogViewUrl(mock_build2, full_step_name, 'log',\n partial_match=partial_match))\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog', return_value=\n {'canonical_step_name': 'unsupported_step1'})\n def testStepIsSupportedForMaster(self, _):\n master_name = 'master1'\n builder_name = 'b'\n build_number = 123\n step_name = 'unsupported_step1 on master1'\n self.assertFalse(step_util.StepIsSupportedForMaster(master_name,\n builder_name, build_number, step_name))\n\n def testStepIsSupportedForMasterCompile(self):\n master_name = 'm'\n builder_name = 'b'\n build_number = 123\n step_name = 'compile'\n self.assertTrue(step_util.StepIsSupportedForMaster(master_name,\n builder_name, build_number, step_name))\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog')\n def testLegacyGetStepMetadataCached(self, mock_fn):\n mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]\n self.assertEqual('invalid', step_util.LegacyGetStepMetadata('m',\n 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataCached(self, mock_fn, *_):\n mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]\n self.assertEqual(None, step_util.GetStepMetadata(123,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'canonical_step_name': 'step_name'})\n def testLegacyGetCanonicalStep(self, _):\n self.assertEqual('step_name', 
step_util.LegacyGetCanonicalStepName(\n 'm', 'b', 200, 'step_name on a platform'))\n\n @parameterized.expand([({'canonical_step_name': 'step_name'},\n 'step_name'), (None, 'step_name'), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepName(self, step_metadata,\n expected_canonical_step, mocked_get_step):\n mocked_get_step.return_value = step_metadata\n self.assertEqual(expected_canonical_step, step_util.\n GetCanonicalStepName(123, 'step_name (with patch)'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):\n step_util.GetCanonicalStepName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetCanonicalStepName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'isolate_target_name': 'browser_tests'})\n def testLegacyGetIsolateTargetName(self, _):\n self.assertEqual('browser_tests', step_util.\n LegacyGetIsolateTargetName('m', 'b', 200,\n 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'a': 'b'})\n def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @parameterized.expand([({'isolate_target_name': 'isolate_target'},\n 'isolate_target'), (None, None), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata,\n expected_isolate_target, mocked_get_stepmeta):\n mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target, step_util.\n GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform, step_util.GetOS(123,\n 'builder_name', 'step_name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOSPartialMatch(self, mock_get_step_metadata):\n step_util.GetOS(123, 'builder_name', 'step_name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetOS(123, 'builder_name', 'step_name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @mock.patch.object(step_util, 'GetStepMetadata', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 
'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n build.steps.extend([step])\n self.assertEqual((start_time, end_time), step_util.\n GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-5": "# Copyright 2018 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport datetime\nimport json\nimport logging\nimport mock\n\nfrom parameterized import parameterized\n\nfrom buildbucket_proto import common_pb2\nfrom buildbucket_proto.build_pb2 import Build\nfrom buildbucket_proto.step_pb2 import Step\n\nfrom common.waterfall import buildbucket_client\nfrom infra_api_clients import logdog_util\nfrom libs.test_results.gtest_test_results import GtestTestResults\nfrom libs.test_results.webkit_layout_test_results import WebkitLayoutTestResults\nfrom model.isolated_target import IsolatedTarget\nfrom model.wf_build import WfBuild\nfrom services import step_util\nfrom services import swarming\nfrom waterfall import build_util\nfrom waterfall import waterfall_config\nfrom waterfall.build_info import BuildInfo\nfrom waterfall.test import wf_testcase\n\n\nclass MockWaterfallBuild(object):\n\n def __init__(self):\n self.build_id = None\n self.log_location = 'logdog://logs.chromium.org/chromium/buildbucket/path'\n\n\ndef _MockedGetBuildInfo(master_name, builder_name, build_number):\n build = BuildInfo(master_name, builder_name, build_number)\n build.commit_position = (build_number + 1) * 10\n build.result = (\n common_pb2.SUCCESS if build_number > 4 else common_pb2.INFRA_FAILURE)\n return build\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100, 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600, 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n\n lower_bound_target = IsolatedTarget.Create(\n build_id - 1, luci_name, bucket_name, master_name, builder_name,\n gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n\n upper_bound_target = IsolatedTarget.Create(\n build_id, luci_name, bucket_name, master_name, builder_name,\n gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(\n master_name, builder_name, target_name,\n requested_commit_position))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n\n mocked_get_build_info.side_effect = [\n invalid_build_100,\n invalid_build_101,\n valid_build_102,\n ]\n\n self.assertEqual(\n valid_build_102,\n 
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,\n 2))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n\n mocked_get_build_info.side_effect = [\n invalid_build_100,\n invalid_build_101,\n valid_build_102,\n ]\n\n self.assertIsNone(\n step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,\n 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n\n mocked_get_build_info.side_effect = [\n invalid_build_100,\n invalid_build_99,\n valid_build_98,\n ]\n\n self.assertEqual(\n valid_build_98,\n step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,\n 2))\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepExactMatch(self, *_):\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', 0, 100, 30)\n self.assertEqual(1, lower_bound.build_number)\n self.assertEqual(2, upper_bound.build_number)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', lower_bound_build_number, 100, 10)\n\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(\n self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', lower_bound_build_number, 100, 10)\n\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', None, upper_bound_build_number, 10000)\n self.assertEqual(upper_bound_build_number, lower_bound.build_number)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_):\n upper_bound_build_number = 5\n lower_bound, upper_bound = 
step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', None, upper_bound_build_number, 10000)\n\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', None, upper_bound_build_number, 50)\n\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):\n upper_bound_build_number = 4\n lower_bound_build_number = 1\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)\n\n self.assertEqual(20, lower_bound.commit_position)\n self.assertEqual(20, upper_bound.commit_position)\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n\n @mock.patch.object(\n waterfall_config, 'StepIsSupportedForMaster', return_value=False)\n def testStepNotSupportedByFindit(self, _):\n self.assertFalse(\n step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'step', 'm'))\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(\n step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(\n waterfall_config, 'StepIsSupportedForMaster', return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(\n step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n\n @mock.patch.object(\n waterfall_config, 'StepIsSupportedForMaster', return_value=True)\n def testIsStepSupportedByFinditGtests(self, _):\n self.assertTrue(\n step_util.IsStepSupportedByFindit(\n GtestTestResults(None), 'browser_tests', 'm'))\n\n @parameterized.expand([\n ({\n 'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,\n 'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA\n },),\n ({\n 'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,\n 'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA\n },),\n ({\n 'step_log_return': None,\n 'expected_step_metadata': None\n },),\n ({\n 'step_log_return': None,\n 'expected_step_metadata': None\n },),\n ])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n\n @mock.patch.object(\n logdog_util, '_GetAnnotationsProtoForPath', return_value='step')\n @mock.patch.object(\n logdog_util, '_GetStreamForStep', return_value='log_stream')\n @mock.patch.object(\n logdog_util,\n 
'GetStepLogLegacy',\n return_value=json.dumps(wf_testcase.SAMPLE_STEP_METADATA))\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n def testLegacyGetStepMetadata(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')\n def testMalformattedNinjaInfo(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None, 'json.output[ninja_info]')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(\n logdog_util, '_GetAnnotationsProtoForPath', return_value=None)\n def testLegacyGetStepMetadataStepNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(\n logdog_util, '_GetAnnotationsProtoForPath', return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)\n def testLegacyGetStepMetadataStreamNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(\n step_util,\n 'GetStepLogForLuciBuild',\n return_value=wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(\n logdog_util, '_GetAnnotationsProtoForPath', return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual(\n 'log1/nlog2',\n step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None))\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')\n @mock.patch.object(logging, 'error')\n def testGetStepLogNotJosonLoadable(self, mocked_log, *_):\n self.assertIsNone(\n step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata'))\n mocked_log.assert_called_with(\n 'Failed to json load data for step_metadata. 
Data is: log.')\n\n @mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)\n def testGetStepLogForLuciBuildError(self, _):\n self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's', None))\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build, mock_get_log,\n _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(\n step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n\n @mock.patch.object(\n step_util, '_ParseStepLogIfAppropriate', return_value='log')\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl', return_value='log')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuild(self, mock_get_build, mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertEqual(\n 'log',\n step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))\n mock_get_log.assert_called_once_with('view_url', None)\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(\n Build(), 'full_step_name', 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log'))\n\n @parameterized.expand([\n (True, 'step_name', 'view_url', 'view_url_partial_match'),\n (False, 'step_name', 'view_url', None),\n ])\n def testGetStepLogViewUrlPartialMatching(self, partial_match, full_step_name,\n expected_url_in_build1,\n expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n 
mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(\n expected_url_in_build1,\n step_util._GetStepLogViewUrl(\n mock_build1, full_step_name, 'log', partial_match=partial_match))\n\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(\n expected_url_in_build2,\n step_util._GetStepLogViewUrl(\n mock_build2, full_step_name, 'log', partial_match=partial_match))\n\n @mock.patch.object(\n step_util,\n 'GetWaterfallBuildStepLog',\n return_value={'canonical_step_name': 'unsupported_step1'})\n def testStepIsSupportedForMaster(self, _):\n master_name = 'master1'\n builder_name = 'b'\n build_number = 123\n step_name = 'unsupported_step1 on master1'\n self.assertFalse(\n step_util.StepIsSupportedForMaster(master_name, builder_name,\n build_number, step_name))\n\n def testStepIsSupportedForMasterCompile(self):\n master_name = 'm'\n builder_name = 'b'\n build_number = 123\n step_name = 'compile'\n self.assertTrue(\n step_util.StepIsSupportedForMaster(master_name, builder_name,\n build_number, step_name))\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog')\n def testLegacyGetStepMetadataCached(self, mock_fn):\n mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]\n # Returns the invalid step_metadata but not cache it.\n self.assertEqual(\n 'invalid',\n step_util.LegacyGetStepMetadata('m', 'b', 201,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n # Returns the valid step_metadata and cache it.\n self.assertEqual({\n 'canonical_step_name': 'step_name'\n }, step_util.LegacyGetStepMetadata('m', 'b', 201,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({\n 'canonical_step_name': 'step_name'\n }, step_util.LegacyGetStepMetadata('m', 'b', 201,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataCached(self, mock_fn, *_):\n mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]\n # Returns the invalid step_metadata but not cache it.\n self.assertEqual(None,\n step_util.GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n # Returns the valid step_metadata and cache it.\n self.assertEqual({\n 'canonical_step_name': 'step_name'\n }, step_util.GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({\n 'canonical_step_name': 'step_name'\n }, step_util.GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(\n step_util,\n 'LegacyGetStepMetadata',\n return_value={'canonical_step_name': 'step_name'})\n def testLegacyGetCanonicalStep(self, _):\n self.assertEqual(\n 'step_name',\n step_util.LegacyGetCanonicalStepName('m', 'b', 200,\n 'step_name on a platform'))\n\n @parameterized.expand([({\n 'canonical_step_name': 'step_name'\n }, 'step_name'), (None, 'step_name'), ({\n 'a': 'b'\n }, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepName(self, step_metadata, expected_canonical_step,\n mocked_get_step):\n mocked_get_step.return_value = step_metadata\n self.assertEqual(\n 
expected_canonical_step,\n step_util.GetCanonicalStepName(123, 'step_name (with patch)'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):\n step_util.GetCanonicalStepName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetCanonicalStepName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @mock.patch.object(\n step_util,\n 'LegacyGetStepMetadata',\n return_value={'isolate_target_name': 'browser_tests'})\n def testLegacyGetIsolateTargetName(self, _):\n self.assertEqual(\n 'browser_tests',\n step_util.LegacyGetIsolateTargetName(\n 'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(\n None,\n step_util.LegacyGetIsolateTargetName(\n 'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(\n step_util, 'LegacyGetStepMetadata', return_value={'a': 'b'})\n def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):\n self.assertEqual(\n None,\n step_util.LegacyGetIsolateTargetName(\n 'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @parameterized.expand([({\n 'isolate_target_name': 'isolate_target'\n }, 'isolate_target'), (None, None), ({\n 'a': 'b'\n }, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata, expected_isolate_target,\n mocked_get_stepmeta):\n mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target,\n step_util.GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform,\n step_util.GetOS(123, 'builder_name', 'step_name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOSPartialMatch(self, mock_get_step_metadata):\n step_util.GetOS(123, 'builder_name', 'step_name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetOS(123, 'builder_name', 'step_name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @mock.patch.object(\n step_util,\n 'GetStepMetadata',\n return_value=wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform',\n step_util.GetOS(123, 'builder_name', 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform',\n step_util.GetOS(123, 'builder_name', 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n 
build.steps.extend([step])\n\n self.assertEqual((start_time, end_time),\n step_util.GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-ids": [
26,
32,
43,
49,
55
]
}
|
[
26,
32,
43,
49,
55
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('guac_auth', '0001_initial')]
operations = [migrations.RemoveField(model_name=
'guacamoleconnectiongroup', name='type'), migrations.
AlterUniqueTogether(name='guacamoleconnectiongrouppermission',
unique_together=set([])), migrations.AlterUniqueTogether(name=
'guacamoleconnectionpermission', unique_together=set([])),
migrations.AlterUniqueTogether(name='guacamolesystempermission',
unique_together=set([])), migrations.AlterUniqueTogether(name=
'guacamoleuserpermission', unique_together=set([])), migrations.
RemoveField(model_name='guacamoleconnectiongrouppermission', name=
'permission'), migrations.RemoveField(model_name=
'guacamoleconnectionpermission', name='permission'), migrations.
RemoveField(model_name='guacamolesystempermission', name=
'permission'), migrations.RemoveField(model_name=
'guacamoleuserpermission', name='permission')]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('guac_auth', '0001_initial')]
operations = [migrations.RemoveField(model_name=
'guacamoleconnectiongroup', name='type'), migrations.
AlterUniqueTogether(name='guacamoleconnectiongrouppermission',
unique_together=set([])), migrations.AlterUniqueTogether(name=
'guacamoleconnectionpermission', unique_together=set([])),
migrations.AlterUniqueTogether(name='guacamolesystempermission',
unique_together=set([])), migrations.AlterUniqueTogether(name=
'guacamoleuserpermission', unique_together=set([])), migrations.
RemoveField(model_name='guacamoleconnectiongrouppermission', name=
'permission'), migrations.RemoveField(model_name=
'guacamoleconnectionpermission', name='permission'), migrations.
RemoveField(model_name='guacamolesystempermission', name=
'permission'), migrations.RemoveField(model_name=
'guacamoleuserpermission', name='permission')]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
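# Follow-up to 0001_initial: drops the legacy ``type`` column from connection
# groups, clears the composite unique constraints on the four permission
# tables, and removes their ``permission`` columns.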
class Migration(migrations.Migration):
dependencies = [
('guac_auth', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='guacamoleconnectiongroup',
name='type',
),
migrations.AlterUniqueTogether(
name='guacamoleconnectiongrouppermission',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='guacamoleconnectionpermission',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='guacamolesystempermission',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='guacamoleuserpermission',
unique_together=set([]),
),
migrations.RemoveField(
model_name='guacamoleconnectiongrouppermission',
name='permission',
),
migrations.RemoveField(
model_name='guacamoleconnectionpermission',
name='permission',
),
migrations.RemoveField(
model_name='guacamolesystempermission',
name='permission',
),
migrations.RemoveField(
model_name='guacamoleuserpermission',
name='permission',
),
]
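# To apply (assuming this file lives under guac_auth/migrations/ with a name
# like 0002_remove_permission_links.py; the file name is hypothetical):
#   python manage.py migrate guac_auth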
|
flexible
|
{
"blob_id": "7f63097265b1058785e90441f85b7f0088946717",
"index": 7785,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('guac_auth', '0001_initial')]\n operations = [migrations.RemoveField(model_name=\n 'guacamoleconnectiongroup', name='type'), migrations.\n AlterUniqueTogether(name='guacamoleconnectiongrouppermission',\n unique_together=set([])), migrations.AlterUniqueTogether(name=\n 'guacamoleconnectionpermission', unique_together=set([])),\n migrations.AlterUniqueTogether(name='guacamolesystempermission',\n unique_together=set([])), migrations.AlterUniqueTogether(name=\n 'guacamoleuserpermission', unique_together=set([])), migrations.\n RemoveField(model_name='guacamoleconnectiongrouppermission', name=\n 'permission'), migrations.RemoveField(model_name=\n 'guacamoleconnectionpermission', name='permission'), migrations.\n RemoveField(model_name='guacamolesystempermission', name=\n 'permission'), migrations.RemoveField(model_name=\n 'guacamoleuserpermission', name='permission')]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('guac_auth', '0001_initial')]\n operations = [migrations.RemoveField(model_name=\n 'guacamoleconnectiongroup', name='type'), migrations.\n AlterUniqueTogether(name='guacamoleconnectiongrouppermission',\n unique_together=set([])), migrations.AlterUniqueTogether(name=\n 'guacamoleconnectionpermission', unique_together=set([])),\n migrations.AlterUniqueTogether(name='guacamolesystempermission',\n unique_together=set([])), migrations.AlterUniqueTogether(name=\n 'guacamoleuserpermission', unique_together=set([])), migrations.\n RemoveField(model_name='guacamoleconnectiongrouppermission', name=\n 'permission'), migrations.RemoveField(model_name=\n 'guacamoleconnectionpermission', name='permission'), migrations.\n RemoveField(model_name='guacamolesystempermission', name=\n 'permission'), migrations.RemoveField(model_name=\n 'guacamoleuserpermission', name='permission')]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('guac_auth', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='guacamoleconnectiongroup',\n name='type',\n ),\n migrations.AlterUniqueTogether(\n name='guacamoleconnectiongrouppermission',\n unique_together=set([]),\n ),\n migrations.AlterUniqueTogether(\n name='guacamoleconnectionpermission',\n unique_together=set([]),\n ),\n migrations.AlterUniqueTogether(\n name='guacamolesystempermission',\n unique_together=set([]),\n ),\n migrations.AlterUniqueTogether(\n name='guacamoleuserpermission',\n unique_together=set([]),\n ),\n migrations.RemoveField(\n model_name='guacamoleconnectiongrouppermission',\n name='permission',\n ),\n migrations.RemoveField(\n model_name='guacamoleconnectionpermission',\n name='permission',\n ),\n migrations.RemoveField(\n model_name='guacamolesystempermission',\n name='permission',\n ),\n migrations.RemoveField(\n model_name='guacamoleuserpermission',\n name='permission',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ContentKind(models.Model):
<|reserved_special_token_0|>
def __str__(self):
return self.kind
class FileFormat(models.Model):
extension = models.CharField(primary_key=True, max_length=40, choices=
file_formats.choices)
mimetype = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.extension
class FormatPreset(models.Model):
id = models.CharField(primary_key=True, max_length=150, choices=
format_presets.choices)
readable_name = models.CharField(max_length=400)
multi_language = models.BooleanField(default=False)
supplementary = models.BooleanField(default=False)
thumbnail = models.BooleanField(default=False)
subtitle = models.BooleanField(default=False)
display = models.BooleanField(default=True)
order = models.IntegerField(default=0)
kind = models.ForeignKey(ContentKind, related_name='format_presets',
null=True, on_delete=models.SET_NULL)
allowed_formats = models.ManyToManyField(FileFormat, blank=True)
def __str__(self):
return self.id
@classmethod
def guess_format_preset(cls, filename):
"""
Guess the format preset of a filename based on its extension.
Return None if format is unknown.
"""
_, ext = os.path.splitext(filename)
ext = ext.lstrip('.')
f = FormatPreset.objects.filter(allowed_formats__extension=ext,
display=True)
return f.first()
@classmethod
def get_preset(cls, preset_name):
"""
Get the FormatPreset object with that exact name.
Returns None if that format preset is not found.
"""
try:
return FormatPreset.objects.get(id=preset_name)
except FormatPreset.DoesNotExist:
return None
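    # Illustrative use of the two helpers above (the file name and preset id
    # are hypothetical; both helpers return None when nothing matches):
    #   FormatPreset.guess_format_preset('lesson.mp4')   # resolves via the 'mp4' extension
    #   FormatPreset.get_preset('high_res_video')        # exact primary-key lookup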
class Language(models.Model):
id = models.CharField(max_length=14, primary_key=True)
lang_code = models.CharField(max_length=3, db_index=True)
lang_subcode = models.CharField(max_length=10, db_index=True, blank=
True, null=True)
readable_name = models.CharField(max_length=100, blank=True)
native_name = models.CharField(max_length=100, blank=True)
lang_direction = models.CharField(max_length=3, choices=languages.
LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])
def ietf_name(self):
return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.
lang_subcode) if self.lang_subcode else self.lang_code
def __str__(self):
return self.ietf_name()
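# Sketch of ietf_name() with assumed language rows: a subcode is appended with
# a hyphen when present, otherwise the bare code is returned.
#   Language(id='en-us', lang_code='en', lang_subcode='us').ietf_name()  # -> 'en-us'
#   Language(id='sw', lang_code='sw').ietf_name()                        # -> 'sw'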
<|reserved_special_token_0|>
class AssessmentItem(models.Model):
type = models.CharField(max_length=50, default='multiplechoice')
question = models.TextField(blank=True)
hints = models.TextField(default='[]')
answers = models.TextField(default='[]')
order = models.IntegerField(default=1)
contentnode = models.ForeignKey('ContentNode', related_name=
'assessment_items', blank=True, null=True, db_index=True, on_delete
=models.CASCADE)
assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,
editable=False)
raw_data = models.TextField(blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
randomize = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
objects = CustomManager()
_field_updates = FieldTracker()
def has_changes(self):
return bool(self._field_updates.changed())
class Meta:
indexes = [models.Index(fields=['assessment_id'], name=
ASSESSMENT_ID_INDEX_NAME)]
unique_together = ['contentnode', 'assessment_id']
_permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.
exists(cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'
)).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
def on_create(self):
"""
When an exercise is added to a contentnode, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def on_update(self):
"""
        When an exercise of a contentnode is updated, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def delete(self, *args, **kwargs):
"""
When an exercise is deleted from a contentnode, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
return super(AssessmentItem, self).delete(*args, **kwargs)
class SlideshowSlide(models.Model):
contentnode = models.ForeignKey('ContentNode', related_name=
'slideshow_slides', blank=True, null=True, db_index=True, on_delete
=models.CASCADE)
sort_order = models.FloatField(default=1.0)
metadata = JSONField(default=dict)
class StagedFile(models.Model):
"""
    Keeps track of files uploaded through Ricecooker to prevent a user from going over their disk quota limit
"""
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='staged_files',
blank=True, null=True, on_delete=models.CASCADE)
<|reserved_special_token_0|>
class File(models.Model):
"""
    The bottom layer of the contentDB schema; defines the basic building block for content.
    Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
"""
id = UUIDField(primary_key=True, default=uuid.uuid4)
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
file_on_disk = models.FileField(upload_to=object_storage_name, storage=
default_storage, max_length=500, blank=True)
contentnode = models.ForeignKey(ContentNode, related_name='files',
blank=True, null=True, db_index=True, on_delete=models.CASCADE)
assessment_item = models.ForeignKey(AssessmentItem, related_name=
'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE
)
slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=
'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE
)
file_format = models.ForeignKey(FileFormat, related_name='files', blank
=True, null=True, db_index=True, on_delete=models.SET_NULL)
preset = models.ForeignKey(FormatPreset, related_name='files', blank=
True, null=True, db_index=True, on_delete=models.SET_NULL)
language = models.ForeignKey(Language, related_name='files', blank=True,
null=True, on_delete=models.SET_NULL)
original_filename = models.CharField(max_length=255, blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='files', blank=True,
null=True, on_delete=models.SET_NULL)
modified = models.DateTimeField(auto_now=True, verbose_name='modified',
null=True)
duration = models.IntegerField(blank=True, null=True)
objects = CustomManager()
_permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(
tree_id=OuterRef('assessment_item__contentnode__tree_id'))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.
_permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(edit=True) | Q(uploaded_by=user,
contentnode__isnull=True, assessment_item__isnull=True))
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True).filter(Q(main_tree__tree_id=OuterRef(
'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(
'assessment_item__contentnode__tree_id'))).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |
Q(uploaded_by=user, contentnode__isnull=True,
assessment_item__isnull=True))
class Admin:
pass
def __str__(self):
return '{checksum}{extension}'.format(checksum=self.checksum,
extension='.' + self.file_format.extension)
def filename(self):
"""
Returns just the filename of the File in storage, without the path
e.g. abcd.mp4
"""
return os.path.basename(self.file_on_disk.name)
def update_contentnode_content_id(self):
"""
If the file is attached to a contentnode and is not a thumbnail
then update that contentnode's content_id if it's a copied contentnode.
"""
if self.contentnode and self.preset.thumbnail is False:
self.contentnode.make_content_id_unique()
def on_update(self):
self.modified = timezone.now()
self.update_contentnode_content_id()
def save(self, set_by_file_on_disk=True, *args, **kwargs):
"""
        Override the default save method.
If the file_on_disk FileField gets passed a content copy:
1. generate the MD5 from the content copy
2. fill the other fields accordingly
"""
from contentcuration.utils.user import calculate_user_storage
if self.file_format_id:
if self.file_format_id not in dict(file_formats.choices):
raise ValidationError('Invalid file_format')
if set_by_file_on_disk and self.file_on_disk:
if self.checksum is None or self.checksum == '':
md5 = hashlib.md5()
for chunk in self.file_on_disk.chunks():
md5.update(chunk)
self.checksum = md5.hexdigest()
if not self.file_size:
self.file_size = self.file_on_disk.size
if not self.file_format_id:
ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
if ext in list(dict(file_formats.choices).keys()):
self.file_format_id = ext
else:
raise ValueError('Files of type `{}` are not supported.'
.format(ext))
super(File, self).save(*args, **kwargs)
if self.uploaded_by_id:
calculate_user_storage(self.uploaded_by_id)
class Meta:
indexes = [models.Index(fields=['checksum', 'file_size'], name=
FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],
name=FILE_MODIFIED_DESC_INDEX_NAME)]
constraints = [models.CheckConstraint(check=Q(preset__in=
MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name
=FILE_DURATION_CONSTRAINT)]
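# Sketch of what File.save() derives when handed raw content (the local path
# and user are hypothetical; checksum, file_size and file_format_id are filled
# in from the file itself, per the save() override above):
#   from django.core.files import File as DjangoFile
#   with open('video.mp4', 'rb') as f:
#       file_obj = File(file_on_disk=DjangoFile(f))
#       file_obj.save()  # checksum=md5 hex digest, file_size from disk, file_format_id='mp4'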
<|reserved_special_token_0|>
class PrerequisiteContentRelationship(models.Model):
"""
Predefine the prerequisite relationship between two ContentNode objects.
"""
target_node = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
prerequisite = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)
class Meta:
unique_together = ['target_node', 'prerequisite']
def clean(self, *args, **kwargs):
if self.target_node == self.prerequisite:
raise IntegrityError('Cannot self reference as prerequisite.')
if PrerequisiteContentRelationship.objects.using(self._state.db
).filter(target_node=self.prerequisite, prerequisite=self.
target_node):
raise IntegrityError(
'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
% (self.target_node, self.prerequisite))
super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)
def save(self, *args, **kwargs):
self.full_clean()
super(PrerequisiteContentRelationship, self).save(*args, **kwargs)
def __unicode__(self):
return u'%s' % self.pk
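# Directionality sketch with hypothetical nodes a and b: once a depends on b,
# saving the reverse relationship trips the clean() check above.
#   PrerequisiteContentRelationship(target_node=a, prerequisite=b).save()
#   PrerequisiteContentRelationship(target_node=b, prerequisite=a).save()  # IntegrityError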
class RelatedContentRelationship(models.Model):
"""
Predefine the related relationship between two ContentNode objects.
"""
contentnode_1 = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
contentnode_2 = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_2', on_delete=models.CASCADE)
class Meta:
unique_together = ['contentnode_1', 'contentnode_2']
def save(self, *args, **kwargs):
if self.contentnode_1 == self.contentnode_2:
raise IntegrityError('Cannot self reference as related.')
if RelatedContentRelationship.objects.using(self._state.db).filter(
contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1
):
return
super(RelatedContentRelationship, self).save(*args, **kwargs)
class Invitation(models.Model):
""" Invitation to edit channel """
id = UUIDField(primary_key=True, default=uuid.uuid4)
accepted = models.BooleanField(default=False)
declined = models.BooleanField(default=False)
revoked = models.BooleanField(default=False)
invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
SET_NULL, null=True, related_name='sent_to')
share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
email = models.EmailField(max_length=100, null=True)
sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=
'sent_by', null=True, on_delete=models.CASCADE)
channel = models.ForeignKey('Channel', null=True, related_name=
'pending_editors', on_delete=models.CASCADE)
first_name = models.CharField(max_length=100, blank=True)
last_name = models.CharField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = 'Invitation'
verbose_name_plural = 'Invitations'
def accept(self):
user = User.objects.filter(email__iexact=self.email).first()
if self.channel:
if self.share_mode == VIEW_ACCESS:
self.channel.editors.remove(user)
self.channel.viewers.add(user)
else:
self.channel.viewers.remove(user)
self.channel.editors.add(user)
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |
Q(channel__editors=user)).distinct()
@classmethod
def filter_view_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |
Q(channel__editors=user) | Q(channel__viewers=user)).distinct()
class Change(models.Model):
server_rev = models.BigAutoField(primary_key=True)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')
channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=
models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=
True, on_delete=models.CASCADE, related_name='changes_about_user')
client_rev = models.IntegerField(null=True, blank=True)
session = models.ForeignKey(Session, null=True, blank=True, on_delete=
models.SET_NULL)
table = models.CharField(max_length=32)
change_type = models.IntegerField()
kwargs = JSONField(encoder=JSONEncoder)
applied = models.BooleanField(default=False)
errored = models.BooleanField(default=False)
@classmethod
def _create_from_change(cls, created_by_id=None, channel_id=None,
user_id=None, session_key=None, applied=False, table=None, rev=None,
**data):
change_type = data.pop('type')
if table is None or table not in ALL_TABLES:
raise TypeError(
'table is a required argument for creating changes and must be a valid table name'
)
if change_type is None or change_type not in ALL_CHANGES:
raise TypeError(
'change_type is a required argument for creating changes and must be a valid change type integer'
)
return cls(session_id=session_key, created_by_id=created_by_id,
channel_id=channel_id, user_id=user_id, client_rev=rev, table=
table, change_type=change_type, kwargs=data, applied=applied)
@classmethod
def create_changes(cls, changes, created_by_id=None, session_key=None,
applied=False):
change_models = []
for change in changes:
change_models.append(cls._create_from_change(created_by_id=
created_by_id, session_key=session_key, applied=applied, **
change))
cls.objects.bulk_create(change_models)
return change_models
@classmethod
def create_change(cls, change, created_by_id=None, session_key=None,
applied=False):
obj = cls._create_from_change(created_by_id=created_by_id,
session_key=session_key, applied=applied, **change)
obj.save()
return obj
@classmethod
def serialize(cls, change):
datum = get_attribute(change, ['kwargs']).copy()
datum.update({'server_rev': get_attribute(change, ['server_rev']),
'table': get_attribute(change, ['table']), 'type':
get_attribute(change, ['change_type']), 'channel_id':
get_attribute(change, ['channel_id']), 'user_id': get_attribute
(change, ['user_id']), 'created_by_id': get_attribute(change, [
'created_by_id'])})
return datum
def serialize_to_change_dict(self):
return self.serialize(self)
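# Minimal sketch of recording a change (the table name and change-type integer
# are assumptions; both are validated against ALL_TABLES / ALL_CHANGES before
# the row is constructed, and every other key in the dict lands in kwargs):
#   Change.create_change(
#       {'table': 'contentnode', 'type': 1, 'key': node_id, 'mods': {'title': 'New'}},
#       created_by_id=user.id,
#   )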
class TaskResultCustom(object):
"""
Custom fields to add to django_celery_results's TaskResult model
If adding fields to this class, run `makemigrations` then move the generated migration from the
`django_celery_results` app to the `contentcuration` app and override the constructor to change
the app_label. See `0141_add_task_signature` for an example
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',
on_delete=models.CASCADE, null=True)
channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)
progress = models.IntegerField(null=True, blank=True, validators=[
MinValueValidator(0), MaxValueValidator(100)])
signature = models.CharField(null=True, blank=False, max_length=32)
super_as_dict = TaskResult.as_dict
def as_dict(self):
"""
:return: A dictionary representation
"""
super_dict = self.super_as_dict()
super_dict.update(user_id=self.user_id, channel_id=self.channel_id,
progress=self.progress)
return super_dict
@classmethod
def contribute_to_class(cls, model_class=TaskResult):
"""
Adds fields to model, by default TaskResult
:param model_class: TaskResult model
"""
for field in dir(cls):
if not field.startswith('_') and field not in (
'contribute_to_class', 'Meta'):
model_class.add_to_class(field, getattr(cls, field))
setattr(model_class._meta, 'indexes', getattr(model_class._meta,
'indexes', []) + cls.Meta.indexes)
class Meta:
indexes = [models.Index(fields=['signature'], name=
'task_result_signature_idx', condition=Q(status__in=
celery_states.UNREADY_STATES))]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class License(models.Model):
<|reserved_special_token_0|>
license_name = models.CharField(max_length=50)
license_url = models.URLField(blank=True)
license_description = models.TextField(blank=True)
copyright_holder_required = models.BooleanField(default=True)
is_custom = models.BooleanField(default=False)
exists = models.BooleanField(default=False, verbose_name=
'license exists', help_text=
'Tells whether or not a content item is licensed to share')
@classmethod
def validate_name(cls, name):
if cls.objects.filter(license_name=name).count() == 0:
raise ValidationError('License `{}` does not exist'.format(name))
def __str__(self):
return self.license_name
<|reserved_special_token_0|>
class ContentNode(MPTTModel, models.Model):
"""
By default, all nodes have a title and can be used as a topic.
"""
id = UUIDField(primary_key=True, default=uuid.uuid4)
content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=
False, db_index=True)
node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
original_channel_id = UUIDField(primary_key=False, editable=False, null
=True, db_index=True)
source_channel_id = UUIDField(primary_key=False, editable=False, null=True)
original_source_node_id = UUIDField(primary_key=False, editable=False,
null=True, db_index=True)
source_node_id = UUIDField(primary_key=False, editable=False, null=True)
source_id = models.CharField(max_length=200, blank=True, null=True)
source_domain = models.CharField(max_length=300, blank=True, null=True)
title = models.CharField(max_length=200, blank=True)
description = models.TextField(blank=True)
kind = models.ForeignKey('ContentKind', related_name='contentnodes',
db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
license = models.ForeignKey('License', null=True, blank=True, on_delete
=models.SET_NULL)
license_description = models.CharField(max_length=400, null=True, blank
=True)
prerequisite = models.ManyToManyField('self', related_name=
'is_prerequisite_of', through='PrerequisiteContentRelationship',
symmetrical=False, blank=True)
is_related = models.ManyToManyField('self', related_name='relate_to',
through='RelatedContentRelationship', symmetrical=False, blank=True)
language = models.ForeignKey('Language', null=True, blank=True,
related_name='content_language', on_delete=models.SET_NULL)
parent = TreeForeignKey('self', null=True, blank=True, related_name=
'children', db_index=True, on_delete=models.CASCADE)
tags = models.ManyToManyField(ContentTag, symmetrical=False,
related_name='tagged_content', blank=True)
sort_order = models.FloatField(max_length=50, default=1, verbose_name=
'sort order', help_text='Ascending, lowest number shown first')
copyright_holder = models.CharField(max_length=200, null=True, blank=
True, default='', help_text=
        'Organization or person who holds the essential rights')
original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=
True, blank=True, related_name='duplicates')
cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=
True, blank=True, related_name='clones')
thumbnail_encoding = models.TextField(blank=True, null=True)
created = models.DateTimeField(default=timezone.now, verbose_name='created'
)
modified = models.DateTimeField(auto_now=True, verbose_name='modified')
published = models.BooleanField(default=False)
publishing = models.BooleanField(default=False)
complete = models.BooleanField(null=True)
changed = models.BooleanField(default=True)
"""
Extra fields for exercises:
- type: mastery model to use to determine completion
- m: m value for M out of N mastery criteria
- n: n value for M out of N mastery criteria
"""
extra_fields = JSONField(default=dict, blank=True, null=True)
author = models.CharField(max_length=200, blank=True, default='',
help_text='Who created this content?', null=True)
aggregator = models.CharField(max_length=200, blank=True, default='',
help_text='Who gathered this content together?', null=True)
provider = models.CharField(max_length=200, blank=True, default='',
help_text='Who distributed this content?', null=True)
role_visibility = models.CharField(max_length=50, choices=roles.choices,
default=roles.LEARNER)
freeze_authoring_data = models.BooleanField(default=False)
grade_levels = models.JSONField(blank=True, null=True)
resource_types = models.JSONField(blank=True, null=True)
learning_activities = models.JSONField(blank=True, null=True)
accessibility_labels = models.JSONField(blank=True, null=True)
categories = models.JSONField(blank=True, null=True)
learner_needs = models.JSONField(blank=True, null=True)
suggested_duration = models.IntegerField(blank=True, null=True,
help_text='Suggested duration for the content node (in seconds)')
objects = CustomContentNodeTreeManager()
_field_updates = FieldTracker()
_permission_filter = Q(tree_id=OuterRef('tree_id'))
@classmethod
def _annotate_channel_id(cls, queryset):
return queryset.annotate(channel_id=Subquery(Channel.objects.filter
(main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat
=True)[:1]))
@classmethod
def filter_by_pk(cls, pk):
"""
When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always
returns a queryset filtered by pk.
When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode
for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. If
a ContentNode does not exist for `pk` then an empty queryset is returned.
"""
query = ContentNode.objects.filter(pk=pk)
if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:
tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))
if tree_id:
query = query.filter(tree_id=tree_id)
else:
tree_id = ContentNode.objects.filter(pk=pk).values_list(
'tree_id', flat=True).first()
if tree_id:
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),
tree_id, None)
query = query.filter(tree_id=tree_id)
else:
query = query.none()
return query
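    # Caching note: on a partitioned table the first filter_by_pk(pk) call pays
    # one extra query to resolve tree_id, then memoizes it under
    # CONTENTNODE_TREE_ID_CACHE_KEY so later calls can filter on (pk, tree_id)
    # and prune to a single partition.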
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.
exists(cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
@raise_if_unsaved
def get_root(self):
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
return self
return super(ContentNode, self).get_root()
@raise_if_unsaved
def get_root_id(self):
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
return self
return ContentNode.objects.values_list('pk', flat=True).get(tree_id
=self._mpttfield('tree_id'), parent=None)
def get_tree_data(self, levels=float('inf')):
"""
Returns `levels`-deep tree information starting at current node.
Args:
levels (int): depth of tree hierarchy to return
Returns:
            tree (dict): starting with self, with a children list containing
                either just the children's `node_id`s or the full recursive tree.
"""
if self.kind_id == content_kinds.TOPIC:
node_data = {'title': self.title, 'kind': self.kind_id,
'node_id': self.node_id, 'studio_id': self.id}
children = self.children.all()
if levels > 0:
node_data['children'] = [c.get_tree_data(levels=levels - 1) for
c in children]
return node_data
if self.kind_id == content_kinds.EXERCISE:
return {'title': self.title, 'kind': self.kind_id, 'count':
self.assessment_items.count(), 'node_id': self.node_id,
'studio_id': self.id}
return {'title': self.title, 'kind': self.kind_id, 'file_size':
self.files.values('file_size').aggregate(size=Sum('file_size'))
['size'], 'node_id': self.node_id, 'studio_id': self.id}
def get_original_node(self):
original_node = self.original_node or self
if self.original_channel_id and self.original_source_node_id:
original_tree_id = Channel.objects.select_related('main_tree').get(
pk=self.original_channel_id).main_tree.tree_id
original_node = ContentNode.objects.filter(tree_id=
original_tree_id, node_id=self.original_source_node_id).first(
) or ContentNode.objects.filter(tree_id=original_tree_id,
content_id=self.content_id).first() or self
return original_node
def get_associated_presets(self):
key = 'associated_presets_{}'.format(self.kind_id)
cached_data = cache.get(key)
if cached_data:
return cached_data
presets = list(FormatPreset.objects.filter(kind=self.kind).values())
cache.set(key, presets, None)
return presets
def get_prerequisites(self):
prerequisite_mapping = {}
prerequisites = self.prerequisite.all()
prereqlist = list(prerequisites)
for prereq in prerequisites:
prlist, prereqmapping = prereq.get_prerequisites()
prerequisite_mapping.update({prereq.pk: prereqmapping})
prereqlist.extend(prlist)
return prereqlist, prerequisite_mapping
def get_postrequisites(self):
postrequisite_mapping = {}
postrequisites = self.is_prerequisite_of.all()
postreqlist = list(postrequisites)
for postreq in postrequisites:
prlist, postreqmapping = postreq.get_postrequisites()
postrequisite_mapping.update({postreq.pk: postreqmapping})
postreqlist.extend(prlist)
return postreqlist, postrequisite_mapping
def get_channel_id(self):
if hasattr(self, 'channel_id'):
return self.channel_id
channel = self.get_channel()
if channel:
return channel.id
return None
def get_channel(self):
try:
root = self.get_root()
if not root:
return None
return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=
root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(
previous_tree=root)).first()
except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
return None
def get_thumbnail(self):
if self.thumbnail_encoding:
thumbnail_data = load_json_string(self.thumbnail_encoding)
if type(thumbnail_data) is dict and thumbnail_data.get('base64'):
return thumbnail_data['base64']
thumbnail = self.files.filter(preset__thumbnail=True).first()
if thumbnail:
return generate_storage_url(str(thumbnail))
return ''
@classmethod
def get_nodes_with_title(cls, title, limit_to_children_of=None):
"""
        Returns all ContentNodes with a given title. If limit_to_children_of
        is passed an id, only search within the descendants of the node with that id.
"""
if limit_to_children_of:
root = cls.objects.get(id=limit_to_children_of)
return root.get_descendants().filter(title=title)
return cls.objects.filter(title=title)
def get_details(self, channel_id=None):
"""
Returns information about the node and its children, including total size, languages, files, etc.
:return: A dictionary with detailed statistics and information about the node.
"""
from contentcuration.viewsets.common import SQArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.common import SQRelatedArrayAgg
from contentcuration.viewsets.common import SQSum
from contentcuration.viewsets.common import SQJSONBKeyArrayAgg
node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id
).order_by()
descendants = self.get_descendants().values('id')
if channel_id:
channel = Channel.objects.filter(id=channel_id)[0]
else:
channel = self.get_channel()
if not descendants.exists():
data = {'last_update': pytz.utc.localize(datetime.now()).
strftime(settings.DATE_TIME_FORMAT), 'created': self.
created.strftime(settings.DATE_TIME_FORMAT),
'resource_count': 0, 'resource_size': 0, 'includes': {
'coach_content': 0, 'exercises': 0}, 'kind_count': [],
'languages': [], 'accessible_languages': [], 'licenses': [],
'tags': [], 'copyright_holders': [], 'authors': [],
'aggregators': [], 'providers': [], 'sample_pathway': [],
'original_channels': [], 'sample_nodes': [], 'levels': [],
'categories': []}
cache.set('details_{}'.format(self.node_id), json.dumps(data), None
)
return data
resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()
nodes = With(File.objects.filter(contentnode_id__in=Subquery(
resources.values('id'))).values('checksum', 'file_size').
order_by(), name='nodes')
file_query = nodes.queryset().with_cte(nodes).values('checksum',
'file_size').distinct()
l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(
resources.values('id'))).values('language_id', 'preset_id').
order_by(), name='l_nodes')
accessible_languages_query = l_nodes.queryset().filter(preset_id=
format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(
'language__native_name').distinct()
tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=
descendants.values_list('pk', flat=True)).values('tag_name').
annotate(count=Count('tag_name')).query).replace('topic', "'topic'"
)
kind_count_query = str(resources.values('kind_id').annotate(count=
Count('kind_id')).query).replace('topic', "'topic'")
node = node.annotate(resource_count=SQCount(resources, field='id'),
resource_size=SQSum(file_query, field='file_size'),
copyright_holders=SQArrayAgg(resources.distinct(
'copyright_holder').order_by('copyright_holder'), field=
'copyright_holder'), authors=SQArrayAgg(resources.distinct(
'author').order_by('author'), field='author'), aggregators=
SQArrayAgg(resources.distinct('aggregator').order_by(
'aggregator'), field='aggregator'), providers=SQArrayAgg(
resources.distinct('provider').order_by('provider'), field=
'provider'), languages=SQRelatedArrayAgg(descendants.exclude(
language=None).distinct('language__native_name').order_by(),
field='language__native_name', fieldname='native_name'),
accessible_languages=SQRelatedArrayAgg(
accessible_languages_query, field='language__native_name',
fieldname='native_name'), licenses=SQRelatedArrayAgg(resources.
exclude(license=None).distinct('license__license_name').
order_by('license__license_name'), field=
'license__license_name', fieldname='license_name'), kind_count=
RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format
(kind_count_query), ()), tags_list=RawSQL(
'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(
tags_query), ()), coach_content=SQCount(resources.filter(
role_visibility=roles.COACH), field='id'), exercises=SQCount(
resources.filter(kind_id=content_kinds.EXERCISE), field='id'),
levels=SQJSONBKeyArrayAgg(descendants.exclude(
grade_levels__isnull=True), field='grade_levels'),
all_categories=SQJSONBKeyArrayAgg(descendants.exclude(
categories__isnull=True), field='categories'))
max_level = max(resources.values_list('level', flat=True).order_by(
).distinct() or [0])
m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').
order_by(), name='m_nodes')
deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level
=max_level).values('id').order_by('tree_id', 'lft').first()
if deepest_node_record:
deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']
)
pathway = list(deepest_node.get_ancestors().order_by().exclude(
parent=None).values('title', 'node_id', 'kind_id').order_by()
) if deepest_node_record else []
sample_nodes = [{'node_id': n.node_id, 'title': n.title,
'description': n.description, 'thumbnail': n.get_thumbnail(),
'kind': n.kind_id} for n in deepest_node.get_siblings(
include_self=True)[0:4]] if deepest_node_record else []
channel_id = channel and channel.id
originals = resources.values('original_channel_id').annotate(count=
Count('original_channel_id')).order_by('original_channel_id')
originals = {c['original_channel_id']: c['count'] for c in originals}
original_channels = Channel.objects.exclude(pk=channel_id).filter(
pk__in=originals.keys(), deleted=False).order_by()
original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(
' (Original)') if channel_id == c.id else ''), 'thumbnail': c.
get_thumbnail(), 'count': originals[c.id]} for c in
original_channels]
node = node.order_by().values('id', 'resource_count',
'resource_size', 'copyright_holders', 'authors', 'aggregators',
'providers', 'languages', 'accessible_languages',
'coach_content', 'licenses', 'tags_list', 'kind_count',
'exercises', 'levels', 'all_categories').first()
for_educators = {'coach_content': node['coach_content'],
'exercises': node['exercises']}
data = {'last_update': pytz.utc.localize(datetime.now()).strftime(
settings.DATE_TIME_FORMAT), 'created': self.created.strftime(
settings.DATE_TIME_FORMAT), 'resource_count': node.get(
'resource_count', 0), 'resource_size': node.get('resource_size',
0), 'includes': for_educators, 'kind_count': node.get(
'kind_count') or [], 'languages': node.get('languages') or [],
'accessible_languages': node.get('accessible_languages') or [],
'licenses': node.get('licenses') or [], 'tags': node.get(
'tags_list') or [], 'original_channels': original_channels,
'sample_pathway': pathway, 'sample_nodes': sample_nodes,
'authors': list(filter(bool, node['authors'])), 'aggregators':
list(filter(bool, node['aggregators'])), 'providers': list(
filter(bool, node['providers'])), 'copyright_holders': list(
filter(bool, node['copyright_holders'])), 'levels': node.get(
'levels') or [], 'categories': node.get('all_categories') or []}
cache.set('details_{}'.format(self.node_id), json.dumps(data), None)
return data
def has_changes(self):
mptt_opts = self._mptt_meta
blacklist = set(['changed', 'modified', 'publishing', mptt_opts.
tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,
mptt_opts.level_attr])
original_values = self._field_updates.changed()
return any(True for field in original_values if field not in blacklist)
def recalculate_editors_storage(self):
from contentcuration.utils.user import calculate_user_storage
for editor in self.files.values_list('uploaded_by_id', flat=True
).distinct():
calculate_user_storage(editor)
def mark_complete(self):
errors = []
if not (bool(self.title) or self.parent_id is None):
errors.append('Empty title')
if self.kind_id != content_kinds.TOPIC:
if not self.license:
errors.append('Missing license')
if (self.license and self.license.is_custom and not self.
license_description):
errors.append('Missing license description for custom license')
if (self.license and self.license.copyright_holder_required and
not self.copyright_holder):
errors.append('Missing required copyright holder')
if (self.kind_id != content_kinds.EXERCISE and not self.files.
filter(preset__supplementary=False).exists()):
errors.append('Missing default file')
if self.kind_id == content_kinds.EXERCISE:
if not self.assessment_items.filter(~Q(raw_data='') | ~Q(
question='') & ~Q(answers='[]') & (Q(type=exercises.
INPUT_QUESTION) | Q(answers__iregex='"correct":\\s*true'))
).exists():
errors.append(
'No questions with question text and complete answers')
criterion = self.extra_fields.get('options', {}).get(
'completion_criteria')
if not (self.extra_fields.get('mastery_model') or criterion):
errors.append('Missing mastery criterion')
if criterion:
try:
completion_criteria.validate(criterion, kind=
content_kinds.EXERCISE)
except completion_criteria.ValidationError:
errors.append(
'Mastery criterion is defined but is invalid')
self.complete = not errors
return errors
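    # Typical validation flow (sketch): mark_complete() only sets self.complete
    # and returns the error list; persisting the flag is the caller's job.
    #   errors = node.mark_complete()
    #   node.save()
    #   if errors:
    #       logger.warning('node %s incomplete: %s', node.id, errors)  # hypothetical logger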
def make_content_id_unique(self):
"""
        If self is NOT an original contentnode (in other words, it is a copied contentnode)
        and a contentnode with the same content_id exists, then we update self's content_id.
"""
is_node_original = (self.original_source_node_id is None or self.
original_source_node_id == self.node_id)
node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(
content_id=self.content_id)
if not is_node_original and node_same_content_id.exists():
ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.
uuid4().hex)
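    # Copy semantics: an original node (original_source_node_id is None or equal
    # to node_id) keeps its content_id so copies can be grouped across channels;
    # only a copied node whose content_id collides gets a fresh uuid4 hex value.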
def on_create(self):
self.changed = True
self.recalculate_editors_storage()
self.set_default_learning_activity()
def on_update(self):
self.changed = self.changed or self.has_changes()
def move_to(self, target, *args, **kwargs):
parent_was_trashtree = self.parent.channel_trash.exists()
super(ContentNode, self).move_to(target, *args, **kwargs)
self.save()
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.
tree_id, None)
if target.channel_trash.exists() or parent_was_trashtree:
self.recalculate_editors_storage()
def set_default_learning_activity(self):
if self.learning_activities is None:
if self.kind in kind_activity_map:
self.learning_activities = {kind_activity_map[self.kind]: True}
def save(self, skip_lock=False, *args, **kwargs):
if self._state.adding:
self.on_create()
else:
self.on_update()
old_parent_id = self._field_updates.changed().get('parent_id')
if self._state.adding and (self.parent_id or self.parent):
same_order = False
elif old_parent_id is DeferredAttribute:
same_order = True
else:
same_order = old_parent_id == self.parent_id
if not same_order:
changed_ids = list(filter(lambda x: x is not None, set([
old_parent_id, self.parent_id])))
else:
changed_ids = []
if not same_order and not skip_lock:
with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(
id__in=[pid for pid in [old_parent_id, self.parent_id] if
pid]).values_list('tree_id', flat=True).distinct()):
super(ContentNode, self).save(*args, **kwargs)
if changed_ids:
ContentNode.objects.filter(id__in=changed_ids).update(
changed=True)
else:
super(ContentNode, self).save(*args, **kwargs)
if changed_ids:
ContentNode.objects.filter(id__in=changed_ids).update(changed
=True)
save.alters_data = True
def delete(self, *args, **kwargs):
parent = self.parent or self._field_updates.changed().get('parent')
if parent:
parent.changed = True
parent.save()
self.recalculate_editors_storage()
with ContentNode.objects.lock_mptt(self.tree_id):
return super(ContentNode, self).delete(*args, **kwargs)
delete.alters_data = True
def copy_to(self, target=None, position='last-child', pk=None, mods=
None, excluded_descendants=None, can_edit_source_channel=None,
batch_size=None, progress_tracker=None):
return self._tree_manager.copy_node(self, target, position, pk,
mods, excluded_descendants, can_edit_source_channel, batch_size,
progress_tracker)[0]
def copy(self):
return self.copy_to()
def is_publishable(self):
return self.complete and self.get_descendants(include_self=True
).exclude(kind_id=content_kinds.TOPIC).exists()
class Meta:
verbose_name = 'Topic'
verbose_name_plural = 'Topics'
indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME
), models.Index(fields=['-modified'], name=
NODE_MODIFIED_DESC_INDEX_NAME)]
class ContentKind(models.Model):
kind = models.CharField(primary_key=True, max_length=200, choices=
content_kinds.choices)
def __str__(self):
return self.kind
class FileFormat(models.Model):
extension = models.CharField(primary_key=True, max_length=40, choices=
file_formats.choices)
mimetype = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.extension
class FormatPreset(models.Model):
id = models.CharField(primary_key=True, max_length=150, choices=
format_presets.choices)
readable_name = models.CharField(max_length=400)
multi_language = models.BooleanField(default=False)
supplementary = models.BooleanField(default=False)
thumbnail = models.BooleanField(default=False)
subtitle = models.BooleanField(default=False)
display = models.BooleanField(default=True)
order = models.IntegerField(default=0)
kind = models.ForeignKey(ContentKind, related_name='format_presets',
null=True, on_delete=models.SET_NULL)
allowed_formats = models.ManyToManyField(FileFormat, blank=True)
def __str__(self):
return self.id
@classmethod
def guess_format_preset(cls, filename):
"""
Guess the format preset of a filename based on its extension.
Return None if format is unknown.
"""
_, ext = os.path.splitext(filename)
ext = ext.lstrip('.')
f = FormatPreset.objects.filter(allowed_formats__extension=ext,
display=True)
return f.first()
@classmethod
def get_preset(cls, preset_name):
"""
Get the FormatPreset object with that exact name.
Returns None if that format preset is not found.
"""
try:
return FormatPreset.objects.get(id=preset_name)
except FormatPreset.DoesNotExist:
return None
class Language(models.Model):
id = models.CharField(max_length=14, primary_key=True)
lang_code = models.CharField(max_length=3, db_index=True)
lang_subcode = models.CharField(max_length=10, db_index=True, blank=
True, null=True)
readable_name = models.CharField(max_length=100, blank=True)
native_name = models.CharField(max_length=100, blank=True)
lang_direction = models.CharField(max_length=3, choices=languages.
LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])
def ietf_name(self):
return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.
lang_subcode) if self.lang_subcode else self.lang_code
def __str__(self):
return self.ietf_name()
<|reserved_special_token_0|>
class AssessmentItem(models.Model):
type = models.CharField(max_length=50, default='multiplechoice')
question = models.TextField(blank=True)
hints = models.TextField(default='[]')
answers = models.TextField(default='[]')
order = models.IntegerField(default=1)
contentnode = models.ForeignKey('ContentNode', related_name=
'assessment_items', blank=True, null=True, db_index=True, on_delete
=models.CASCADE)
assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,
editable=False)
raw_data = models.TextField(blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
randomize = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
objects = CustomManager()
_field_updates = FieldTracker()
def has_changes(self):
return bool(self._field_updates.changed())
class Meta:
indexes = [models.Index(fields=['assessment_id'], name=
ASSESSMENT_ID_INDEX_NAME)]
unique_together = ['contentnode', 'assessment_id']
_permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.
exists(cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'
)).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
def on_create(self):
"""
When an exercise is added to a contentnode, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def on_update(self):
"""
        When an exercise of a contentnode is updated, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def delete(self, *args, **kwargs):
"""
When an exercise is deleted from a contentnode, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
return super(AssessmentItem, self).delete(*args, **kwargs)
class SlideshowSlide(models.Model):
contentnode = models.ForeignKey('ContentNode', related_name=
'slideshow_slides', blank=True, null=True, db_index=True, on_delete
=models.CASCADE)
sort_order = models.FloatField(default=1.0)
metadata = JSONField(default=dict)
class StagedFile(models.Model):
"""
    Keeps track of files uploaded through Ricecooker to prevent a user from going over their disk quota limit
"""
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='staged_files',
blank=True, null=True, on_delete=models.CASCADE)
<|reserved_special_token_0|>
class File(models.Model):
"""
    The bottom layer of the contentDB schema; defines the basic building block for content.
    Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
"""
id = UUIDField(primary_key=True, default=uuid.uuid4)
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
file_on_disk = models.FileField(upload_to=object_storage_name, storage=
default_storage, max_length=500, blank=True)
contentnode = models.ForeignKey(ContentNode, related_name='files',
blank=True, null=True, db_index=True, on_delete=models.CASCADE)
assessment_item = models.ForeignKey(AssessmentItem, related_name=
'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE
)
slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=
'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE
)
file_format = models.ForeignKey(FileFormat, related_name='files', blank
=True, null=True, db_index=True, on_delete=models.SET_NULL)
preset = models.ForeignKey(FormatPreset, related_name='files', blank=
True, null=True, db_index=True, on_delete=models.SET_NULL)
language = models.ForeignKey(Language, related_name='files', blank=True,
null=True, on_delete=models.SET_NULL)
original_filename = models.CharField(max_length=255, blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='files', blank=True,
null=True, on_delete=models.SET_NULL)
modified = models.DateTimeField(auto_now=True, verbose_name='modified',
null=True)
duration = models.IntegerField(blank=True, null=True)
objects = CustomManager()
_permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(
tree_id=OuterRef('assessment_item__contentnode__tree_id'))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.
_permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(edit=True) | Q(uploaded_by=user,
contentnode__isnull=True, assessment_item__isnull=True))
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True).filter(Q(main_tree__tree_id=OuterRef(
'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(
'assessment_item__contentnode__tree_id'))).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |
Q(uploaded_by=user, contentnode__isnull=True,
assessment_item__isnull=True))
class Admin:
pass
def __str__(self):
return '{checksum}{extension}'.format(checksum=self.checksum,
extension='.' + self.file_format.extension)
def filename(self):
"""
Returns just the filename of the File in storage, without the path
e.g. abcd.mp4
"""
return os.path.basename(self.file_on_disk.name)
def update_contentnode_content_id(self):
"""
If the file is attached to a contentnode and is not a thumbnail
then update that contentnode's content_id if it's a copied contentnode.
"""
if self.contentnode and self.preset.thumbnail is False:
self.contentnode.make_content_id_unique()
def on_update(self):
self.modified = timezone.now()
self.update_contentnode_content_id()
def save(self, set_by_file_on_disk=True, *args, **kwargs):
"""
        Override the default save method.
If the file_on_disk FileField gets passed a content copy:
1. generate the MD5 from the content copy
2. fill the other fields accordingly
"""
from contentcuration.utils.user import calculate_user_storage
if self.file_format_id:
if self.file_format_id not in dict(file_formats.choices):
raise ValidationError('Invalid file_format')
if set_by_file_on_disk and self.file_on_disk:
if self.checksum is None or self.checksum == '':
md5 = hashlib.md5()
for chunk in self.file_on_disk.chunks():
md5.update(chunk)
self.checksum = md5.hexdigest()
if not self.file_size:
self.file_size = self.file_on_disk.size
if not self.file_format_id:
ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
if ext in list(dict(file_formats.choices).keys()):
self.file_format_id = ext
else:
raise ValueError('Files of type `{}` are not supported.'
.format(ext))
super(File, self).save(*args, **kwargs)
if self.uploaded_by_id:
calculate_user_storage(self.uploaded_by_id)
class Meta:
indexes = [models.Index(fields=['checksum', 'file_size'], name=
FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],
name=FILE_MODIFIED_DESC_INDEX_NAME)]
constraints = [models.CheckConstraint(check=Q(preset__in=
MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name
=FILE_DURATION_CONSTRAINT)]
<|reserved_special_token_0|>
class PrerequisiteContentRelationship(models.Model):
"""
Predefine the prerequisite relationship between two ContentNode objects.
"""
target_node = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
prerequisite = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)
class Meta:
unique_together = ['target_node', 'prerequisite']
def clean(self, *args, **kwargs):
if self.target_node == self.prerequisite:
raise IntegrityError('Cannot self reference as prerequisite.')
if PrerequisiteContentRelationship.objects.using(self._state.db
).filter(target_node=self.prerequisite, prerequisite=self.
target_node):
raise IntegrityError(
'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
% (self.target_node, self.prerequisite))
super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)
def save(self, *args, **kwargs):
self.full_clean()
super(PrerequisiteContentRelationship, self).save(*args, **kwargs)
def __unicode__(self):
return u'%s' % self.pk
class RelatedContentRelationship(models.Model):
"""
Predefine the related relationship between two ContentNode objects.
"""
contentnode_1 = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
contentnode_2 = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_2', on_delete=models.CASCADE)
class Meta:
unique_together = ['contentnode_1', 'contentnode_2']
def save(self, *args, **kwargs):
if self.contentnode_1 == self.contentnode_2:
raise IntegrityError('Cannot self reference as related.')
if RelatedContentRelationship.objects.using(self._state.db).filter(
contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1
):
return
super(RelatedContentRelationship, self).save(*args, **kwargs)
class Invitation(models.Model):
""" Invitation to edit channel """
id = UUIDField(primary_key=True, default=uuid.uuid4)
accepted = models.BooleanField(default=False)
declined = models.BooleanField(default=False)
revoked = models.BooleanField(default=False)
invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
SET_NULL, null=True, related_name='sent_to')
share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
email = models.EmailField(max_length=100, null=True)
sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=
'sent_by', null=True, on_delete=models.CASCADE)
channel = models.ForeignKey('Channel', null=True, related_name=
'pending_editors', on_delete=models.CASCADE)
first_name = models.CharField(max_length=100, blank=True)
last_name = models.CharField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = 'Invitation'
verbose_name_plural = 'Invitations'
def accept(self):
user = User.objects.filter(email__iexact=self.email).first()
if self.channel:
if self.share_mode == VIEW_ACCESS:
self.channel.editors.remove(user)
self.channel.viewers.add(user)
else:
self.channel.viewers.remove(user)
self.channel.editors.add(user)
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |
Q(channel__editors=user)).distinct()
@classmethod
def filter_view_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |
Q(channel__editors=user) | Q(channel__viewers=user)).distinct()
class Change(models.Model):
server_rev = models.BigAutoField(primary_key=True)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')
channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=
models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=
True, on_delete=models.CASCADE, related_name='changes_about_user')
client_rev = models.IntegerField(null=True, blank=True)
session = models.ForeignKey(Session, null=True, blank=True, on_delete=
models.SET_NULL)
table = models.CharField(max_length=32)
change_type = models.IntegerField()
kwargs = JSONField(encoder=JSONEncoder)
applied = models.BooleanField(default=False)
errored = models.BooleanField(default=False)
@classmethod
def _create_from_change(cls, created_by_id=None, channel_id=None,
user_id=None, session_key=None, applied=False, table=None, rev=None,
**data):
change_type = data.pop('type')
if table is None or table not in ALL_TABLES:
raise TypeError(
'table is a required argument for creating changes and must be a valid table name'
)
if change_type is None or change_type not in ALL_CHANGES:
raise TypeError(
'change_type is a required argument for creating changes and must be a valid change type integer'
)
return cls(session_id=session_key, created_by_id=created_by_id,
channel_id=channel_id, user_id=user_id, client_rev=rev, table=
table, change_type=change_type, kwargs=data, applied=applied)
@classmethod
def create_changes(cls, changes, created_by_id=None, session_key=None,
applied=False):
change_models = []
for change in changes:
change_models.append(cls._create_from_change(created_by_id=
created_by_id, session_key=session_key, applied=applied, **
change))
cls.objects.bulk_create(change_models)
return change_models
@classmethod
def create_change(cls, change, created_by_id=None, session_key=None,
applied=False):
obj = cls._create_from_change(created_by_id=created_by_id,
session_key=session_key, applied=applied, **change)
obj.save()
return obj
@classmethod
def serialize(cls, change):
datum = get_attribute(change, ['kwargs']).copy()
datum.update({'server_rev': get_attribute(change, ['server_rev']),
'table': get_attribute(change, ['table']), 'type':
get_attribute(change, ['change_type']), 'channel_id':
get_attribute(change, ['channel_id']), 'user_id': get_attribute
(change, ['user_id']), 'created_by_id': get_attribute(change, [
'created_by_id'])})
return datum
def serialize_to_change_dict(self):
return self.serialize(self)
class TaskResultCustom(object):
"""
Custom fields to add to django_celery_results's TaskResult model
If adding fields to this class, run `makemigrations` then move the generated migration from the
`django_celery_results` app to the `contentcuration` app and override the constructor to change
the app_label. See `0141_add_task_signature` for an example
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',
on_delete=models.CASCADE, null=True)
channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)
progress = models.IntegerField(null=True, blank=True, validators=[
MinValueValidator(0), MaxValueValidator(100)])
signature = models.CharField(null=True, blank=False, max_length=32)
super_as_dict = TaskResult.as_dict
def as_dict(self):
"""
:return: A dictionary representation
"""
super_dict = self.super_as_dict()
super_dict.update(user_id=self.user_id, channel_id=self.channel_id,
progress=self.progress)
return super_dict
@classmethod
def contribute_to_class(cls, model_class=TaskResult):
"""
Adds fields to model, by default TaskResult
:param model_class: TaskResult model
"""
for field in dir(cls):
if not field.startswith('_') and field not in (
'contribute_to_class', 'Meta'):
model_class.add_to_class(field, getattr(cls, field))
setattr(model_class._meta, 'indexes', getattr(model_class._meta,
'indexes', []) + cls.Meta.indexes)
class Meta:
indexes = [models.Index(fields=['signature'], name=
'task_result_signature_idx', condition=Q(status__in=
celery_states.UNREADY_STATES))]
class SecretToken(models.Model):
    """Tokens for channels"""
    token = models.CharField(max_length=100, unique=True)
    is_primary = models.BooleanField(default=False)
@classmethod
def exists(cls, token):
"""
        Return True if the given token string already exists,
        and False otherwise.
"""
return cls.objects.filter(token=token).exists()
@classmethod
def generate_new_token(cls):
"""
        Generate a new, unique proquint token string. These tokens can be
        used to refer to a channel, e.g. to download its content database.
        Raises ValueError if no unused token can be found within TRIALS
        attempts.
        """
TRIALS = 100
for __ in range(TRIALS):
token = proquint.generate()
if SecretToken.exists(token):
continue
break
else:
raise ValueError('Cannot generate new token')
return token
def __str__(self):
return '{}-{}'.format(self.token[:5], self.token[5:])
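# Minimal usage sketch (hypothetical values; the proquint token is stored
# unhyphenated and only formatted with a hyphen for display):
#
#     token_str = SecretToken.generate_new_token()
#     token = SecretToken.objects.create(token=token_str, is_primary=True)
#     str(token)  # e.g. 'kihad-fipuz'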
class PermissionCTE(With):
tree_id_fields = ['channel__{}__tree_id'.format(tree_name) for
tree_name in CHANNEL_TREES]
def __init__(self, model, user_id, **kwargs):
queryset = model.objects.filter(user_id=user_id).annotate(tree_id=
Unnest(ArrayRemove(Array(*self.tree_id_fields), None),
output_field=models.IntegerField()))
super(PermissionCTE, self).__init__(queryset=queryset.values(
'user_id', 'channel_id', 'tree_id'), **kwargs)
@classmethod
def editable_channels(cls, user_id):
return PermissionCTE(User.editable_channels.through, user_id, name=
'editable_channels_cte')
@classmethod
def view_only_channels(cls, user_id):
return PermissionCTE(User.view_only_channels.through, user_id, name
='view_only_channels_cte')
def exists(self, *filters):
return Exists(self.queryset().filter(*filters).values('user_id'))
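# Sketch of how a PermissionCTE is typically composed into a queryset; this
# mirrors the filter_edit_queryset implementations below (user_id is a
# hypothetical value):
#
#     cte = PermissionCTE.editable_channels(user_id=42)
#     qs = (ContentNode.objects.with_cte(cte)
#           .annotate(edit=cte.exists(Q(tree_id=OuterRef('tree_id'))))
#           .filter(edit=True))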
class Channel(models.Model):
""" Permissions come from association with organizations """
id = UUIDField(primary_key=True, default=uuid.uuid4)
name = models.CharField(max_length=200, blank=True)
description = models.CharField(max_length=400, blank=True)
tagline = models.CharField(max_length=150, blank=True, null=True)
version = models.IntegerField(default=0)
thumbnail = models.TextField(blank=True, null=True)
thumbnail_encoding = JSONField(default=dict)
editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name
='editable_channels', verbose_name='editors', help_text=
'Users with edit rights', blank=True)
viewers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name
='view_only_channels', verbose_name='viewers', help_text=
'Users with view only rights', blank=True)
language = models.ForeignKey('Language', null=True, blank=True,
related_name='channel_language', on_delete=models.SET_NULL)
trash_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_trash', on_delete=models.SET_NULL)
clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_clipboard', on_delete=models.SET_NULL)
main_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_main', on_delete=models.SET_NULL)
staging_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_staging', on_delete=models.SET_NULL)
chef_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_chef', on_delete=models.SET_NULL)
previous_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_previous', on_delete=models.SET_NULL)
bookmarked_by = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='bookmarked_channels', verbose_name='bookmarked by')
deleted = models.BooleanField(default=False, db_index=True)
public = models.BooleanField(default=False, db_index=True)
preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
content_defaults = JSONField(default=dict)
priority = models.IntegerField(default=0, help_text=
'Order to display public channels')
last_published = models.DateTimeField(blank=True, null=True)
secret_tokens = models.ManyToManyField(SecretToken, related_name=
'channels', verbose_name='secret tokens', blank=True)
source_url = models.CharField(max_length=200, blank=True, null=True)
demo_server_url = models.CharField(max_length=200, blank=True, null=True)
source_id = models.CharField(max_length=200, blank=True, null=True)
source_domain = models.CharField(max_length=300, blank=True, null=True)
ricecooker_version = models.CharField(max_length=100, blank=True, null=True
)
published_data = JSONField(default=dict)
icon_encoding = models.TextField(blank=True, null=True)
total_resource_count = models.IntegerField(default=0)
published_kind_count = models.TextField(blank=True, null=True)
published_size = models.FloatField(default=0)
included_languages = models.ManyToManyField('Language', related_name=
'channels', verbose_name='languages', blank=True)
_field_updates = FieldTracker(fields=['description', 'language_id',
'thumbnail', 'name', 'thumbnail_encoding', 'deleted', 'public',
'main_tree_id', 'version'])
@classmethod
def get_editable(cls, user, channel_id):
return cls.filter_edit_queryset(cls.objects.all(), user).get(id=
channel_id)
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit = Exists(User.editable_channels.through.objects.filter(user_id
=user_id, channel_id=OuterRef('id')))
queryset = queryset.annotate(edit=edit)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
user_email = not user.is_anonymous and user.email
if user_id:
filters = dict(user_id=user_id, channel_id=OuterRef('id'))
edit = Exists(User.editable_channels.through.objects.filter(**
filters).values('user_id'))
view = Exists(User.view_only_channels.through.objects.filter(**
filters).values('user_id'))
else:
edit = boolean_val(False)
view = boolean_val(False)
queryset = queryset.annotate(edit=edit, view=view)
if user_id and user.is_admin:
return queryset
permission_filter = Q()
if user_id:
pending_channels = Invitation.objects.filter(email=user_email,
revoked=False, declined=False, accepted=False).values_list(
'channel_id', flat=True)
permission_filter = Q(view=True) | Q(edit=True) | Q(deleted=
False, id__in=pending_channels)
return queryset.filter(permission_filter | Q(deleted=False, public=
True))
@classmethod
def get_all_channels(cls):
return cls.objects.select_related('main_tree').prefetch_related(
'editors', 'viewers').distinct()
def resource_size_key(self):
return '{}_resource_size'.format(self.pk)
def get_resource_size(self):
cached_data = cache.get(self.resource_size_key())
if cached_data:
return cached_data
tree_id = self.main_tree.tree_id
files = File.objects.select_related('contentnode', 'assessment_item'
).filter(contentnode__tree_id=tree_id).values('checksum',
'file_size').distinct().aggregate(resource_size=Sum('file_size'))
cache.set(self.resource_size_key(), files['resource_size'] or 0, None)
return files['resource_size'] or 0
def on_create(self):
record_channel_stats(self, None)
if not self.content_defaults:
self.content_defaults = DEFAULT_CONTENT_DEFAULTS
if not self.main_tree:
self.main_tree = ContentNode.objects.create(title=self.name,
kind_id=content_kinds.TOPIC, content_id=self.id, node_id=
self.id, original_channel_id=self.id, source_channel_id=
self.id, changed=True, complete=True)
if settings.DEBUG:
            if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1:
                raise AssertionError('main_tree must have exactly one root node')
if not self.trash_tree:
self.trash_tree = ContentNode.objects.create(title=self.name,
kind_id=content_kinds.TOPIC, content_id=self.id, node_id=
self.id)
if self.public and (self.main_tree and self.main_tree.published):
delete_public_channel_cache_keys()
def on_update(self):
from contentcuration.utils.user import calculate_user_storage
original_values = self._field_updates.changed()
record_channel_stats(self, original_values)
blacklist = set(['public', 'main_tree_id', 'version'])
if self.main_tree and original_values and any(True for field in
original_values if field not in blacklist):
self.main_tree.changed = True
if 'thumbnail' in original_values and original_values['thumbnail'
] and 'static' not in original_values['thumbnail']:
filename, ext = os.path.splitext(original_values['thumbnail'])
delete_empty_file_reference(filename, ext[1:])
if 'deleted' in original_values:
for editor in self.editors.all():
calculate_user_storage(editor.pk)
if 'deleted' in original_values and not original_values['deleted']:
self.pending_editors.all().delete()
export_db_storage_path = os.path.join(settings.DB_ROOT,
'{channel_id}.sqlite3'.format(channel_id=self.id))
if default_storage.exists(export_db_storage_path):
default_storage.delete(export_db_storage_path)
if self.main_tree:
self.main_tree.published = False
if self.main_tree and self.main_tree._field_updates.changed():
self.main_tree.save()
if 'public' in original_values and (self.main_tree and self.
main_tree.published):
delete_public_channel_cache_keys()
def save(self, *args, **kwargs):
if self._state.adding:
self.on_create()
else:
self.on_update()
super(Channel, self).save(*args, **kwargs)
def get_thumbnail(self):
return get_channel_thumbnail(self)
def has_changes(self):
return self.main_tree.get_descendants(include_self=True).filter(changed
=True).exists()
def get_date_modified(self):
return self.main_tree.get_descendants(include_self=True).aggregate(
last_modified=Max('modified'))['last_modified']
def get_resource_count(self):
return self.main_tree.get_descendants().exclude(kind_id=
content_kinds.TOPIC).order_by('content_id').distinct('content_id'
).count()
def get_human_token(self):
return self.secret_tokens.get(is_primary=True)
def get_channel_id_token(self):
return self.secret_tokens.get(token=self.id)
def make_token(self):
token = self.secret_tokens.create(token=SecretToken.
generate_new_token(), is_primary=True)
self.secret_tokens.get_or_create(token=self.id)
return token
def make_public(self, bypass_signals=False):
"""
Sets the current channel object to be public and viewable by anyone.
If bypass_signals is True, update the model in such a way that we
prevent any model signals from running due to the update.
Returns the same channel object.
"""
if bypass_signals:
self.public = True
Channel.objects.filter(id=self.id).update(public=True)
delete_public_channel_cache_keys()
else:
self.public = True
self.save()
return self
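    # Usage sketch for make_public (channel is a hypothetical saved instance):
    #
    #     channel.make_public()                     # goes through save() and signals
    #     channel.make_public(bypass_signals=True)  # direct UPDATE, skips signals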
def mark_created(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.
CREATION)
def mark_publishing(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.
PUBLICATION)
self.main_tree.publishing = True
self.main_tree.save()
def mark_deleted(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.
DELETION)
self.deleted = True
self.save()
def mark_recovered(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.
RECOVERY)
self.deleted = False
self.save()
@property
def deletion_history(self):
return self.history.filter(action=channel_history.DELETION)
@property
def publishing_history(self):
return self.history.filter(action=channel_history.PUBLICATION)
@classmethod
def get_public_channels(cls, defer_nonmain_trees=False):
"""
Get all public channels.
If defer_nonmain_trees is True, defer the loading of all
trees except for the main_tree."""
if defer_nonmain_trees:
c = Channel.objects.filter(public=True).exclude(deleted=True
).select_related('main_tree').prefetch_related('editors'
).defer('trash_tree', 'clipboard_tree', 'staging_tree',
'chef_tree', 'previous_tree', 'viewers')
else:
c = Channel.objects.filter(public=True).exclude(deleted=True)
return c
class Meta:
verbose_name = 'Channel'
verbose_name_plural = 'Channels'
indexes = [models.Index(fields=['name'], name=CHANNEL_NAME_INDEX_NAME)]
index_together = [['deleted', 'public']]
class ChannelHistory(models.Model):
"""
Model for tracking certain actions performed on a channel
"""
channel = models.ForeignKey('Channel', null=False, blank=False,
related_name='history', on_delete=models.CASCADE)
actor = models.ForeignKey('User', null=False, blank=False, related_name
='channel_history', on_delete=models.CASCADE)
performed = models.DateTimeField(default=timezone.now)
action = models.CharField(max_length=50, choices=channel_history.choices)
@classmethod
def prune(cls):
"""
        Prunes history records, keeping only the most recent record for each
        channel and action type and deleting all older records.
"""
keep_ids = cls.objects.distinct('channel_id', 'action').order_by(
'channel_id', 'action', '-performed').values_list('id', flat=True)
cls.objects.exclude(id__in=keep_ids).delete()
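    # Sketch of prune() semantics: after pruning, at most one record survives
    # per (channel, action) pair, namely the most recently performed one.
    #
    #     ChannelHistory.prune()
    #     channel.history.filter(action=channel_history.PUBLICATION).count()  # -> at most 1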
class Meta:
verbose_name = 'Channel history'
verbose_name_plural = 'Channel histories'
indexes = [models.Index(fields=['channel_id'], name=
CHANNEL_HISTORY_CHANNEL_INDEX_NAME)]
class UserHistory(models.Model):
"""
Model that stores the user's action history.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=
False, related_name='history', on_delete=models.CASCADE)
action = models.CharField(max_length=32, choices=user_history.choices)
performed_at = models.DateTimeField(default=timezone.now)
class ChannelSet(models.Model):
id = UUIDField(primary_key=True, default=uuid.uuid4)
name = models.CharField(max_length=200, blank=True)
description = models.CharField(max_length=400, blank=True)
public = models.BooleanField(default=False, db_index=True)
editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name
='channel_sets', verbose_name='editors', help_text=
'Users with edit rights', blank=True)
secret_token = models.ForeignKey('SecretToken', null=True, blank=True,
related_name='channel_sets', on_delete=models.SET_NULL)
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
user_id = not user.is_anonymous and user.id
edit = Exists(User.channel_sets.through.objects.filter(user_id=
user_id, channelset_id=OuterRef('id')))
queryset = queryset.annotate(edit=edit)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
return cls.filter_edit_queryset(queryset, user)
def get_channels(self):
if self.secret_token:
return self.secret_token.channels.filter(deleted=False)
def save(self, *args, **kwargs):
if self._state.adding:
self.on_create()
super(ChannelSet, self).save()
def on_create(self):
if not self.secret_token:
self.secret_token = SecretToken.objects.create(token=
SecretToken.generate_new_token())
def delete(self, *args, **kwargs):
super(ChannelSet, self).delete(*args, **kwargs)
if self.secret_token:
self.secret_token.delete()
class ContentTag(models.Model):
id = UUIDField(primary_key=True, default=uuid.uuid4)
tag_name = models.CharField(max_length=50)
channel = models.ForeignKey('Channel', related_name='tags', blank=True,
null=True, db_index=True, on_delete=models.SET_NULL)
objects = CustomManager()
def __str__(self):
return self.tag_name
class Meta:
unique_together = ['tag_name', 'channel']
class License(models.Model):
"""
Normalize the license of ContentNode model
"""
license_name = models.CharField(max_length=50)
license_url = models.URLField(blank=True)
license_description = models.TextField(blank=True)
copyright_holder_required = models.BooleanField(default=True)
is_custom = models.BooleanField(default=False)
exists = models.BooleanField(default=False, verbose_name=
'license exists', help_text=
'Tells whether or not a content item is licensed to share')
@classmethod
def validate_name(cls, name):
if cls.objects.filter(license_name=name).count() == 0:
raise ValidationError('License `{}` does not exist'.format(name))
def __str__(self):
return self.license_name
class ContentNode(MPTTModel, models.Model):
"""
By default, all nodes have a title and can be used as a topic.
"""
id = UUIDField(primary_key=True, default=uuid.uuid4)
content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=
False, db_index=True)
node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
original_channel_id = UUIDField(primary_key=False, editable=False, null
=True, db_index=True)
source_channel_id = UUIDField(primary_key=False, editable=False, null=True)
original_source_node_id = UUIDField(primary_key=False, editable=False,
null=True, db_index=True)
source_node_id = UUIDField(primary_key=False, editable=False, null=True)
source_id = models.CharField(max_length=200, blank=True, null=True)
source_domain = models.CharField(max_length=300, blank=True, null=True)
title = models.CharField(max_length=200, blank=True)
description = models.TextField(blank=True)
kind = models.ForeignKey('ContentKind', related_name='contentnodes',
db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
license = models.ForeignKey('License', null=True, blank=True, on_delete
=models.SET_NULL)
license_description = models.CharField(max_length=400, null=True, blank
=True)
prerequisite = models.ManyToManyField('self', related_name=
'is_prerequisite_of', through='PrerequisiteContentRelationship',
symmetrical=False, blank=True)
is_related = models.ManyToManyField('self', related_name='relate_to',
through='RelatedContentRelationship', symmetrical=False, blank=True)
language = models.ForeignKey('Language', null=True, blank=True,
related_name='content_language', on_delete=models.SET_NULL)
parent = TreeForeignKey('self', null=True, blank=True, related_name=
'children', db_index=True, on_delete=models.CASCADE)
tags = models.ManyToManyField(ContentTag, symmetrical=False,
related_name='tagged_content', blank=True)
sort_order = models.FloatField(max_length=50, default=1, verbose_name=
'sort order', help_text='Ascending, lowest number shown first')
copyright_holder = models.CharField(max_length=200, null=True, blank=
True, default='', help_text=
'Organization of person who holds the essential rights')
original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=
True, blank=True, related_name='duplicates')
cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=
True, blank=True, related_name='clones')
thumbnail_encoding = models.TextField(blank=True, null=True)
created = models.DateTimeField(default=timezone.now, verbose_name='created'
)
modified = models.DateTimeField(auto_now=True, verbose_name='modified')
published = models.BooleanField(default=False)
publishing = models.BooleanField(default=False)
complete = models.BooleanField(null=True)
changed = models.BooleanField(default=True)
"""
Extra fields for exercises:
- type: mastery model to use to determine completion
- m: m value for M out of N mastery criteria
- n: n value for M out of N mastery criteria
"""
extra_fields = JSONField(default=dict, blank=True, null=True)
author = models.CharField(max_length=200, blank=True, default='',
help_text='Who created this content?', null=True)
aggregator = models.CharField(max_length=200, blank=True, default='',
help_text='Who gathered this content together?', null=True)
provider = models.CharField(max_length=200, blank=True, default='',
help_text='Who distributed this content?', null=True)
role_visibility = models.CharField(max_length=50, choices=roles.choices,
default=roles.LEARNER)
freeze_authoring_data = models.BooleanField(default=False)
grade_levels = models.JSONField(blank=True, null=True)
resource_types = models.JSONField(blank=True, null=True)
learning_activities = models.JSONField(blank=True, null=True)
accessibility_labels = models.JSONField(blank=True, null=True)
categories = models.JSONField(blank=True, null=True)
learner_needs = models.JSONField(blank=True, null=True)
suggested_duration = models.IntegerField(blank=True, null=True,
help_text='Suggested duration for the content node (in seconds)')
objects = CustomContentNodeTreeManager()
_field_updates = FieldTracker()
_permission_filter = Q(tree_id=OuterRef('tree_id'))
@classmethod
def _annotate_channel_id(cls, queryset):
return queryset.annotate(channel_id=Subquery(Channel.objects.filter
(main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat
=True)[:1]))
@classmethod
def filter_by_pk(cls, pk):
"""
When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always
returns a queryset filtered by pk.
When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode
for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. If
a ContentNode does not exist for `pk` then an empty queryset is returned.
"""
query = ContentNode.objects.filter(pk=pk)
if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:
tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))
if tree_id:
query = query.filter(tree_id=tree_id)
else:
tree_id = ContentNode.objects.filter(pk=pk).values_list(
'tree_id', flat=True).first()
if tree_id:
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),
tree_id, None)
query = query.filter(tree_id=tree_id)
else:
query = query.none()
return query
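    # Usage sketch: on a partitioned contentnode table the tree_id is resolved
    # once and cached, so repeated lookups avoid scanning every partition
    # (node_pk is a hypothetical value):
    #
    #     node = ContentNode.filter_by_pk(pk=node_pk).first()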
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.
exists(cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
@raise_if_unsaved
def get_root(self):
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
return self
return super(ContentNode, self).get_root()
@raise_if_unsaved
def get_root_id(self):
        if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
            return self.id
return ContentNode.objects.values_list('pk', flat=True).get(tree_id
=self._mpttfield('tree_id'), parent=None)
def get_tree_data(self, levels=float('inf')):
"""
Returns `levels`-deep tree information starting at current node.
Args:
levels (int): depth of tree hierarchy to return
Returns:
            tree (dict): starting with self, with a children list containing
                either just the children's `node_id`s or the full recursive tree.
"""
if self.kind_id == content_kinds.TOPIC:
node_data = {'title': self.title, 'kind': self.kind_id,
'node_id': self.node_id, 'studio_id': self.id}
children = self.children.all()
if levels > 0:
node_data['children'] = [c.get_tree_data(levels=levels - 1) for
c in children]
return node_data
if self.kind_id == content_kinds.EXERCISE:
return {'title': self.title, 'kind': self.kind_id, 'count':
self.assessment_items.count(), 'node_id': self.node_id,
'studio_id': self.id}
return {'title': self.title, 'kind': self.kind_id, 'file_size':
self.files.values('file_size').aggregate(size=Sum('file_size'))
['size'], 'node_id': self.node_id, 'studio_id': self.id}
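    # Illustrative return shape of get_tree_data for a small tree (all values
    # hypothetical):
    #
    #     {'title': 'Root topic', 'kind': 'topic', 'node_id': '...', 'studio_id': '...',
    #      'children': [{'title': 'A video', 'kind': 'video', 'file_size': 1024,
    #                    'node_id': '...', 'studio_id': '...'}]}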
def get_original_node(self):
original_node = self.original_node or self
if self.original_channel_id and self.original_source_node_id:
original_tree_id = Channel.objects.select_related('main_tree').get(
pk=self.original_channel_id).main_tree.tree_id
original_node = ContentNode.objects.filter(tree_id=
original_tree_id, node_id=self.original_source_node_id).first(
) or ContentNode.objects.filter(tree_id=original_tree_id,
content_id=self.content_id).first() or self
return original_node
def get_associated_presets(self):
key = 'associated_presets_{}'.format(self.kind_id)
cached_data = cache.get(key)
if cached_data:
return cached_data
presets = list(FormatPreset.objects.filter(kind=self.kind).values())
cache.set(key, presets, None)
return presets
def get_prerequisites(self):
prerequisite_mapping = {}
prerequisites = self.prerequisite.all()
prereqlist = list(prerequisites)
for prereq in prerequisites:
prlist, prereqmapping = prereq.get_prerequisites()
prerequisite_mapping.update({prereq.pk: prereqmapping})
prereqlist.extend(prlist)
return prereqlist, prerequisite_mapping
def get_postrequisites(self):
postrequisite_mapping = {}
postrequisites = self.is_prerequisite_of.all()
postreqlist = list(postrequisites)
for postreq in postrequisites:
prlist, postreqmapping = postreq.get_postrequisites()
postrequisite_mapping.update({postreq.pk: postreqmapping})
postreqlist.extend(prlist)
return postreqlist, postrequisite_mapping
def get_channel_id(self):
if hasattr(self, 'channel_id'):
return self.channel_id
channel = self.get_channel()
if channel:
return channel.id
return None
def get_channel(self):
try:
root = self.get_root()
if not root:
return None
return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=
root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(
previous_tree=root)).first()
except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
return None
def get_thumbnail(self):
if self.thumbnail_encoding:
thumbnail_data = load_json_string(self.thumbnail_encoding)
            if isinstance(thumbnail_data, dict) and thumbnail_data.get('base64'):
return thumbnail_data['base64']
thumbnail = self.files.filter(preset__thumbnail=True).first()
if thumbnail:
return generate_storage_url(str(thumbnail))
return ''
@classmethod
def get_nodes_with_title(cls, title, limit_to_children_of=None):
"""
        Return all ContentNodes with the given title. If limit_to_children_of
        is passed an id, only search the descendants of the node with that id.
"""
if limit_to_children_of:
root = cls.objects.get(id=limit_to_children_of)
return root.get_descendants().filter(title=title)
return cls.objects.filter(title=title)
def get_details(self, channel_id=None):
"""
Returns information about the node and its children, including total size, languages, files, etc.
:return: A dictionary with detailed statistics and information about the node.
"""
from contentcuration.viewsets.common import SQArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.common import SQRelatedArrayAgg
from contentcuration.viewsets.common import SQSum
from contentcuration.viewsets.common import SQJSONBKeyArrayAgg
node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id
).order_by()
descendants = self.get_descendants().values('id')
if channel_id:
channel = Channel.objects.filter(id=channel_id)[0]
else:
channel = self.get_channel()
if not descendants.exists():
data = {'last_update': pytz.utc.localize(datetime.now()).
strftime(settings.DATE_TIME_FORMAT), 'created': self.
created.strftime(settings.DATE_TIME_FORMAT),
'resource_count': 0, 'resource_size': 0, 'includes': {
'coach_content': 0, 'exercises': 0}, 'kind_count': [],
'languages': [], 'accessible_languages': [], 'licenses': [],
'tags': [], 'copyright_holders': [], 'authors': [],
'aggregators': [], 'providers': [], 'sample_pathway': [],
'original_channels': [], 'sample_nodes': [], 'levels': [],
'categories': []}
cache.set('details_{}'.format(self.node_id), json.dumps(data), None
)
return data
resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()
nodes = With(File.objects.filter(contentnode_id__in=Subquery(
resources.values('id'))).values('checksum', 'file_size').
order_by(), name='nodes')
file_query = nodes.queryset().with_cte(nodes).values('checksum',
'file_size').distinct()
l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(
resources.values('id'))).values('language_id', 'preset_id').
order_by(), name='l_nodes')
accessible_languages_query = l_nodes.queryset().filter(preset_id=
format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(
'language__native_name').distinct()
tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=
descendants.values_list('pk', flat=True)).values('tag_name').
annotate(count=Count('tag_name')).query).replace('topic', "'topic'"
)
kind_count_query = str(resources.values('kind_id').annotate(count=
Count('kind_id')).query).replace('topic', "'topic'")
node = node.annotate(resource_count=SQCount(resources, field='id'),
resource_size=SQSum(file_query, field='file_size'),
copyright_holders=SQArrayAgg(resources.distinct(
'copyright_holder').order_by('copyright_holder'), field=
'copyright_holder'), authors=SQArrayAgg(resources.distinct(
'author').order_by('author'), field='author'), aggregators=
SQArrayAgg(resources.distinct('aggregator').order_by(
'aggregator'), field='aggregator'), providers=SQArrayAgg(
resources.distinct('provider').order_by('provider'), field=
'provider'), languages=SQRelatedArrayAgg(descendants.exclude(
language=None).distinct('language__native_name').order_by(),
field='language__native_name', fieldname='native_name'),
accessible_languages=SQRelatedArrayAgg(
accessible_languages_query, field='language__native_name',
fieldname='native_name'), licenses=SQRelatedArrayAgg(resources.
exclude(license=None).distinct('license__license_name').
order_by('license__license_name'), field=
'license__license_name', fieldname='license_name'), kind_count=
RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format
(kind_count_query), ()), tags_list=RawSQL(
'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(
tags_query), ()), coach_content=SQCount(resources.filter(
role_visibility=roles.COACH), field='id'), exercises=SQCount(
resources.filter(kind_id=content_kinds.EXERCISE), field='id'),
levels=SQJSONBKeyArrayAgg(descendants.exclude(
grade_levels__isnull=True), field='grade_levels'),
all_categories=SQJSONBKeyArrayAgg(descendants.exclude(
categories__isnull=True), field='categories'))
max_level = max(resources.values_list('level', flat=True).order_by(
).distinct() or [0])
m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').
order_by(), name='m_nodes')
deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level
=max_level).values('id').order_by('tree_id', 'lft').first()
if deepest_node_record:
deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']
)
pathway = list(deepest_node.get_ancestors().order_by().exclude(
parent=None).values('title', 'node_id', 'kind_id').order_by()
) if deepest_node_record else []
sample_nodes = [{'node_id': n.node_id, 'title': n.title,
'description': n.description, 'thumbnail': n.get_thumbnail(),
'kind': n.kind_id} for n in deepest_node.get_siblings(
include_self=True)[0:4]] if deepest_node_record else []
channel_id = channel and channel.id
originals = resources.values('original_channel_id').annotate(count=
Count('original_channel_id')).order_by('original_channel_id')
originals = {c['original_channel_id']: c['count'] for c in originals}
original_channels = Channel.objects.exclude(pk=channel_id).filter(
pk__in=originals.keys(), deleted=False).order_by()
original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(
' (Original)') if channel_id == c.id else ''), 'thumbnail': c.
get_thumbnail(), 'count': originals[c.id]} for c in
original_channels]
node = node.order_by().values('id', 'resource_count',
'resource_size', 'copyright_holders', 'authors', 'aggregators',
'providers', 'languages', 'accessible_languages',
'coach_content', 'licenses', 'tags_list', 'kind_count',
'exercises', 'levels', 'all_categories').first()
for_educators = {'coach_content': node['coach_content'],
'exercises': node['exercises']}
data = {'last_update': pytz.utc.localize(datetime.now()).strftime(
settings.DATE_TIME_FORMAT), 'created': self.created.strftime(
settings.DATE_TIME_FORMAT), 'resource_count': node.get(
'resource_count', 0), 'resource_size': node.get('resource_size',
0), 'includes': for_educators, 'kind_count': node.get(
'kind_count') or [], 'languages': node.get('languages') or [],
'accessible_languages': node.get('accessible_languages') or [],
'licenses': node.get('licenses') or [], 'tags': node.get(
'tags_list') or [], 'original_channels': original_channels,
'sample_pathway': pathway, 'sample_nodes': sample_nodes,
'authors': list(filter(bool, node['authors'])), 'aggregators':
list(filter(bool, node['aggregators'])), 'providers': list(
filter(bool, node['providers'])), 'copyright_holders': list(
filter(bool, node['copyright_holders'])), 'levels': node.get(
'levels') or [], 'categories': node.get('all_categories') or []}
cache.set('details_{}'.format(self.node_id), json.dumps(data), None)
return data
def has_changes(self):
mptt_opts = self._mptt_meta
blacklist = set(['changed', 'modified', 'publishing', mptt_opts.
tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,
mptt_opts.level_attr])
original_values = self._field_updates.changed()
return any(True for field in original_values if field not in blacklist)
def recalculate_editors_storage(self):
from contentcuration.utils.user import calculate_user_storage
for editor in self.files.values_list('uploaded_by_id', flat=True
).distinct():
calculate_user_storage(editor)
def mark_complete(self):
errors = []
if not (bool(self.title) or self.parent_id is None):
errors.append('Empty title')
if self.kind_id != content_kinds.TOPIC:
if not self.license:
errors.append('Missing license')
if (self.license and self.license.is_custom and not self.
license_description):
errors.append('Missing license description for custom license')
if (self.license and self.license.copyright_holder_required and
not self.copyright_holder):
errors.append('Missing required copyright holder')
if (self.kind_id != content_kinds.EXERCISE and not self.files.
filter(preset__supplementary=False).exists()):
errors.append('Missing default file')
if self.kind_id == content_kinds.EXERCISE:
if not self.assessment_items.filter(~Q(raw_data='') | ~Q(
question='') & ~Q(answers='[]') & (Q(type=exercises.
INPUT_QUESTION) | Q(answers__iregex='"correct":\\s*true'))
).exists():
errors.append(
'No questions with question text and complete answers')
criterion = self.extra_fields.get('options', {}).get(
'completion_criteria')
if not (self.extra_fields.get('mastery_model') or criterion):
errors.append('Missing mastery criterion')
if criterion:
try:
completion_criteria.validate(criterion, kind=
content_kinds.EXERCISE)
except completion_criteria.ValidationError:
errors.append(
'Mastery criterion is defined but is invalid')
self.complete = not errors
return errors
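    # Usage sketch: mark_complete() sets self.complete but does not persist it,
    # so callers are expected to save and surface the returned errors
    # (node is a hypothetical instance):
    #
    #     errors = node.mark_complete()
    #     node.save()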
def make_content_id_unique(self):
"""
        If self is NOT an original contentnode (in other words, it is a copied
        contentnode) and a contentnode with the same content_id exists, then
        update self's content_id to a new UUID.
"""
is_node_original = (self.original_source_node_id is None or self.
original_source_node_id == self.node_id)
node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(
content_id=self.content_id)
if not is_node_original and node_same_content_id.exists():
ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.
uuid4().hex)
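    # Sketch: the content_id is regenerated via a queryset update, so the
    # in-memory instance is not refreshed automatically:
    #
    #     node.make_content_id_unique()
    #     node.refresh_from_db(fields=['content_id'])  # if the new value is needed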
def on_create(self):
self.changed = True
self.recalculate_editors_storage()
self.set_default_learning_activity()
def on_update(self):
self.changed = self.changed or self.has_changes()
def move_to(self, target, *args, **kwargs):
parent_was_trashtree = self.parent.channel_trash.exists()
super(ContentNode, self).move_to(target, *args, **kwargs)
self.save()
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.
tree_id, None)
if target.channel_trash.exists() or parent_was_trashtree:
self.recalculate_editors_storage()
def set_default_learning_activity(self):
if self.learning_activities is None:
if self.kind in kind_activity_map:
self.learning_activities = {kind_activity_map[self.kind]: True}
def save(self, skip_lock=False, *args, **kwargs):
if self._state.adding:
self.on_create()
else:
self.on_update()
old_parent_id = self._field_updates.changed().get('parent_id')
if self._state.adding and (self.parent_id or self.parent):
same_order = False
elif old_parent_id is DeferredAttribute:
same_order = True
else:
same_order = old_parent_id == self.parent_id
if not same_order:
changed_ids = list(filter(lambda x: x is not None, set([
old_parent_id, self.parent_id])))
else:
changed_ids = []
if not same_order and not skip_lock:
with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(
id__in=[pid for pid in [old_parent_id, self.parent_id] if
pid]).values_list('tree_id', flat=True).distinct()):
super(ContentNode, self).save(*args, **kwargs)
if changed_ids:
ContentNode.objects.filter(id__in=changed_ids).update(
changed=True)
else:
super(ContentNode, self).save(*args, **kwargs)
if changed_ids:
ContentNode.objects.filter(id__in=changed_ids).update(changed
=True)
save.alters_data = True
def delete(self, *args, **kwargs):
parent = self.parent or self._field_updates.changed().get('parent')
if parent:
parent.changed = True
parent.save()
self.recalculate_editors_storage()
with ContentNode.objects.lock_mptt(self.tree_id):
return super(ContentNode, self).delete(*args, **kwargs)
delete.alters_data = True
def copy_to(self, target=None, position='last-child', pk=None, mods=
None, excluded_descendants=None, can_edit_source_channel=None,
batch_size=None, progress_tracker=None):
return self._tree_manager.copy_node(self, target, position, pk,
mods, excluded_descendants, can_edit_source_channel, batch_size,
progress_tracker)[0]
def copy(self):
return self.copy_to()
def is_publishable(self):
return self.complete and self.get_descendants(include_self=True
).exclude(kind_id=content_kinds.TOPIC).exists()
class Meta:
verbose_name = 'Topic'
verbose_name_plural = 'Topics'
indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME
), models.Index(fields=['-modified'], name=
NODE_MODIFIED_DESC_INDEX_NAME)]
class ContentKind(models.Model):
kind = models.CharField(primary_key=True, max_length=200, choices=
content_kinds.choices)
def __str__(self):
return self.kind
class FileFormat(models.Model):
extension = models.CharField(primary_key=True, max_length=40, choices=
file_formats.choices)
mimetype = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.extension
class FormatPreset(models.Model):
id = models.CharField(primary_key=True, max_length=150, choices=
format_presets.choices)
readable_name = models.CharField(max_length=400)
multi_language = models.BooleanField(default=False)
supplementary = models.BooleanField(default=False)
thumbnail = models.BooleanField(default=False)
subtitle = models.BooleanField(default=False)
display = models.BooleanField(default=True)
order = models.IntegerField(default=0)
kind = models.ForeignKey(ContentKind, related_name='format_presets',
null=True, on_delete=models.SET_NULL)
allowed_formats = models.ManyToManyField(FileFormat, blank=True)
def __str__(self):
return self.id
@classmethod
def guess_format_preset(cls, filename):
"""
Guess the format preset of a filename based on its extension.
Return None if format is unknown.
"""
_, ext = os.path.splitext(filename)
ext = ext.lstrip('.')
f = FormatPreset.objects.filter(allowed_formats__extension=ext,
display=True)
return f.first()
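    # Usage sketch, assuming a displayable preset allowing the 'mp4' extension
    # exists in the database:
    #
    #     preset = FormatPreset.guess_format_preset('lesson_video.mp4')
    #     preset_id = preset.id if preset else None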
@classmethod
def get_preset(cls, preset_name):
"""
Get the FormatPreset object with that exact name.
Returns None if that format preset is not found.
"""
try:
return FormatPreset.objects.get(id=preset_name)
except FormatPreset.DoesNotExist:
return None
class Language(models.Model):
id = models.CharField(max_length=14, primary_key=True)
lang_code = models.CharField(max_length=3, db_index=True)
lang_subcode = models.CharField(max_length=10, db_index=True, blank=
True, null=True)
readable_name = models.CharField(max_length=100, blank=True)
native_name = models.CharField(max_length=100, blank=True)
lang_direction = models.CharField(max_length=3, choices=languages.
LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])
def ietf_name(self):
return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.
lang_subcode) if self.lang_subcode else self.lang_code
def __str__(self):
return self.ietf_name()
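# Illustrative ietf_name() outputs (hypothetical, unsaved instances):
#
#     Language(lang_code='en').ietf_name()                     # -> 'en'
#     Language(lang_code='pt', lang_subcode='br').ietf_name()  # -> 'pt-br'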
class AssessmentItem(models.Model):
type = models.CharField(max_length=50, default='multiplechoice')
question = models.TextField(blank=True)
hints = models.TextField(default='[]')
answers = models.TextField(default='[]')
order = models.IntegerField(default=1)
contentnode = models.ForeignKey('ContentNode', related_name=
'assessment_items', blank=True, null=True, db_index=True, on_delete
=models.CASCADE)
assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,
editable=False)
raw_data = models.TextField(blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
randomize = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
objects = CustomManager()
_field_updates = FieldTracker()
def has_changes(self):
return bool(self._field_updates.changed())
class Meta:
indexes = [models.Index(fields=['assessment_id'], name=
ASSESSMENT_ID_INDEX_NAME)]
unique_together = ['contentnode', 'assessment_id']
_permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.
exists(cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'
)).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
def on_create(self):
"""
        When an exercise is added to a contentnode, update that contentnode's
        content_id if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def on_update(self):
"""
        When an exercise belonging to a contentnode is updated, update that
        contentnode's content_id if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def delete(self, *args, **kwargs):
"""
        When an exercise is deleted from a contentnode, update that
        contentnode's content_id if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
return super(AssessmentItem, self).delete(*args, **kwargs)
class SlideshowSlide(models.Model):
contentnode = models.ForeignKey('ContentNode', related_name=
'slideshow_slides', blank=True, null=True, db_index=True, on_delete
=models.CASCADE)
sort_order = models.FloatField(default=1.0)
metadata = JSONField(default=dict)
class StagedFile(models.Model):
"""
    Keeps track of files uploaded through Ricecooker so that users cannot go over their disk quota limit.
"""
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='staged_files',
blank=True, null=True, on_delete=models.CASCADE)
class File(models.Model):
"""
The bottom layer of the contentDB schema, defines the basic building brick for content.
Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
"""
id = UUIDField(primary_key=True, default=uuid.uuid4)
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
file_on_disk = models.FileField(upload_to=object_storage_name, storage=
default_storage, max_length=500, blank=True)
contentnode = models.ForeignKey(ContentNode, related_name='files',
blank=True, null=True, db_index=True, on_delete=models.CASCADE)
assessment_item = models.ForeignKey(AssessmentItem, related_name=
'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE
)
slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=
'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE
)
file_format = models.ForeignKey(FileFormat, related_name='files', blank
=True, null=True, db_index=True, on_delete=models.SET_NULL)
preset = models.ForeignKey(FormatPreset, related_name='files', blank=
True, null=True, db_index=True, on_delete=models.SET_NULL)
language = models.ForeignKey(Language, related_name='files', blank=True,
null=True, on_delete=models.SET_NULL)
original_filename = models.CharField(max_length=255, blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='files', blank=True,
null=True, on_delete=models.SET_NULL)
modified = models.DateTimeField(auto_now=True, verbose_name='modified',
null=True)
duration = models.IntegerField(blank=True, null=True)
objects = CustomManager()
_permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(
tree_id=OuterRef('assessment_item__contentnode__tree_id'))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.
_permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(edit=True) | Q(uploaded_by=user,
contentnode__isnull=True, assessment_item__isnull=True))
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True).filter(Q(main_tree__tree_id=OuterRef(
'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(
'assessment_item__contentnode__tree_id'))).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |
Q(uploaded_by=user, contentnode__isnull=True,
assessment_item__isnull=True))
class Admin:
pass
def __str__(self):
return '{checksum}{extension}'.format(checksum=self.checksum,
extension='.' + self.file_format.extension)
def filename(self):
"""
Returns just the filename of the File in storage, without the path
e.g. abcd.mp4
"""
return os.path.basename(self.file_on_disk.name)
def update_contentnode_content_id(self):
"""
If the file is attached to a contentnode and is not a thumbnail
then update that contentnode's content_id if it's a copied contentnode.
"""
if self.contentnode and self.preset.thumbnail is False:
self.contentnode.make_content_id_unique()
def on_update(self):
self.modified = timezone.now()
self.update_contentnode_content_id()
def save(self, set_by_file_on_disk=True, *args, **kwargs):
"""
        Override the default save method.
If the file_on_disk FileField gets passed a content copy:
1. generate the MD5 from the content copy
2. fill the other fields accordingly
"""
from contentcuration.utils.user import calculate_user_storage
if self.file_format_id:
if self.file_format_id not in dict(file_formats.choices):
raise ValidationError('Invalid file_format')
if set_by_file_on_disk and self.file_on_disk:
if self.checksum is None or self.checksum == '':
md5 = hashlib.md5()
for chunk in self.file_on_disk.chunks():
md5.update(chunk)
self.checksum = md5.hexdigest()
if not self.file_size:
self.file_size = self.file_on_disk.size
if not self.file_format_id:
ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
if ext in list(dict(file_formats.choices).keys()):
self.file_format_id = ext
else:
raise ValueError('Files of type `{}` are not supported.'
.format(ext))
super(File, self).save(*args, **kwargs)
if self.uploaded_by_id:
calculate_user_storage(self.uploaded_by_id)
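    # Sketch of the checksum behaviour in save(): when checksum is empty, the
    # MD5 is computed from the uploaded content, and file_size and
    # file_format_id are derived from the file itself (upload is a
    # hypothetical Django ContentFile named 'story.epub'):
    #
    #     f = File(file_on_disk=upload, preset_id=format_presets.EPUB)
    #     f.save()
    #     f.checksum  # hex MD5 digest of the uploaded bytes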
class Meta:
indexes = [models.Index(fields=['checksum', 'file_size'], name=
FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],
name=FILE_MODIFIED_DESC_INDEX_NAME)]
constraints = [models.CheckConstraint(check=Q(preset__in=
MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name
=FILE_DURATION_CONSTRAINT)]
class PrerequisiteContentRelationship(models.Model):
"""
Predefine the prerequisite relationship between two ContentNode objects.
"""
target_node = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
prerequisite = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)
class Meta:
unique_together = ['target_node', 'prerequisite']
def clean(self, *args, **kwargs):
if self.target_node == self.prerequisite:
raise IntegrityError('Cannot self reference as prerequisite.')
if PrerequisiteContentRelationship.objects.using(self._state.db
).filter(target_node=self.prerequisite, prerequisite=self.
target_node):
raise IntegrityError(
'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
% (self.target_node, self.prerequisite))
super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)
def save(self, *args, **kwargs):
self.full_clean()
super(PrerequisiteContentRelationship, self).save(*args, **kwargs)
def __unicode__(self):
return u'%s' % self.pk
class RelatedContentRelationship(models.Model):
"""
Predefine the related relationship between two ContentNode objects.
"""
contentnode_1 = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
contentnode_2 = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_2', on_delete=models.CASCADE)
class Meta:
unique_together = ['contentnode_1', 'contentnode_2']
def save(self, *args, **kwargs):
if self.contentnode_1 == self.contentnode_2:
raise IntegrityError('Cannot self reference as related.')
if RelatedContentRelationship.objects.using(self._state.db).filter(
contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1
):
return
super(RelatedContentRelationship, self).save(*args, **kwargs)
class Invitation(models.Model):
""" Invitation to edit channel """
id = UUIDField(primary_key=True, default=uuid.uuid4)
accepted = models.BooleanField(default=False)
declined = models.BooleanField(default=False)
revoked = models.BooleanField(default=False)
invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
SET_NULL, null=True, related_name='sent_to')
share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
email = models.EmailField(max_length=100, null=True)
sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=
'sent_by', null=True, on_delete=models.CASCADE)
channel = models.ForeignKey('Channel', null=True, related_name=
'pending_editors', on_delete=models.CASCADE)
first_name = models.CharField(max_length=100, blank=True)
last_name = models.CharField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = 'Invitation'
verbose_name_plural = 'Invitations'
def accept(self):
user = User.objects.filter(email__iexact=self.email).first()
if self.channel:
if self.share_mode == VIEW_ACCESS:
self.channel.editors.remove(user)
self.channel.viewers.add(user)
else:
self.channel.viewers.remove(user)
self.channel.editors.add(user)
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |
Q(channel__editors=user)).distinct()
@classmethod
def filter_view_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |
Q(channel__editors=user) | Q(channel__viewers=user)).distinct()
class Change(models.Model):
server_rev = models.BigAutoField(primary_key=True)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')
channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=
models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=
True, on_delete=models.CASCADE, related_name='changes_about_user')
client_rev = models.IntegerField(null=True, blank=True)
session = models.ForeignKey(Session, null=True, blank=True, on_delete=
models.SET_NULL)
table = models.CharField(max_length=32)
change_type = models.IntegerField()
kwargs = JSONField(encoder=JSONEncoder)
applied = models.BooleanField(default=False)
errored = models.BooleanField(default=False)
@classmethod
def _create_from_change(cls, created_by_id=None, channel_id=None,
user_id=None, session_key=None, applied=False, table=None, rev=None,
**data):
change_type = data.pop('type')
if table is None or table not in ALL_TABLES:
raise TypeError(
'table is a required argument for creating changes and must be a valid table name'
)
if change_type is None or change_type not in ALL_CHANGES:
raise TypeError(
'change_type is a required argument for creating changes and must be a valid change type integer'
)
return cls(session_id=session_key, created_by_id=created_by_id,
channel_id=channel_id, user_id=user_id, client_rev=rev, table=
table, change_type=change_type, kwargs=data, applied=applied)
@classmethod
def create_changes(cls, changes, created_by_id=None, session_key=None,
applied=False):
change_models = []
for change in changes:
change_models.append(cls._create_from_change(created_by_id=
created_by_id, session_key=session_key, applied=applied, **
change))
cls.objects.bulk_create(change_models)
return change_models
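    # Usage sketch: each change dict must include a valid 'table' name and
    # 'type' integer; remaining keys end up in the kwargs JSON field (values
    # below are hypothetical):
    #
    #     Change.create_changes([
    #         {'table': 'contentnode', 'type': 1, 'rev': 7, 'key': node_id},
    #     ], created_by_id=request.user.id, applied=True)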
@classmethod
def create_change(cls, change, created_by_id=None, session_key=None,
applied=False):
obj = cls._create_from_change(created_by_id=created_by_id,
session_key=session_key, applied=applied, **change)
obj.save()
return obj
@classmethod
def serialize(cls, change):
datum = get_attribute(change, ['kwargs']).copy()
datum.update({'server_rev': get_attribute(change, ['server_rev']),
'table': get_attribute(change, ['table']), 'type':
get_attribute(change, ['change_type']), 'channel_id':
get_attribute(change, ['channel_id']), 'user_id': get_attribute
(change, ['user_id']), 'created_by_id': get_attribute(change, [
'created_by_id'])})
return datum
def serialize_to_change_dict(self):
return self.serialize(self)
class TaskResultCustom(object):
"""
Custom fields to add to django_celery_results's TaskResult model
If adding fields to this class, run `makemigrations` then move the generated migration from the
`django_celery_results` app to the `contentcuration` app and override the constructor to change
the app_label. See `0141_add_task_signature` for an example
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',
on_delete=models.CASCADE, null=True)
channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)
progress = models.IntegerField(null=True, blank=True, validators=[
MinValueValidator(0), MaxValueValidator(100)])
signature = models.CharField(null=True, blank=False, max_length=32)
super_as_dict = TaskResult.as_dict
def as_dict(self):
"""
:return: A dictionary representation
"""
super_dict = self.super_as_dict()
super_dict.update(user_id=self.user_id, channel_id=self.channel_id,
progress=self.progress)
return super_dict
@classmethod
def contribute_to_class(cls, model_class=TaskResult):
"""
Adds fields to model, by default TaskResult
:param model_class: TaskResult model
"""
for field in dir(cls):
if not field.startswith('_') and field not in (
'contribute_to_class', 'Meta'):
model_class.add_to_class(field, getattr(cls, field))
setattr(model_class._meta, 'indexes', getattr(model_class._meta,
'indexes', []) + cls.Meta.indexes)
class Meta:
indexes = [models.Index(fields=['signature'], name=
'task_result_signature_idx', condition=Q(status__in=
celery_states.UNREADY_STATES))]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UUIDField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(*args, **kwargs)
def get_default(self):
result = super(UUIDField, self).get_default()
if isinstance(result, uuid.UUID):
result = result.hex
return result
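    # Note: the default may be a callable producing uuid.UUID instances;
    # get_default() normalizes those to the 32-char hex string this CharField stores.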
class MPTTTreeIDManager(models.Model):
"""
Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,
the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.
As we are using this table only for the ID generation, it does not need any fields.
We resolve this by creating a dummy table and using its ID as the tree index to take advantage of the db's
concurrency-friendly way of generating sequential integer IDs. There is a custom migration that ensures
that the number of records (and thus id) matches the max tree ID number when this table gets added.
"""
class FileOnDiskStorage(FileSystemStorage):
"""
    Overrides FileSystemStorage's default save method to ignore duplicated files.
"""
def get_available_name(self, name):
return name
def _save(self, name, content):
if self.exists(name):
            logging.warning('Content copy "%s" already exists!' % name)
return name
return super(FileOnDiskStorage, self)._save(name, content)
class SecretToken(models.Model):
"""Tokens for channels"""
token = models.CharField(max_length=100, unique=True)
is_primary = models.BooleanField(default=False)
@classmethod
def exists(cls, token):
"""
Return true when the token string given by string already exists.
Returns false otherwise.
"""
return cls.objects.filter(token=token).exists()
@classmethod
def generate_new_token(cls):
"""
Creates a primary secret token for the current channel using a proquint
string. Creates a secondary token containing the channel id.
These tokens can be used to refer to the channel to download its content
database.
"""
token = proquint.generate()
TRIALS = 100
for __ in range(TRIALS):
token = proquint.generate()
if SecretToken.exists(token):
continue
break
else:
raise ValueError('Cannot generate new token')
return token
def __str__(self):
return '{}-{}'.format(self.token[:5], self.token[5:])
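# Illustrative token round-trip (values made up; proquint tokens are stored as
# two five-letter groups): a stored token 'nakovkupag' prints as 'nakov-kupag'.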
class PermissionCTE(With):
tree_id_fields = ['channel__{}__tree_id'.format(tree_name) for
tree_name in CHANNEL_TREES]
def __init__(self, model, user_id, **kwargs):
queryset = model.objects.filter(user_id=user_id).annotate(tree_id=
Unnest(ArrayRemove(Array(*self.tree_id_fields), None),
output_field=models.IntegerField()))
super(PermissionCTE, self).__init__(queryset=queryset.values(
'user_id', 'channel_id', 'tree_id'), **kwargs)
@classmethod
def editable_channels(cls, user_id):
return PermissionCTE(User.editable_channels.through, user_id, name=
'editable_channels_cte')
@classmethod
def view_only_channels(cls, user_id):
return PermissionCTE(User.view_only_channels.through, user_id, name
='view_only_channels_cte')
def exists(self, *filters):
return Exists(self.queryset().filter(*filters).values('user_id'))
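# Illustrative use of PermissionCTE, mirroring the filter_*_queryset methods
# below (the Q filter matches each model's _permission_filter):
#   cte = PermissionCTE.editable_channels(user_id)
#   qs = ContentNode.objects.with_cte(cte).annotate(
#       edit=cte.exists(Q(tree_id=OuterRef('tree_id'))))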
class Channel(models.Model):
""" Permissions come from association with organizations """
id = UUIDField(primary_key=True, default=uuid.uuid4)
name = models.CharField(max_length=200, blank=True)
description = models.CharField(max_length=400, blank=True)
tagline = models.CharField(max_length=150, blank=True, null=True)
version = models.IntegerField(default=0)
thumbnail = models.TextField(blank=True, null=True)
thumbnail_encoding = JSONField(default=dict)
editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name
='editable_channels', verbose_name='editors', help_text=
'Users with edit rights', blank=True)
viewers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name
='view_only_channels', verbose_name='viewers', help_text=
'Users with view only rights', blank=True)
language = models.ForeignKey('Language', null=True, blank=True,
related_name='channel_language', on_delete=models.SET_NULL)
trash_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_trash', on_delete=models.SET_NULL)
clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_clipboard', on_delete=models.SET_NULL)
main_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_main', on_delete=models.SET_NULL)
staging_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_staging', on_delete=models.SET_NULL)
chef_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_chef', on_delete=models.SET_NULL)
previous_tree = models.ForeignKey('ContentNode', null=True, blank=True,
related_name='channel_previous', on_delete=models.SET_NULL)
bookmarked_by = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='bookmarked_channels', verbose_name='bookmarked by')
deleted = models.BooleanField(default=False, db_index=True)
public = models.BooleanField(default=False, db_index=True)
preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
content_defaults = JSONField(default=dict)
priority = models.IntegerField(default=0, help_text=
'Order to display public channels')
last_published = models.DateTimeField(blank=True, null=True)
secret_tokens = models.ManyToManyField(SecretToken, related_name=
'channels', verbose_name='secret tokens', blank=True)
source_url = models.CharField(max_length=200, blank=True, null=True)
demo_server_url = models.CharField(max_length=200, blank=True, null=True)
source_id = models.CharField(max_length=200, blank=True, null=True)
source_domain = models.CharField(max_length=300, blank=True, null=True)
ricecooker_version = models.CharField(max_length=100, blank=True, null=True
)
published_data = JSONField(default=dict)
icon_encoding = models.TextField(blank=True, null=True)
total_resource_count = models.IntegerField(default=0)
published_kind_count = models.TextField(blank=True, null=True)
published_size = models.FloatField(default=0)
included_languages = models.ManyToManyField('Language', related_name=
'channels', verbose_name='languages', blank=True)
_field_updates = FieldTracker(fields=['description', 'language_id',
'thumbnail', 'name', 'thumbnail_encoding', 'deleted', 'public',
'main_tree_id', 'version'])
@classmethod
def get_editable(cls, user, channel_id):
return cls.filter_edit_queryset(cls.objects.all(), user).get(id=
channel_id)
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit = Exists(User.editable_channels.through.objects.filter(user_id
=user_id, channel_id=OuterRef('id')))
queryset = queryset.annotate(edit=edit)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
user_email = not user.is_anonymous and user.email
if user_id:
filters = dict(user_id=user_id, channel_id=OuterRef('id'))
edit = Exists(User.editable_channels.through.objects.filter(**
filters).values('user_id'))
view = Exists(User.view_only_channels.through.objects.filter(**
filters).values('user_id'))
else:
edit = boolean_val(False)
view = boolean_val(False)
queryset = queryset.annotate(edit=edit, view=view)
if user_id and user.is_admin:
return queryset
permission_filter = Q()
if user_id:
pending_channels = Invitation.objects.filter(email=user_email,
revoked=False, declined=False, accepted=False).values_list(
'channel_id', flat=True)
permission_filter = Q(view=True) | Q(edit=True) | Q(deleted=
False, id__in=pending_channels)
return queryset.filter(permission_filter | Q(deleted=False, public=
True))
@classmethod
def get_all_channels(cls):
return cls.objects.select_related('main_tree').prefetch_related(
'editors', 'viewers').distinct()
def resource_size_key(self):
return '{}_resource_size'.format(self.pk)
def get_resource_size(self):
cached_data = cache.get(self.resource_size_key())
if cached_data:
return cached_data
tree_id = self.main_tree.tree_id
files = File.objects.select_related('contentnode', 'assessment_item'
).filter(contentnode__tree_id=tree_id).values('checksum',
'file_size').distinct().aggregate(resource_size=Sum('file_size'))
cache.set(self.resource_size_key(), files['resource_size'] or 0, None)
return files['resource_size'] or 0
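    # The computed size is cached with no timeout under '<pk>_resource_size';
    # invalidation when files change is assumed to happen elsewhere.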
def on_create(self):
record_channel_stats(self, None)
if not self.content_defaults:
self.content_defaults = DEFAULT_CONTENT_DEFAULTS
if not self.main_tree:
self.main_tree = ContentNode.objects.create(title=self.name,
kind_id=content_kinds.TOPIC, content_id=self.id, node_id=
self.id, original_channel_id=self.id, source_channel_id=
self.id, changed=True, complete=True)
if settings.DEBUG:
if ContentNode.objects.filter(parent=None, tree_id=self.
main_tree.tree_id).count() != 1:
raise AssertionError
if not self.trash_tree:
self.trash_tree = ContentNode.objects.create(title=self.name,
kind_id=content_kinds.TOPIC, content_id=self.id, node_id=
self.id)
if self.public and (self.main_tree and self.main_tree.published):
delete_public_channel_cache_keys()
def on_update(self):
from contentcuration.utils.user import calculate_user_storage
original_values = self._field_updates.changed()
record_channel_stats(self, original_values)
blacklist = set(['public', 'main_tree_id', 'version'])
if self.main_tree and original_values and any(True for field in
original_values if field not in blacklist):
self.main_tree.changed = True
if 'thumbnail' in original_values and original_values['thumbnail'
] and 'static' not in original_values['thumbnail']:
filename, ext = os.path.splitext(original_values['thumbnail'])
delete_empty_file_reference(filename, ext[1:])
if 'deleted' in original_values:
for editor in self.editors.all():
calculate_user_storage(editor.pk)
if 'deleted' in original_values and not original_values['deleted']:
self.pending_editors.all().delete()
export_db_storage_path = os.path.join(settings.DB_ROOT,
'{channel_id}.sqlite3'.format(channel_id=self.id))
if default_storage.exists(export_db_storage_path):
default_storage.delete(export_db_storage_path)
if self.main_tree:
self.main_tree.published = False
if self.main_tree and self.main_tree._field_updates.changed():
self.main_tree.save()
if 'public' in original_values and (self.main_tree and self.
main_tree.published):
delete_public_channel_cache_keys()
def save(self, *args, **kwargs):
if self._state.adding:
self.on_create()
else:
self.on_update()
super(Channel, self).save(*args, **kwargs)
def get_thumbnail(self):
return get_channel_thumbnail(self)
def has_changes(self):
return self.main_tree.get_descendants(include_self=True).filter(changed
=True).exists()
def get_date_modified(self):
return self.main_tree.get_descendants(include_self=True).aggregate(
last_modified=Max('modified'))['last_modified']
def get_resource_count(self):
return self.main_tree.get_descendants().exclude(kind_id=
content_kinds.TOPIC).order_by('content_id').distinct('content_id'
).count()
def get_human_token(self):
return self.secret_tokens.get(is_primary=True)
def get_channel_id_token(self):
return self.secret_tokens.get(token=self.id)
def make_token(self):
token = self.secret_tokens.create(token=SecretToken.
generate_new_token(), is_primary=True)
self.secret_tokens.get_or_create(token=self.id)
return token
def make_public(self, bypass_signals=False):
"""
Sets the current channel object to be public and viewable by anyone.
If bypass_signals is True, update the model in such a way that we
prevent any model signals from running due to the update.
Returns the same channel object.
"""
if bypass_signals:
self.public = True
Channel.objects.filter(id=self.id).update(public=True)
delete_public_channel_cache_keys()
else:
self.public = True
self.save()
return self
def mark_created(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.
CREATION)
def mark_publishing(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.
PUBLICATION)
self.main_tree.publishing = True
self.main_tree.save()
def mark_deleted(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.
DELETION)
self.deleted = True
self.save()
def mark_recovered(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.
RECOVERY)
self.deleted = False
self.save()
@property
def deletion_history(self):
return self.history.filter(action=channel_history.DELETION)
@property
def publishing_history(self):
return self.history.filter(action=channel_history.PUBLICATION)
@classmethod
def get_public_channels(cls, defer_nonmain_trees=False):
"""
Get all public channels.
If defer_nonmain_trees is True, defer the loading of all
trees except for the main_tree."""
if defer_nonmain_trees:
c = Channel.objects.filter(public=True).exclude(deleted=True
).select_related('main_tree').prefetch_related('editors'
).defer('trash_tree', 'clipboard_tree', 'staging_tree',
'chef_tree', 'previous_tree', 'viewers')
else:
c = Channel.objects.filter(public=True).exclude(deleted=True)
return c
class Meta:
verbose_name = 'Channel'
verbose_name_plural = 'Channels'
indexes = [models.Index(fields=['name'], name=CHANNEL_NAME_INDEX_NAME)]
index_together = [['deleted', 'public']]
class ChannelHistory(models.Model):
"""
Model for tracking certain actions performed on a channel
"""
channel = models.ForeignKey('Channel', null=False, blank=False,
related_name='history', on_delete=models.CASCADE)
actor = models.ForeignKey('User', null=False, blank=False, related_name
='channel_history', on_delete=models.CASCADE)
performed = models.DateTimeField(default=timezone.now)
action = models.CharField(max_length=50, choices=channel_history.choices)
@classmethod
def prune(cls):
"""
Prunes history records by keeping the most recent actions for each channel and type,
and deleting all other older actions
"""
keep_ids = cls.objects.distinct('channel_id', 'action').order_by(
'channel_id', 'action', '-performed').values_list('id', flat=True)
cls.objects.exclude(id__in=keep_ids).delete()
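    # Sketch of the SQL this relies on (PostgreSQL DISTINCT ON; table name assumed):
    #   SELECT DISTINCT ON (channel_id, action) id
    #   FROM contentcuration_channelhistory
    #   ORDER BY channel_id, action, performed DESC
    # i.e. keep only the newest row per (channel, action) and delete the rest.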
class Meta:
verbose_name = 'Channel history'
verbose_name_plural = 'Channel histories'
indexes = [models.Index(fields=['channel_id'], name=
CHANNEL_HISTORY_CHANNEL_INDEX_NAME)]
class UserHistory(models.Model):
"""
Model that stores the user's action history.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=
False, related_name='history', on_delete=models.CASCADE)
action = models.CharField(max_length=32, choices=user_history.choices)
performed_at = models.DateTimeField(default=timezone.now)
class ChannelSet(models.Model):
id = UUIDField(primary_key=True, default=uuid.uuid4)
name = models.CharField(max_length=200, blank=True)
description = models.CharField(max_length=400, blank=True)
public = models.BooleanField(default=False, db_index=True)
editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name
='channel_sets', verbose_name='editors', help_text=
'Users with edit rights', blank=True)
secret_token = models.ForeignKey('SecretToken', null=True, blank=True,
related_name='channel_sets', on_delete=models.SET_NULL)
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
user_id = not user.is_anonymous and user.id
edit = Exists(User.channel_sets.through.objects.filter(user_id=
user_id, channelset_id=OuterRef('id')))
queryset = queryset.annotate(edit=edit)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
return cls.filter_edit_queryset(queryset, user)
def get_channels(self):
if self.secret_token:
return self.secret_token.channels.filter(deleted=False)
def save(self, *args, **kwargs):
if self._state.adding:
self.on_create()
        super(ChannelSet, self).save(*args, **kwargs)
def on_create(self):
if not self.secret_token:
self.secret_token = SecretToken.objects.create(token=
SecretToken.generate_new_token())
def delete(self, *args, **kwargs):
super(ChannelSet, self).delete(*args, **kwargs)
if self.secret_token:
self.secret_token.delete()
class ContentTag(models.Model):
id = UUIDField(primary_key=True, default=uuid.uuid4)
tag_name = models.CharField(max_length=50)
channel = models.ForeignKey('Channel', related_name='tags', blank=True,
null=True, db_index=True, on_delete=models.SET_NULL)
objects = CustomManager()
def __str__(self):
return self.tag_name
class Meta:
unique_together = ['tag_name', 'channel']
class License(models.Model):
"""
Normalize the license of ContentNode model
"""
license_name = models.CharField(max_length=50)
license_url = models.URLField(blank=True)
license_description = models.TextField(blank=True)
copyright_holder_required = models.BooleanField(default=True)
is_custom = models.BooleanField(default=False)
exists = models.BooleanField(default=False, verbose_name=
'license exists', help_text=
'Tells whether or not a content item is licensed to share')
@classmethod
def validate_name(cls, name):
if cls.objects.filter(license_name=name).count() == 0:
raise ValidationError('License `{}` does not exist'.format(name))
def __str__(self):
return self.license_name
class ContentNode(MPTTModel, models.Model):
"""
By default, all nodes have a title and can be used as a topic.
"""
id = UUIDField(primary_key=True, default=uuid.uuid4)
content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=
False, db_index=True)
node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
original_channel_id = UUIDField(primary_key=False, editable=False, null
=True, db_index=True)
source_channel_id = UUIDField(primary_key=False, editable=False, null=True)
original_source_node_id = UUIDField(primary_key=False, editable=False,
null=True, db_index=True)
source_node_id = UUIDField(primary_key=False, editable=False, null=True)
source_id = models.CharField(max_length=200, blank=True, null=True)
source_domain = models.CharField(max_length=300, blank=True, null=True)
title = models.CharField(max_length=200, blank=True)
description = models.TextField(blank=True)
kind = models.ForeignKey('ContentKind', related_name='contentnodes',
db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
license = models.ForeignKey('License', null=True, blank=True, on_delete
=models.SET_NULL)
license_description = models.CharField(max_length=400, null=True, blank
=True)
prerequisite = models.ManyToManyField('self', related_name=
'is_prerequisite_of', through='PrerequisiteContentRelationship',
symmetrical=False, blank=True)
is_related = models.ManyToManyField('self', related_name='relate_to',
through='RelatedContentRelationship', symmetrical=False, blank=True)
language = models.ForeignKey('Language', null=True, blank=True,
related_name='content_language', on_delete=models.SET_NULL)
parent = TreeForeignKey('self', null=True, blank=True, related_name=
'children', db_index=True, on_delete=models.CASCADE)
tags = models.ManyToManyField(ContentTag, symmetrical=False,
related_name='tagged_content', blank=True)
sort_order = models.FloatField(max_length=50, default=1, verbose_name=
'sort order', help_text='Ascending, lowest number shown first')
copyright_holder = models.CharField(max_length=200, null=True, blank=
True, default='', help_text=
'Organization of person who holds the essential rights')
original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=
True, blank=True, related_name='duplicates')
cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=
True, blank=True, related_name='clones')
thumbnail_encoding = models.TextField(blank=True, null=True)
created = models.DateTimeField(default=timezone.now, verbose_name='created'
)
modified = models.DateTimeField(auto_now=True, verbose_name='modified')
published = models.BooleanField(default=False)
publishing = models.BooleanField(default=False)
complete = models.BooleanField(null=True)
changed = models.BooleanField(default=True)
"""
Extra fields for exercises:
- type: mastery model to use to determine completion
- m: m value for M out of N mastery criteria
- n: n value for M out of N mastery criteria
"""
extra_fields = JSONField(default=dict, blank=True, null=True)
author = models.CharField(max_length=200, blank=True, default='',
help_text='Who created this content?', null=True)
aggregator = models.CharField(max_length=200, blank=True, default='',
help_text='Who gathered this content together?', null=True)
provider = models.CharField(max_length=200, blank=True, default='',
help_text='Who distributed this content?', null=True)
role_visibility = models.CharField(max_length=50, choices=roles.choices,
default=roles.LEARNER)
freeze_authoring_data = models.BooleanField(default=False)
grade_levels = models.JSONField(blank=True, null=True)
resource_types = models.JSONField(blank=True, null=True)
learning_activities = models.JSONField(blank=True, null=True)
accessibility_labels = models.JSONField(blank=True, null=True)
categories = models.JSONField(blank=True, null=True)
learner_needs = models.JSONField(blank=True, null=True)
suggested_duration = models.IntegerField(blank=True, null=True,
help_text='Suggested duration for the content node (in seconds)')
objects = CustomContentNodeTreeManager()
_field_updates = FieldTracker()
_permission_filter = Q(tree_id=OuterRef('tree_id'))
@classmethod
def _annotate_channel_id(cls, queryset):
return queryset.annotate(channel_id=Subquery(Channel.objects.filter
(main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat
=True)[:1]))
@classmethod
def filter_by_pk(cls, pk):
"""
When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always
returns a queryset filtered by pk.
When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode
for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. If
a ContentNode does not exist for `pk` then an empty queryset is returned.
"""
query = ContentNode.objects.filter(pk=pk)
if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:
tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))
if tree_id:
query = query.filter(tree_id=tree_id)
else:
tree_id = ContentNode.objects.filter(pk=pk).values_list(
'tree_id', flat=True).first()
if tree_id:
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),
tree_id, None)
query = query.filter(tree_id=tree_id)
else:
query = query.none()
return query
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.
exists(cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
@raise_if_unsaved
def get_root(self):
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
return self
return super(ContentNode, self).get_root()
@raise_if_unsaved
def get_root_id(self):
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
            return self.id
return ContentNode.objects.values_list('pk', flat=True).get(tree_id
=self._mpttfield('tree_id'), parent=None)
def get_tree_data(self, levels=float('inf')):
"""
Returns `levels`-deep tree information starting at current node.
Args:
levels (int): depth of tree hierarchy to return
Returns:
        tree (dict): starting with self, with a children list containing either
        just the children's `node_id`s or the full recursive tree.
"""
if self.kind_id == content_kinds.TOPIC:
node_data = {'title': self.title, 'kind': self.kind_id,
'node_id': self.node_id, 'studio_id': self.id}
children = self.children.all()
if levels > 0:
node_data['children'] = [c.get_tree_data(levels=levels - 1) for
c in children]
return node_data
if self.kind_id == content_kinds.EXERCISE:
return {'title': self.title, 'kind': self.kind_id, 'count':
self.assessment_items.count(), 'node_id': self.node_id,
'studio_id': self.id}
return {'title': self.title, 'kind': self.kind_id, 'file_size':
self.files.values('file_size').aggregate(size=Sum('file_size'))
['size'], 'node_id': self.node_id, 'studio_id': self.id}
def get_original_node(self):
original_node = self.original_node or self
if self.original_channel_id and self.original_source_node_id:
original_tree_id = Channel.objects.select_related('main_tree').get(
pk=self.original_channel_id).main_tree.tree_id
original_node = ContentNode.objects.filter(tree_id=
original_tree_id, node_id=self.original_source_node_id).first(
) or ContentNode.objects.filter(tree_id=original_tree_id,
content_id=self.content_id).first() or self
return original_node
def get_associated_presets(self):
key = 'associated_presets_{}'.format(self.kind_id)
cached_data = cache.get(key)
if cached_data:
return cached_data
presets = list(FormatPreset.objects.filter(kind=self.kind).values())
cache.set(key, presets, None)
return presets
def get_prerequisites(self):
prerequisite_mapping = {}
prerequisites = self.prerequisite.all()
prereqlist = list(prerequisites)
for prereq in prerequisites:
prlist, prereqmapping = prereq.get_prerequisites()
prerequisite_mapping.update({prereq.pk: prereqmapping})
prereqlist.extend(prlist)
return prereqlist, prerequisite_mapping
def get_postrequisites(self):
postrequisite_mapping = {}
postrequisites = self.is_prerequisite_of.all()
postreqlist = list(postrequisites)
for postreq in postrequisites:
prlist, postreqmapping = postreq.get_postrequisites()
postrequisite_mapping.update({postreq.pk: postreqmapping})
postreqlist.extend(prlist)
return postreqlist, postrequisite_mapping
def get_channel_id(self):
if hasattr(self, 'channel_id'):
return self.channel_id
channel = self.get_channel()
if channel:
return channel.id
return None
def get_channel(self):
try:
root = self.get_root()
if not root:
return None
return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=
root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(
previous_tree=root)).first()
except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
return None
def get_thumbnail(self):
if self.thumbnail_encoding:
thumbnail_data = load_json_string(self.thumbnail_encoding)
if type(thumbnail_data) is dict and thumbnail_data.get('base64'):
return thumbnail_data['base64']
thumbnail = self.files.filter(preset__thumbnail=True).first()
if thumbnail:
return generate_storage_url(str(thumbnail))
return ''
@classmethod
def get_nodes_with_title(cls, title, limit_to_children_of=None):
"""
        Returns all ContentNodes with a given title. If limit_to_children_of
        is passed an id, only the descendants of the node with that id are searched.
"""
if limit_to_children_of:
root = cls.objects.get(id=limit_to_children_of)
return root.get_descendants().filter(title=title)
return cls.objects.filter(title=title)
def get_details(self, channel_id=None):
"""
Returns information about the node and its children, including total size, languages, files, etc.
:return: A dictionary with detailed statistics and information about the node.
"""
from contentcuration.viewsets.common import SQArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.common import SQRelatedArrayAgg
from contentcuration.viewsets.common import SQSum
from contentcuration.viewsets.common import SQJSONBKeyArrayAgg
node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id
).order_by()
descendants = self.get_descendants().values('id')
if channel_id:
channel = Channel.objects.filter(id=channel_id)[0]
else:
channel = self.get_channel()
if not descendants.exists():
data = {'last_update': pytz.utc.localize(datetime.now()).
strftime(settings.DATE_TIME_FORMAT), 'created': self.
created.strftime(settings.DATE_TIME_FORMAT),
'resource_count': 0, 'resource_size': 0, 'includes': {
'coach_content': 0, 'exercises': 0}, 'kind_count': [],
'languages': [], 'accessible_languages': [], 'licenses': [],
'tags': [], 'copyright_holders': [], 'authors': [],
'aggregators': [], 'providers': [], 'sample_pathway': [],
'original_channels': [], 'sample_nodes': [], 'levels': [],
'categories': []}
cache.set('details_{}'.format(self.node_id), json.dumps(data), None
)
return data
resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()
nodes = With(File.objects.filter(contentnode_id__in=Subquery(
resources.values('id'))).values('checksum', 'file_size').
order_by(), name='nodes')
file_query = nodes.queryset().with_cte(nodes).values('checksum',
'file_size').distinct()
l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(
resources.values('id'))).values('language_id', 'preset_id').
order_by(), name='l_nodes')
accessible_languages_query = l_nodes.queryset().filter(preset_id=
format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(
'language__native_name').distinct()
tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=
descendants.values_list('pk', flat=True)).values('tag_name').
annotate(count=Count('tag_name')).query).replace('topic', "'topic'"
)
kind_count_query = str(resources.values('kind_id').annotate(count=
Count('kind_id')).query).replace('topic', "'topic'")
node = node.annotate(resource_count=SQCount(resources, field='id'),
resource_size=SQSum(file_query, field='file_size'),
copyright_holders=SQArrayAgg(resources.distinct(
'copyright_holder').order_by('copyright_holder'), field=
'copyright_holder'), authors=SQArrayAgg(resources.distinct(
'author').order_by('author'), field='author'), aggregators=
SQArrayAgg(resources.distinct('aggregator').order_by(
'aggregator'), field='aggregator'), providers=SQArrayAgg(
resources.distinct('provider').order_by('provider'), field=
'provider'), languages=SQRelatedArrayAgg(descendants.exclude(
language=None).distinct('language__native_name').order_by(),
field='language__native_name', fieldname='native_name'),
accessible_languages=SQRelatedArrayAgg(
accessible_languages_query, field='language__native_name',
fieldname='native_name'), licenses=SQRelatedArrayAgg(resources.
exclude(license=None).distinct('license__license_name').
order_by('license__license_name'), field=
'license__license_name', fieldname='license_name'), kind_count=
RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format
(kind_count_query), ()), tags_list=RawSQL(
'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(
tags_query), ()), coach_content=SQCount(resources.filter(
role_visibility=roles.COACH), field='id'), exercises=SQCount(
resources.filter(kind_id=content_kinds.EXERCISE), field='id'),
levels=SQJSONBKeyArrayAgg(descendants.exclude(
grade_levels__isnull=True), field='grade_levels'),
all_categories=SQJSONBKeyArrayAgg(descendants.exclude(
categories__isnull=True), field='categories'))
max_level = max(resources.values_list('level', flat=True).order_by(
).distinct() or [0])
m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').
order_by(), name='m_nodes')
deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level
=max_level).values('id').order_by('tree_id', 'lft').first()
if deepest_node_record:
deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']
)
pathway = list(deepest_node.get_ancestors().order_by().exclude(
parent=None).values('title', 'node_id', 'kind_id').order_by()
) if deepest_node_record else []
sample_nodes = [{'node_id': n.node_id, 'title': n.title,
'description': n.description, 'thumbnail': n.get_thumbnail(),
'kind': n.kind_id} for n in deepest_node.get_siblings(
include_self=True)[0:4]] if deepest_node_record else []
channel_id = channel and channel.id
originals = resources.values('original_channel_id').annotate(count=
Count('original_channel_id')).order_by('original_channel_id')
originals = {c['original_channel_id']: c['count'] for c in originals}
original_channels = Channel.objects.exclude(pk=channel_id).filter(
pk__in=originals.keys(), deleted=False).order_by()
original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(
' (Original)') if channel_id == c.id else ''), 'thumbnail': c.
get_thumbnail(), 'count': originals[c.id]} for c in
original_channels]
node = node.order_by().values('id', 'resource_count',
'resource_size', 'copyright_holders', 'authors', 'aggregators',
'providers', 'languages', 'accessible_languages',
'coach_content', 'licenses', 'tags_list', 'kind_count',
'exercises', 'levels', 'all_categories').first()
for_educators = {'coach_content': node['coach_content'],
'exercises': node['exercises']}
data = {'last_update': pytz.utc.localize(datetime.now()).strftime(
settings.DATE_TIME_FORMAT), 'created': self.created.strftime(
settings.DATE_TIME_FORMAT), 'resource_count': node.get(
'resource_count', 0), 'resource_size': node.get('resource_size',
0), 'includes': for_educators, 'kind_count': node.get(
'kind_count') or [], 'languages': node.get('languages') or [],
'accessible_languages': node.get('accessible_languages') or [],
'licenses': node.get('licenses') or [], 'tags': node.get(
'tags_list') or [], 'original_channels': original_channels,
'sample_pathway': pathway, 'sample_nodes': sample_nodes,
'authors': list(filter(bool, node['authors'])), 'aggregators':
list(filter(bool, node['aggregators'])), 'providers': list(
filter(bool, node['providers'])), 'copyright_holders': list(
filter(bool, node['copyright_holders'])), 'levels': node.get(
'levels') or [], 'categories': node.get('all_categories') or []}
cache.set('details_{}'.format(self.node_id), json.dumps(data), None)
return data
def has_changes(self):
mptt_opts = self._mptt_meta
blacklist = set(['changed', 'modified', 'publishing', mptt_opts.
tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,
mptt_opts.level_attr])
original_values = self._field_updates.changed()
return any(True for field in original_values if field not in blacklist)
def recalculate_editors_storage(self):
from contentcuration.utils.user import calculate_user_storage
for editor in self.files.values_list('uploaded_by_id', flat=True
).distinct():
calculate_user_storage(editor)
def mark_complete(self):
errors = []
if not (bool(self.title) or self.parent_id is None):
errors.append('Empty title')
if self.kind_id != content_kinds.TOPIC:
if not self.license:
errors.append('Missing license')
if (self.license and self.license.is_custom and not self.
license_description):
errors.append('Missing license description for custom license')
if (self.license and self.license.copyright_holder_required and
not self.copyright_holder):
errors.append('Missing required copyright holder')
if (self.kind_id != content_kinds.EXERCISE and not self.files.
filter(preset__supplementary=False).exists()):
errors.append('Missing default file')
if self.kind_id == content_kinds.EXERCISE:
if not self.assessment_items.filter(~Q(raw_data='') | ~Q(
question='') & ~Q(answers='[]') & (Q(type=exercises.
INPUT_QUESTION) | Q(answers__iregex='"correct":\\s*true'))
).exists():
errors.append(
'No questions with question text and complete answers')
criterion = self.extra_fields.get('options', {}).get(
'completion_criteria')
if not (self.extra_fields.get('mastery_model') or criterion):
errors.append('Missing mastery criterion')
if criterion:
try:
completion_criteria.validate(criterion, kind=
content_kinds.EXERCISE)
except completion_criteria.ValidationError:
errors.append(
'Mastery criterion is defined but is invalid')
self.complete = not errors
return errors
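    # Illustrative extra_fields payload carrying a completion criterion (shape
    # assumed from le_utils' completion_criteria; not authoritative):
    #   {'options': {'completion_criteria': {
    #       'model': 'mastery',
    #       'threshold': {'mastery_model': 'm_of_n', 'm': 3, 'n': 5}}}}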
def make_content_id_unique(self):
"""
If self is NOT an original contentnode (in other words, a copied contentnode)
        and a contentnode with the same content_id exists, then we update self's content_id.
"""
is_node_original = (self.original_source_node_id is None or self.
original_source_node_id == self.node_id)
node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(
content_id=self.content_id)
if not is_node_original and node_same_content_id.exists():
ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.
uuid4().hex)
def on_create(self):
self.changed = True
self.recalculate_editors_storage()
self.set_default_learning_activity()
def on_update(self):
self.changed = self.changed or self.has_changes()
def move_to(self, target, *args, **kwargs):
parent_was_trashtree = self.parent.channel_trash.exists()
super(ContentNode, self).move_to(target, *args, **kwargs)
self.save()
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.
tree_id, None)
if target.channel_trash.exists() or parent_was_trashtree:
self.recalculate_editors_storage()
def set_default_learning_activity(self):
if self.learning_activities is None:
if self.kind in kind_activity_map:
self.learning_activities = {kind_activity_map[self.kind]: True}
def save(self, skip_lock=False, *args, **kwargs):
if self._state.adding:
self.on_create()
else:
self.on_update()
        # DeferredAttribute acts as a sentinel: if 'parent_id' was never touched,
        # the `is DeferredAttribute` branch below treats the ordering as unchanged.
        old_parent_id = self._field_updates.changed().get('parent_id', DeferredAttribute)
if self._state.adding and (self.parent_id or self.parent):
same_order = False
elif old_parent_id is DeferredAttribute:
same_order = True
else:
same_order = old_parent_id == self.parent_id
if not same_order:
changed_ids = list(filter(lambda x: x is not None, set([
old_parent_id, self.parent_id])))
else:
changed_ids = []
if not same_order and not skip_lock:
with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(
id__in=[pid for pid in [old_parent_id, self.parent_id] if
pid]).values_list('tree_id', flat=True).distinct()):
super(ContentNode, self).save(*args, **kwargs)
if changed_ids:
ContentNode.objects.filter(id__in=changed_ids).update(
changed=True)
else:
super(ContentNode, self).save(*args, **kwargs)
if changed_ids:
ContentNode.objects.filter(id__in=changed_ids).update(changed
=True)
save.alters_data = True
def delete(self, *args, **kwargs):
parent = self.parent or self._field_updates.changed().get('parent')
if parent:
parent.changed = True
parent.save()
self.recalculate_editors_storage()
with ContentNode.objects.lock_mptt(self.tree_id):
return super(ContentNode, self).delete(*args, **kwargs)
delete.alters_data = True
def copy_to(self, target=None, position='last-child', pk=None, mods=
None, excluded_descendants=None, can_edit_source_channel=None,
batch_size=None, progress_tracker=None):
return self._tree_manager.copy_node(self, target, position, pk,
mods, excluded_descendants, can_edit_source_channel, batch_size,
progress_tracker)[0]
def copy(self):
return self.copy_to()
def is_publishable(self):
return self.complete and self.get_descendants(include_self=True
).exclude(kind_id=content_kinds.TOPIC).exists()
class Meta:
verbose_name = 'Topic'
verbose_name_plural = 'Topics'
indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME
), models.Index(fields=['-modified'], name=
NODE_MODIFIED_DESC_INDEX_NAME)]
class ContentKind(models.Model):
kind = models.CharField(primary_key=True, max_length=200, choices=
content_kinds.choices)
def __str__(self):
return self.kind
class FileFormat(models.Model):
extension = models.CharField(primary_key=True, max_length=40, choices=
file_formats.choices)
mimetype = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.extension
class FormatPreset(models.Model):
id = models.CharField(primary_key=True, max_length=150, choices=
format_presets.choices)
readable_name = models.CharField(max_length=400)
multi_language = models.BooleanField(default=False)
supplementary = models.BooleanField(default=False)
thumbnail = models.BooleanField(default=False)
subtitle = models.BooleanField(default=False)
display = models.BooleanField(default=True)
order = models.IntegerField(default=0)
kind = models.ForeignKey(ContentKind, related_name='format_presets',
null=True, on_delete=models.SET_NULL)
allowed_formats = models.ManyToManyField(FileFormat, blank=True)
def __str__(self):
return self.id
@classmethod
def guess_format_preset(cls, filename):
"""
Guess the format preset of a filename based on its extension.
Return None if format is unknown.
"""
_, ext = os.path.splitext(filename)
ext = ext.lstrip('.')
f = FormatPreset.objects.filter(allowed_formats__extension=ext,
display=True)
return f.first()
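    # Illustrative: guess_format_preset('clip.mp4') looks for displayable presets
    # whose allowed_formats include the 'mp4' extension, returning the first match.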
@classmethod
def get_preset(cls, preset_name):
"""
Get the FormatPreset object with that exact name.
Returns None if that format preset is not found.
"""
try:
return FormatPreset.objects.get(id=preset_name)
except FormatPreset.DoesNotExist:
return None
class Language(models.Model):
id = models.CharField(max_length=14, primary_key=True)
lang_code = models.CharField(max_length=3, db_index=True)
lang_subcode = models.CharField(max_length=10, db_index=True, blank=
True, null=True)
readable_name = models.CharField(max_length=100, blank=True)
native_name = models.CharField(max_length=100, blank=True)
lang_direction = models.CharField(max_length=3, choices=languages.
LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])
def ietf_name(self):
return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.
lang_subcode) if self.lang_subcode else self.lang_code
def __str__(self):
return self.ietf_name()
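    # Example: lang_code='pt' with lang_subcode='BR' gives ietf_name() == 'pt-BR';
    # without a subcode it falls back to the bare lang_code.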
class AssessmentItem(models.Model):
type = models.CharField(max_length=50, default='multiplechoice')
question = models.TextField(blank=True)
hints = models.TextField(default='[]')
answers = models.TextField(default='[]')
order = models.IntegerField(default=1)
contentnode = models.ForeignKey('ContentNode', related_name=
'assessment_items', blank=True, null=True, db_index=True, on_delete
=models.CASCADE)
assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,
editable=False)
raw_data = models.TextField(blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
randomize = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
objects = CustomManager()
_field_updates = FieldTracker()
def has_changes(self):
return bool(self._field_updates.changed())
class Meta:
indexes = [models.Index(fields=['assessment_id'], name=
ASSESSMENT_ID_INDEX_NAME)]
unique_together = ['contentnode', 'assessment_id']
_permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.
exists(cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'
)).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
def on_create(self):
"""
When an exercise is added to a contentnode, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def on_update(self):
"""
        When an exercise of a contentnode is updated, update the contentnode's content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def delete(self, *args, **kwargs):
"""
When an exercise is deleted from a contentnode, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
return super(AssessmentItem, self).delete(*args, **kwargs)
class SlideshowSlide(models.Model):
contentnode = models.ForeignKey('ContentNode', related_name=
'slideshow_slides', blank=True, null=True, db_index=True, on_delete
=models.CASCADE)
sort_order = models.FloatField(default=1.0)
metadata = JSONField(default=dict)
class StagedFile(models.Model):
"""
    Keeps track of files uploaded through Ricecooker, to avoid users going over the disk quota limit
"""
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='staged_files',
blank=True, null=True, on_delete=models.CASCADE)
class File(models.Model):
"""
The bottom layer of the contentDB schema, defines the basic building brick for content.
Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
"""
id = UUIDField(primary_key=True, default=uuid.uuid4)
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
file_on_disk = models.FileField(upload_to=object_storage_name, storage=
default_storage, max_length=500, blank=True)
contentnode = models.ForeignKey(ContentNode, related_name='files',
blank=True, null=True, db_index=True, on_delete=models.CASCADE)
assessment_item = models.ForeignKey(AssessmentItem, related_name=
'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE
)
slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=
'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE
)
file_format = models.ForeignKey(FileFormat, related_name='files', blank
=True, null=True, db_index=True, on_delete=models.SET_NULL)
preset = models.ForeignKey(FormatPreset, related_name='files', blank=
True, null=True, db_index=True, on_delete=models.SET_NULL)
language = models.ForeignKey(Language, related_name='files', blank=True,
null=True, on_delete=models.SET_NULL)
original_filename = models.CharField(max_length=255, blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='files', blank=True,
null=True, on_delete=models.SET_NULL)
modified = models.DateTimeField(auto_now=True, verbose_name='modified',
null=True)
duration = models.IntegerField(blank=True, null=True)
objects = CustomManager()
_permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(
tree_id=OuterRef('assessment_item__contentnode__tree_id'))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.
_permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(edit=True) | Q(uploaded_by=user,
contentnode__isnull=True, assessment_item__isnull=True))
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(public=Exists(Channel.objects.filter(
public=True).filter(Q(main_tree__tree_id=OuterRef(
'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(
'assessment_item__contentnode__tree_id'))).values('pk')))
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=
boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit
=edit_cte.exists(cls._permission_filter), view=view_cte.exists(
cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |
Q(uploaded_by=user, contentnode__isnull=True,
assessment_item__isnull=True))
class Admin:
pass
def __str__(self):
return '{checksum}{extension}'.format(checksum=self.checksum,
extension='.' + self.file_format.extension)
def filename(self):
"""
Returns just the filename of the File in storage, without the path
e.g. abcd.mp4
"""
return os.path.basename(self.file_on_disk.name)
def update_contentnode_content_id(self):
"""
If the file is attached to a contentnode and is not a thumbnail
then update that contentnode's content_id if it's a copied contentnode.
"""
if self.contentnode and self.preset.thumbnail is False:
self.contentnode.make_content_id_unique()
def on_update(self):
self.modified = timezone.now()
self.update_contentnode_content_id()
def save(self, set_by_file_on_disk=True, *args, **kwargs):
"""
        Overrides the default save method.
If the file_on_disk FileField gets passed a content copy:
1. generate the MD5 from the content copy
2. fill the other fields accordingly
"""
from contentcuration.utils.user import calculate_user_storage
if self.file_format_id:
if self.file_format_id not in dict(file_formats.choices):
raise ValidationError('Invalid file_format')
if set_by_file_on_disk and self.file_on_disk:
if self.checksum is None or self.checksum == '':
md5 = hashlib.md5()
for chunk in self.file_on_disk.chunks():
md5.update(chunk)
self.checksum = md5.hexdigest()
if not self.file_size:
self.file_size = self.file_on_disk.size
if not self.file_format_id:
ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
if ext in list(dict(file_formats.choices).keys()):
self.file_format_id = ext
else:
raise ValueError('Files of type `{}` are not supported.'
.format(ext))
super(File, self).save(*args, **kwargs)
if self.uploaded_by_id:
calculate_user_storage(self.uploaded_by_id)
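    # Illustrative (ContentFile lives in django.core.files.base; payload made up):
    #   f = File(file_on_disk=ContentFile(b'%PDF-1.4 ...', name='doc.pdf'))
    #   f.save()  # derives checksum (MD5), file_size and file_format_id ('pdf')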
class Meta:
indexes = [models.Index(fields=['checksum', 'file_size'], name=
FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],
name=FILE_MODIFIED_DESC_INDEX_NAME)]
constraints = [models.CheckConstraint(check=Q(preset__in=
MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name
=FILE_DURATION_CONSTRAINT)]
class PrerequisiteContentRelationship(models.Model):
"""
Predefine the prerequisite relationship between two ContentNode objects.
"""
target_node = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
prerequisite = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)
class Meta:
unique_together = ['target_node', 'prerequisite']
def clean(self, *args, **kwargs):
if self.target_node == self.prerequisite:
raise IntegrityError('Cannot self reference as prerequisite.')
if PrerequisiteContentRelationship.objects.using(self._state.db
).filter(target_node=self.prerequisite, prerequisite=self.
target_node):
raise IntegrityError(
'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
% (self.target_node, self.prerequisite))
super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)
def save(self, *args, **kwargs):
self.full_clean()
super(PrerequisiteContentRelationship, self).save(*args, **kwargs)
    def __str__(self):
        return '%s' % self.pk
class RelatedContentRelationship(models.Model):
"""
Predefine the related relationship between two ContentNode objects.
"""
contentnode_1 = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
contentnode_2 = models.ForeignKey(ContentNode, related_name=
'%(app_label)s_%(class)s_2', on_delete=models.CASCADE)
class Meta:
unique_together = ['contentnode_1', 'contentnode_2']
def save(self, *args, **kwargs):
if self.contentnode_1 == self.contentnode_2:
raise IntegrityError('Cannot self reference as related.')
if RelatedContentRelationship.objects.using(self._state.db).filter(
contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1
):
return
super(RelatedContentRelationship, self).save(*args, **kwargs)
class Invitation(models.Model):
""" Invitation to edit channel """
id = UUIDField(primary_key=True, default=uuid.uuid4)
accepted = models.BooleanField(default=False)
declined = models.BooleanField(default=False)
revoked = models.BooleanField(default=False)
invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
SET_NULL, null=True, related_name='sent_to')
share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
email = models.EmailField(max_length=100, null=True)
sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=
'sent_by', null=True, on_delete=models.CASCADE)
channel = models.ForeignKey('Channel', null=True, related_name=
'pending_editors', on_delete=models.CASCADE)
first_name = models.CharField(max_length=100, blank=True)
last_name = models.CharField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = 'Invitation'
verbose_name_plural = 'Invitations'
def accept(self):
user = User.objects.filter(email__iexact=self.email).first()
if self.channel:
if self.share_mode == VIEW_ACCESS:
self.channel.editors.remove(user)
self.channel.viewers.add(user)
else:
self.channel.viewers.remove(user)
self.channel.editors.add(user)
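    # Note: the user is re-resolved by email (case-insensitive) and moved into
    # exactly one permission set, so editors and viewers never overlap.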
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |
Q(channel__editors=user)).distinct()
@classmethod
def filter_view_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |
Q(channel__editors=user) | Q(channel__viewers=user)).distinct()
class Change(models.Model):
server_rev = models.BigAutoField(primary_key=True)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')
channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=
models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=
True, on_delete=models.CASCADE, related_name='changes_about_user')
client_rev = models.IntegerField(null=True, blank=True)
session = models.ForeignKey(Session, null=True, blank=True, on_delete=
models.SET_NULL)
table = models.CharField(max_length=32)
change_type = models.IntegerField()
kwargs = JSONField(encoder=JSONEncoder)
applied = models.BooleanField(default=False)
errored = models.BooleanField(default=False)
@classmethod
def _create_from_change(cls, created_by_id=None, channel_id=None,
user_id=None, session_key=None, applied=False, table=None, rev=None,
**data):
        change_type = data.pop('type', None)
if table is None or table not in ALL_TABLES:
raise TypeError(
'table is a required argument for creating changes and must be a valid table name'
)
if change_type is None or change_type not in ALL_CHANGES:
raise TypeError(
'change_type is a required argument for creating changes and must be a valid change type integer'
)
return cls(session_id=session_key, created_by_id=created_by_id,
channel_id=channel_id, user_id=user_id, client_rev=rev, table=
table, change_type=change_type, kwargs=data, applied=applied)
@classmethod
def create_changes(cls, changes, created_by_id=None, session_key=None,
applied=False):
change_models = []
for change in changes:
change_models.append(cls._create_from_change(created_by_id=
created_by_id, session_key=session_key, applied=applied, **
change))
cls.objects.bulk_create(change_models)
return change_models
@classmethod
def create_change(cls, change, created_by_id=None, session_key=None,
applied=False):
obj = cls._create_from_change(created_by_id=created_by_id,
session_key=session_key, applied=applied, **change)
obj.save()
return obj
@classmethod
def serialize(cls, change):
datum = get_attribute(change, ['kwargs']).copy()
datum.update({'server_rev': get_attribute(change, ['server_rev']),
'table': get_attribute(change, ['table']), 'type':
get_attribute(change, ['change_type']), 'channel_id':
get_attribute(change, ['channel_id']), 'user_id': get_attribute
(change, ['user_id']), 'created_by_id': get_attribute(change, [
'created_by_id'])})
return datum
def serialize_to_change_dict(self):
return self.serialize(self)
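# Illustrative shape of Change.serialize(change) (values made up):
#   {**change.kwargs, 'server_rev': 42, 'table': 'contentnode', 'type': 2,
#    'channel_id': '...', 'user_id': None, 'created_by_id': 7}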
class TaskResultCustom(object):
"""
Custom fields to add to django_celery_results's TaskResult model
If adding fields to this class, run `makemigrations` then move the generated migration from the
`django_celery_results` app to the `contentcuration` app and override the constructor to change
the app_label. See `0141_add_task_signature` for an example
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',
on_delete=models.CASCADE, null=True)
channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)
progress = models.IntegerField(null=True, blank=True, validators=[
MinValueValidator(0), MaxValueValidator(100)])
signature = models.CharField(null=True, blank=False, max_length=32)
super_as_dict = TaskResult.as_dict
def as_dict(self):
"""
:return: A dictionary representation
"""
super_dict = self.super_as_dict()
super_dict.update(user_id=self.user_id, channel_id=self.channel_id,
progress=self.progress)
return super_dict
@classmethod
def contribute_to_class(cls, model_class=TaskResult):
"""
Adds fields to model, by default TaskResult
:param model_class: TaskResult model
"""
for field in dir(cls):
if not field.startswith('_') and field not in (
'contribute_to_class', 'Meta'):
model_class.add_to_class(field, getattr(cls, field))
setattr(model_class._meta, 'indexes', getattr(model_class._meta,
'indexes', []) + cls.Meta.indexes)
class Meta:
indexes = [models.Index(fields=['signature'], name=
'task_result_signature_idx', condition=Q(status__in=
celery_states.UNREADY_STATES))]
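# Usage sketch: calling contribute_to_class() once at startup (presumably during
# app configuration) monkey-patches the fields and indexes above onto
# django_celery_results's TaskResult model.
#
# TaskResultCustom.contribute_to_class()
# TaskResult.objects.first().as_dict() # now includes user_id, channel_id, progress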
import hashlib
import json
import logging
import os
import urllib.parse
import uuid
from datetime import datetime
import pytz
from celery import states as celery_states
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.contrib.sessions.models import Session
from django.core.cache import cache
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from django.core.files.storage import FileSystemStorage
from django.core.mail import send_mail
from django.core.validators import MaxValueValidator
from django.core.validators import MinValueValidator
from django.db import IntegrityError
from django.db import models
from django.db.models import Count
from django.db.models import Exists
from django.db.models import F
from django.db.models import Index
from django.db.models import JSONField
from django.db.models import Max
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models import Sum
from django.db.models import UUIDField as DjangoUUIDField
from django.db.models import Value
from django.db.models.expressions import ExpressionList
from django.db.models.expressions import RawSQL
from django.db.models.functions import Lower
from django.db.models.indexes import IndexExpression
from django.db.models.query_utils import DeferredAttribute
from django.db.models.sql import Query
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import gettext as _
from django_celery_results.models import TaskResult
from django_cte import With
from le_utils import proquint
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import languages
from le_utils.constants import roles
from model_utils import FieldTracker
from mptt.models import MPTTModel
from mptt.models import raise_if_unsaved
from mptt.models import TreeForeignKey
from postmark.core import PMMailInactiveRecipientException
from postmark.core import PMMailUnauthorizedException
from rest_framework.authtoken.models import Token
from rest_framework.fields import get_attribute
from rest_framework.utils.encoders import JSONEncoder
from contentcuration.constants import channel_history
from contentcuration.constants import completion_criteria
from contentcuration.constants import user_history
from contentcuration.constants.contentnode import kind_activity_map
from contentcuration.db.models.expressions import Array
from contentcuration.db.models.functions import ArrayRemove
from contentcuration.db.models.functions import Unnest
from contentcuration.db.models.manager import CustomContentNodeTreeManager
from contentcuration.db.models.manager import CustomManager
from contentcuration.statistics import record_channel_stats
from contentcuration.utils.cache import delete_public_channel_cache_keys
from contentcuration.utils.parser import load_json_string
from contentcuration.viewsets.sync.constants import ALL_CHANGES
from contentcuration.viewsets.sync.constants import ALL_TABLES
EDIT_ACCESS = "edit"
VIEW_ACCESS = "view"
DEFAULT_CONTENT_DEFAULTS = {
'license': None,
'language': None,
'author': None,
'aggregator': None,
'provider': None,
'copyright_holder': None,
'license_description': None,
'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5,
'm_value': 5,
'n_value': 5,
'auto_derive_video_thumbnail': True,
'auto_derive_audio_thumbnail': True,
'auto_derive_document_thumbnail': True,
'auto_derive_html5_thumbnail': True,
'auto_derive_exercise_thumbnail': True,
'auto_randomize_questions': True,
}
DEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False)
def to_pk(model_or_pk):
if isinstance(model_or_pk, models.Model):
return model_or_pk.pk
return model_or_pk
class UserManager(BaseUserManager):
def create_user(self, email, first_name, last_name, password=None):
if not email:
raise ValueError('Email address not specified')
new_user = self.model(
email=self.normalize_email(email),
)
new_user.set_password(password)
new_user.first_name = first_name
new_user.last_name = last_name
new_user.save(using=self._db)
return new_user
def create_superuser(self, email, first_name, last_name, password=None):
new_user = self.create_user(email, first_name, last_name, password=password)
new_user.is_admin = True
new_user.save(using=self._db)
return new_user
class UniqueActiveUserIndex(Index):
def create_sql(self, model, schema_editor, using='', **kwargs):
"""
This is a vendored and modified version of the Django create_sql method
We do this so that we can monkey patch in the unique index statement onto the schema_editor
while we create the statement for this index, and then revert it to normal.
We should remove this as soon as Django natively supports UniqueConstraints with Expressions.
This should hopefully be the case in Django 3.3.
"""
include = [model._meta.get_field(field_name).column for field_name in self.include]
condition = self._get_condition_sql(model, schema_editor)
if self.expressions:
index_expressions = []
for expression in self.expressions:
index_expression = IndexExpression(expression)
index_expression.set_wrapper_classes(schema_editor.connection)
index_expressions.append(index_expression)
expressions = ExpressionList(*index_expressions).resolve_expression(
Query(model, alias_cols=False),
)
fields = None
col_suffixes = None
else:
fields = [
model._meta.get_field(field_name)
for field_name, _ in self.fields_orders
]
col_suffixes = [order[1] for order in self.fields_orders]
expressions = None
sql = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(condition)s"
# Store the normal SQL statement for indexes
old_create_index_sql = schema_editor.sql_create_index
# Replace it with our own unique index so that this index actually adds a constraint
schema_editor.sql_create_index = sql
# Generate the SQL statement that we want to return
return_statement = schema_editor._create_index_sql(
model, fields=fields, name=self.name, using=using,
db_tablespace=self.db_tablespace, col_suffixes=col_suffixes,
opclasses=self.opclasses, condition=condition, include=include,
expressions=expressions, **kwargs,
)
# Reinstate the previous index SQL statement so that we have done no harm
schema_editor.sql_create_index = old_create_index_sql
# Return our SQL statement
return return_statement
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=100, unique=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField('active', default=False,
help_text='Designates whether this user should be treated as active.')
is_staff = models.BooleanField('staff status', default=False,
help_text='Designates whether the user can log into this admin site.')
date_joined = models.DateTimeField('date joined', default=timezone.now)
clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL)
preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload')
disk_space_used = models.FloatField(default=0, help_text='How many bytes a user has uploaded')
information = JSONField(null=True)
content_defaults = JSONField(default=dict)
policies = JSONField(default=dict, null=True)
feature_flags = JSONField(default=dict, null=True)
deleted = models.BooleanField(default=False, db_index=True)
_field_updates = FieldTracker(fields=[
# Field to watch for changes
"disk_space",
])
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
def __unicode__(self):
return self.email
def delete(self):
"""
Soft deletes the user account.
"""
self.deleted = True
# Deactivate the user to disallow authentication and also
# to let the user verify the email again after recovery.
self.is_active = False
self.save()
self.history.create(user_id=self.pk, action=user_history.DELETION)
def recover(self):
"""
Use this method when we want to recover a user.
"""
self.deleted = False
self.save()
self.history.create(user_id=self.pk, action=user_history.RECOVERY)
def hard_delete_user_related_data(self):
"""
Hard deletes all user-related data but keeps the user record itself intact.
User related data that gets hard deleted are:
- sole editor non-public channels.
- sole editor non-public channelsets.
- sole editor non-public channels' content nodes and their underlying files that are not
used by any other channel.
- all user invitations.
"""
from contentcuration.viewsets.common import SQCount
# Hard delete invitations associated to this account.
self.sent_to.all().delete()
self.sent_by.all().delete()
editable_channels_user_query = (
User.objects.filter(editable_channels__id=OuterRef('id'))
.values_list('id', flat=True)
.distinct()
)
non_public_channels_sole_editor = self.editable_channels.annotate(num_editors=SQCount(
editable_channels_user_query, field="id")).filter(num_editors=1, public=False)
# Point sole editor non-public channels' contentnodes to orphan tree to let
# our garbage collection delete the nodes and underlying files.
ContentNode._annotate_channel_id(ContentNode.objects).filter(channel_id__in=list(
non_public_channels_sole_editor.values_list("id", flat=True))).update(parent_id=settings.ORPHANAGE_ROOT_ID)
# Hard delete non-public channels associated with this user (if user is the only editor).
non_public_channels_sole_editor.delete()
# Hard delete non-public channel collections associated with this user (if user is the only editor).
user_query = (
User.objects.filter(channel_sets__id=OuterRef('id'))
.values_list('id', flat=True)
.distinct()
)
self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1, public=False).delete()
# Create history!
self.history.create(user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION)
def can_edit(self, channel_id):
return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists()
def check_space(self, size, checksum):
if self.is_admin:
return True
active_files = self.get_user_active_files()
if active_files.filter(checksum=checksum).exists():
return True
space = self.get_available_space(active_files=active_files)
if space < size:
raise PermissionDenied(_("Not enough space. Check your storage under Settings page."))
def check_channel_space(self, channel):
active_files = self.get_user_active_files()
staging_tree_id = channel.staging_tree.tree_id
channel_files = self.files\
.filter(contentnode__tree_id=staging_tree_id)\
.values('checksum')\
.distinct()\
.exclude(checksum__in=active_files.values_list('checksum', flat=True))
staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0)
if self.get_available_space(active_files=active_files) < (staged_size):
raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))
def check_staged_space(self, size, checksum):
if self.staged_files.filter(checksum=checksum).exists():
return True
space = self.get_available_staged_space()
if space < size:
raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))
def get_available_staged_space(self):
space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum("file_size"))['size'] or 0
return float(max(self.disk_space - space_used, 0))
def get_available_space(self, active_files=None):
return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0))
def get_user_active_trees(self):
return self.editable_channels.exclude(deleted=True)\
.values(tree_id=F("main_tree__tree_id"))
def get_user_active_files(self):
cte = With(self.get_user_active_trees().distinct())
return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\
.with_cte(cte)\
.values('checksum')\
.distinct()
def get_space_used(self, active_files=None):
active_files = active_files or self.get_user_active_files()
files = active_files.aggregate(total_used=Sum('file_size'))
return float(files['total_used'] or 0)
def set_space_used(self):
self.disk_space_used = self.get_space_used()
self.save()
return self.disk_space_used
def get_space_used_by_kind(self):
active_files = self.get_user_active_files()
files = active_files.values('preset__kind_id')\
.annotate(space=Sum('file_size'))\
.order_by()
kind_dict = {}
for item in files:
kind_dict[item['preset__kind_id']] = item['space']
return kind_dict
def email_user(self, subject, message, from_email=None, **kwargs):
try:
# msg = EmailMultiAlternatives(subject, message, from_email, [self.email])
# msg.attach_alternative(kwargs["html_message"],"text/html")
# msg.send()
send_mail(subject, message, from_email, [self.email], **kwargs)
except (PMMailInactiveRecipientException, PMMailUnauthorizedException) as e:
logging.error(str(e))
def clean(self):
super(User, self).clean()
self.email = self.__class__.objects.normalize_email(self.email)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""
Returns the short name for the user.
"""
return self.first_name
def get_token(self):
token, _ = Token.objects.get_or_create(user=self)
return token.key
def save(self, *args, **kwargs):
from contentcuration.utils.user import calculate_user_storage
super(User, self).save(*args, **kwargs)
if 'disk_space' in self._field_updates.changed():
calculate_user_storage(self.pk)
changed = False
if not self.content_defaults:
self.content_defaults = DEFAULT_CONTENT_DEFAULTS
changed = True
if not self.clipboard_tree:
self.clipboard_tree = ContentNode.objects.create(title=self.email + " clipboard", kind_id=content_kinds.TOPIC)
self.clipboard_tree.save()
changed = True
if changed:
self.save()
class Meta:
verbose_name = "User"
verbose_name_plural = "Users"
indexes = [
UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name="contentcura_email_d4d492_idx")
]
@classmethod
def filter_view_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
# all shared editors
all_editable = User.editable_channels.through.objects.all()
editable = all_editable.filter(
channel_id__in=all_editable.filter(user_id=user.pk).values_list("channel_id", flat=True)
)
# all shared viewers
all_view_only = User.view_only_channels.through.objects.all()
view_only = all_view_only.filter(
channel_id__in=all_view_only.filter(user_id=user.pk).values_list("channel_id", flat=True)
)
return queryset.filter(
Q(pk=user.pk)
| Q(pk__in=editable.values_list("user_id", flat=True))
| Q(pk__in=view_only.values_list("user_id", flat=True))
)
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(pk=user.pk)
@classmethod
def get_for_email(cls, email, deleted=False, **filters):
"""
Returns the appropriate User record given an email, ordered by:
- those with is_active=True first, of which there should only ever be one
- otherwise by ID DESC, so the most recent inactive record is returned
Filters out deleted User records by default. To include both deleted and
undeleted user records, pass None to the deleted argument.
:param email: A string of the user's email
:param filters: Additional filters to filter the User queryset
:return: User or None
"""
user_qs = User.objects.filter(email__iexact=email.strip())
if deleted is not None:
user_qs = user_qs.filter(deleted=deleted)
return user_qs.filter(**filters).order_by("-is_active", "-id").first()
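# Usage sketch (hypothetical address): active accounts win over inactive ones,
# and soft-deleted accounts are excluded unless deleted=None is passed.
#
# user = User.get_for_email("someone@example.com")
# any_user = User.get_for_email("someone@example.com", deleted=None)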
class UUIDField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(*args, **kwargs)
def prepare_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
def get_default(self):
result = super(UUIDField, self).get_default()
if isinstance(result, uuid.UUID):
result = result.hex
return result
def to_python(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
class MPTTTreeIDManager(models.Model):
"""
Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,
the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.
We resolve this by creating a dummy table and using its ID as the tree index, taking advantage of the
db's concurrency-friendly way of generating sequential integer IDs. As this table is used only for ID
generation, it does not need any fields. There is a custom migration that ensures that the number of
records (and thus the max id) matches the max tree ID number when this table gets added.
"""
def file_on_disk_name(instance, filename):
"""
Create a namespaced file path from the File object's checksum property.
This path will be used to store the content copy
:param instance: File (content File model)
:param filename: str
:return: str
"""
return generate_file_on_disk_name(instance.checksum, filename)
def generate_file_on_disk_name(checksum, filename):
""" Separated from file_on_disk_name to allow for simple way to check if has already exists """
h = checksum
basename, ext = os.path.splitext(filename)
directory = os.path.join(settings.STORAGE_ROOT, h[0], h[1])
if not os.path.exists(directory):
os.makedirs(directory)
return os.path.join(directory, h + ext.lower())
def object_storage_name(instance, filename):
"""
Create a namespaced file path from the File object's checksum property.
This path will be used to store the content copy
:param instance: File (content File model)
:param filename: str
:return: str
"""
default_ext = ''
if instance.file_format_id:
default_ext = '.{}'.format(instance.file_format_id)
return generate_object_storage_name(instance.checksum, filename, default_ext)
def generate_object_storage_name(checksum, filename, default_ext=''):
""" Separated from file_on_disk_name to allow for simple way to check if has already exists """
h = checksum
basename, actual_ext = os.path.splitext(filename)
ext = actual_ext if actual_ext else default_ext
# Use / instead of os.path.join as Windows makes this \\
directory = "/".join([settings.STORAGE_ROOT, h[0], h[1]])
return os.path.join(directory, h + ext.lower())
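# Worked example (assuming settings.STORAGE_ROOT == "storage" and a POSIX path
# separator): the first two checksum characters become shard directories.
#
# generate_object_storage_name("abcdef0123456789", "video.mp4")
# -> "storage/a/b/abcdef0123456789.mp4"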
def generate_storage_url(filename, request=None, *args):
"""
Generate a storage URL for the given content filename.
"""
path = generate_object_storage_name(os.path.splitext(filename)[0], filename)
# There are two scenarios in which Studio might be run:
#
# 1. In normal kubernetes, nginx will proxy for us. We'll know we're in kubernetes when the
# environment variable RUN_MODE=k8s
#
# 2. In Docker Compose and bare metal runserver, we'll be running in runserver, and minio
# will be exposed in port 9000 in the host's localhost network.
# Note (aron): returning the true storage URL (e.g. https://storage.googleapis.com/storage/a.mp4)
# isn't too important, because we have CDN in front of our servers, so it should be cached.
# But change the logic here in case there is a potential for bandwidth and latency improvement.
# Detect our current state first
run_mode = os.getenv("RUN_MODE")
# if we're running inside k8s, then just serve the normal /content/{storage,databases} URL,
# and let nginx handle proper proxying.
if run_mode == "k8s":
url = "/content/{path}".format(
path=path,
)
# if we're in docker-compose or in baremetal, just return the object storage URL as localhost:9000
elif run_mode == "docker-compose" or run_mode is None:
# generate the minio storage URL, so we can get the GET parameters that give everyone
# access even if they don't need to log in
params = urllib.parse.urlparse(default_storage.url(path)).query
host = "localhost"
port = 9000 # hardcoded to the default minio IP address
url = "http://{host}:{port}/{bucket}/{path}?{params}".format(
host=host,
port=port,
bucket=settings.AWS_S3_BUCKET_NAME,
path=path,
params=params,
)
return url
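# Usage sketch (assuming STORAGE_ROOT == "storage"): in k8s mode the URL is a
# relative path for nginx to proxy; otherwise it targets the local minio server
# on port 9000 with presigned query parameters appended.
#
# os.environ["RUN_MODE"] = "k8s"
# generate_storage_url("abc123.mp4") # -> "/content/storage/a/b/abc123.mp4"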
class FileOnDiskStorage(FileSystemStorage):
"""
Overrides FileSystemStorage's default save method to ignore duplicated files.
"""
def get_available_name(self, name):
return name
def _save(self, name, content):
if self.exists(name):
# if the file exists, do not call the superclass's _save method
logging.warning('Content copy "%s" already exists!' % name)
return name
return super(FileOnDiskStorage, self)._save(name, content)
class SecretToken(models.Model):
"""Tokens for channels"""
token = models.CharField(max_length=100, unique=True)
is_primary = models.BooleanField(default=False)
@classmethod
def exists(cls, token):
"""
Return true when the token string given by string already exists.
Returns false otherwise.
"""
return cls.objects.filter(token=token).exists()
@classmethod
def generate_new_token(cls):
"""
Creates a primary secret token for the current channel using a proquint
string. Creates a secondary token containing the channel id.
These tokens can be used to refer to the channel to download its content
database.
"""
token = proquint.generate()
# Try 100 times to generate a unique token.
TRIALS = 100
for __ in range(TRIALS):
token = proquint.generate()
if SecretToken.exists(token):
continue
break
# If after TRIALS attempts we didn't get a unique token,
# raise an error.
# See https://stackoverflow.com/a/9980160 on what for-else loop does.
else:
raise ValueError("Cannot generate new token")
# We found a unique token! Save it
return token
def __str__(self):
return "{}-{}".format(self.token[:5], self.token[5:])
def get_channel_thumbnail(channel):
if not isinstance(channel, dict):
channel = channel.__dict__
if channel.get("thumbnail_encoding"):
thumbnail_data = channel.get("thumbnail_encoding")
if thumbnail_data.get("base64"):
return thumbnail_data["base64"]
if channel.get("thumbnail") and 'static' not in channel.get("thumbnail"):
return generate_storage_url(channel.get("thumbnail"))
return '/static/img/kolibri_placeholder.png'
CHANNEL_NAME_INDEX_NAME = "channel_name_idx"
# A list of all the FKs from Channel object
# to ContentNode trees
# used for permissions filtering
CHANNEL_TREES = (
"main_tree",
"chef_tree",
"trash_tree",
"staging_tree",
"previous_tree",
)
def boolean_val(val):
return Value(val, output_field=models.BooleanField())
class PermissionCTE(With):
tree_id_fields = [
"channel__{}__tree_id".format(tree_name)
for tree_name in CHANNEL_TREES
]
def __init__(self, model, user_id, **kwargs):
queryset = model.objects.filter(user_id=user_id)\
.annotate(
tree_id=Unnest(ArrayRemove(Array(*self.tree_id_fields), None), output_field=models.IntegerField())
)
super(PermissionCTE, self).__init__(queryset=queryset.values("user_id", "channel_id", "tree_id"), **kwargs)
@classmethod
def editable_channels(cls, user_id):
return PermissionCTE(User.editable_channels.through, user_id, name="editable_channels_cte")
@classmethod
def view_only_channels(cls, user_id):
return PermissionCTE(User.view_only_channels.through, user_id, name="view_only_channels_cte")
def exists(self, *filters):
return Exists(self.queryset().filter(*filters).values("user_id"))
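# Usage sketch: the CTE flattens every tree id the user can edit, so tree-scoped
# models can annotate an `edit` flag with a single join; see
# ContentNode.filter_edit_queryset below for the real call site.
#
# cte = PermissionCTE.editable_channels(user.id)
# nodes = ContentNode.objects.with_cte(cte).annotate(
# edit=cte.exists(Q(tree_id=OuterRef("tree_id"))),
# )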
class Channel(models.Model):
""" Permissions come from association with organizations """
id = UUIDField(primary_key=True, default=uuid.uuid4)
name = models.CharField(max_length=200, blank=True)
description = models.CharField(max_length=400, blank=True)
tagline = models.CharField(max_length=150, blank=True, null=True)
version = models.IntegerField(default=0)
thumbnail = models.TextField(blank=True, null=True)
thumbnail_encoding = JSONField(default=dict)
editors = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='editable_channels',
verbose_name="editors",
help_text="Users with edit rights",
blank=True,
)
viewers = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='view_only_channels',
verbose_name="viewers",
help_text="Users with view only rights",
blank=True,
)
language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL)
trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL)
clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL)
main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL)
staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL)
chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL)
previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', on_delete=models.SET_NULL)
bookmarked_by = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='bookmarked_channels',
verbose_name="bookmarked by",
)
deleted = models.BooleanField(default=False, db_index=True)
public = models.BooleanField(default=False, db_index=True)
preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
content_defaults = JSONField(default=dict)
priority = models.IntegerField(default=0, help_text="Order to display public channels")
last_published = models.DateTimeField(blank=True, null=True)
secret_tokens = models.ManyToManyField(
SecretToken,
related_name='channels',
verbose_name="secret tokens",
blank=True,
)
source_url = models.CharField(max_length=200, blank=True, null=True)
demo_server_url = models.CharField(max_length=200, blank=True, null=True)
# Fields specific to content generated by Ricecooker
source_id = models.CharField(max_length=200, blank=True, null=True)
source_domain = models.CharField(max_length=300, blank=True, null=True)
ricecooker_version = models.CharField(max_length=100, blank=True, null=True)
# Fields to calculate when channel is published
published_data = JSONField(default=dict)
icon_encoding = models.TextField(blank=True, null=True)
total_resource_count = models.IntegerField(default=0)
published_kind_count = models.TextField(blank=True, null=True)
published_size = models.FloatField(default=0)
included_languages = models.ManyToManyField(
"Language",
related_name='channels',
verbose_name="languages",
blank=True,
)
_field_updates = FieldTracker(fields=[
# Field to watch for changes
"description",
"language_id",
"thumbnail",
"name",
"thumbnail_encoding",
# watch these fields for changes
# but exclude them from setting changed
# on the main tree
"deleted",
"public",
"main_tree_id",
"version",
])
@classmethod
def get_editable(cls, user, channel_id):
return cls.filter_edit_queryset(cls.objects.all(), user).get(id=channel_id)
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
# anonymous users have no edit access, so return an empty queryset
if not user_id:
return queryset.none()
edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef("id")))
queryset = queryset.annotate(edit=edit)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
user_email = not user.is_anonymous and user.email
if user_id:
filters = dict(user_id=user_id, channel_id=OuterRef("id"))
edit = Exists(User.editable_channels.through.objects.filter(**filters).values("user_id"))
view = Exists(User.view_only_channels.through.objects.filter(**filters).values("user_id"))
else:
edit = boolean_val(False)
view = boolean_val(False)
queryset = queryset.annotate(
edit=edit,
view=view,
)
if user_id and user.is_admin:
return queryset
permission_filter = Q()
if user_id:
pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list(
"channel_id", flat=True
)
permission_filter = (
Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels)
)
return queryset.filter(permission_filter | Q(deleted=False, public=True))
@classmethod
def get_all_channels(cls):
return cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct()
def resource_size_key(self):
return "{}_resource_size".format(self.pk)
# Might be good to display resource size, but need to improve query time first
def get_resource_size(self):
cached_data = cache.get(self.resource_size_key())
if cached_data:
return cached_data
tree_id = self.main_tree.tree_id
files = File.objects.select_related('contentnode', 'assessment_item')\
.filter(contentnode__tree_id=tree_id)\
.values('checksum', 'file_size')\
.distinct()\
.aggregate(resource_size=Sum('file_size'))
cache.set(self.resource_size_key(), files['resource_size'] or 0, None)
return files['resource_size'] or 0
def on_create(self):
record_channel_stats(self, None)
if not self.content_defaults:
self.content_defaults = DEFAULT_CONTENT_DEFAULTS
if not self.main_tree:
self.main_tree = ContentNode.objects.create(
title=self.name,
kind_id=content_kinds.TOPIC,
content_id=self.id,
node_id=self.id,
original_channel_id=self.id,
source_channel_id=self.id,
changed=True,
complete=True,
)
# Ensure that locust or unit tests raise if there are any concurrency issues with tree ids.
if settings.DEBUG:
if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1:
raise AssertionError
if not self.trash_tree:
self.trash_tree = ContentNode.objects.create(
title=self.name,
kind_id=content_kinds.TOPIC,
content_id=self.id,
node_id=self.id,
)
# if this change affects the published channel list, clear the channel cache
if self.public and (self.main_tree and self.main_tree.published):
delete_public_channel_cache_keys()
def on_update(self):
from contentcuration.utils.user import calculate_user_storage
original_values = self._field_updates.changed()
record_channel_stats(self, original_values)
blacklist = set([
"public",
"main_tree_id",
"version",
])
if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)):
# Changing channel metadata should also mark main_tree as changed
self.main_tree.changed = True
# Check if original thumbnail is no longer referenced
if "thumbnail" in original_values and original_values["thumbnail"] and 'static' not in original_values["thumbnail"]:
filename, ext = os.path.splitext(original_values["thumbnail"])
delete_empty_file_reference(filename, ext[1:])
# Refresh storage for all editors on the channel
if "deleted" in original_values:
for editor in self.editors.all():
calculate_user_storage(editor.pk)
# Delete db if channel has been deleted and mark as unpublished
if "deleted" in original_values and not original_values["deleted"]:
self.pending_editors.all().delete()
export_db_storage_path = os.path.join(settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id))
if default_storage.exists(export_db_storage_path):
default_storage.delete(export_db_storage_path)
if self.main_tree:
self.main_tree.published = False
if self.main_tree and self.main_tree._field_updates.changed():
self.main_tree.save()
# if this change affects the published channel list, clear the channel cache
if "public" in original_values and (self.main_tree and self.main_tree.published):
delete_public_channel_cache_keys()
def save(self, *args, **kwargs):
if self._state.adding:
self.on_create()
else:
self.on_update()
super(Channel, self).save(*args, **kwargs)
def get_thumbnail(self):
return get_channel_thumbnail(self)
def has_changes(self):
return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists()
def get_date_modified(self):
return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified']
def get_resource_count(self):
return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count()
def get_human_token(self):
return self.secret_tokens.get(is_primary=True)
def get_channel_id_token(self):
return self.secret_tokens.get(token=self.id)
def make_token(self):
token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True)
self.secret_tokens.get_or_create(token=self.id)
return token
def make_public(self, bypass_signals=False):
"""
Sets the current channel object to be public and viewable by anyone.
If bypass_signals is True, update the model in such a way that we
prevent any model signals from running due to the update.
Returns the same channel object.
"""
if bypass_signals:
self.public = True # set this attribute still, so the object will be updated
Channel.objects.filter(id=self.id).update(public=True)
# clear the channel cache
delete_public_channel_cache_keys()
else:
self.public = True
self.save()
return self
def mark_created(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.CREATION)
def mark_publishing(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.PUBLICATION)
self.main_tree.publishing = True
self.main_tree.save()
def mark_deleted(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.DELETION)
self.deleted = True
self.save()
def mark_recovered(self, user):
self.history.create(actor_id=to_pk(user), action=channel_history.RECOVERY)
self.deleted = False
self.save()
@property
def deletion_history(self):
return self.history.filter(action=channel_history.DELETION)
@property
def publishing_history(self):
return self.history.filter(action=channel_history.PUBLICATION)
@classmethod
def get_public_channels(cls, defer_nonmain_trees=False):
"""
Get all public channels.
If defer_nonmain_trees is True, defer the loading of all
trees except for the main_tree."""
if defer_nonmain_trees:
c = (Channel.objects
.filter(public=True)
.exclude(deleted=True)
.select_related('main_tree')
.prefetch_related('editors')
.defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers'))
else:
c = Channel.objects.filter(public=True).exclude(deleted=True)
return c
class Meta:
verbose_name = "Channel"
verbose_name_plural = "Channels"
indexes = [
models.Index(fields=["name"], name=CHANNEL_NAME_INDEX_NAME),
]
index_together = [
["deleted", "public"]
]
CHANNEL_HISTORY_CHANNEL_INDEX_NAME = "idx_channel_history_channel_id"
class ChannelHistory(models.Model):
"""
Model for tracking certain actions performed on a channel
"""
channel = models.ForeignKey('Channel', null=False, blank=False, related_name='history', on_delete=models.CASCADE)
actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE)
performed = models.DateTimeField(default=timezone.now)
action = models.CharField(max_length=50, choices=channel_history.choices)
@classmethod
def prune(cls):
"""
Prunes history records by keeping the most recent actions for each channel and type,
and deleting all other older actions
"""
keep_ids = cls.objects.distinct("channel_id", "action").order_by("channel_id", "action", "-performed").values_list("id", flat=True)
cls.objects.exclude(id__in=keep_ids).delete()
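# Usage sketch: intended for a periodic maintenance job, keeping only the most
# recent row per (channel, action) pair.
#
# ChannelHistory.prune()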
class Meta:
verbose_name = "Channel history"
verbose_name_plural = "Channel histories"
indexes = [
models.Index(fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME),
]
class UserHistory(models.Model):
"""
Model that stores the user's action history.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False, related_name="history", on_delete=models.CASCADE)
action = models.CharField(max_length=32, choices=user_history.choices)
performed_at = models.DateTimeField(default=timezone.now)
class ChannelSet(models.Model):
# NOTE: this is referred to as "channel collections" on the front-end, but we need to call it
# something else as there is already a ChannelCollection model on the front-end
id = UUIDField(primary_key=True, default=uuid.uuid4)
name = models.CharField(max_length=200, blank=True)
description = models.CharField(max_length=400, blank=True)
public = models.BooleanField(default=False, db_index=True)
editors = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='channel_sets',
verbose_name="editors",
help_text="Users with edit rights",
blank=True,
)
secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', on_delete=models.SET_NULL)
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
user_id = not user.is_anonymous and user.id
edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef("id")))
queryset = queryset.annotate(edit=edit)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
return cls.filter_edit_queryset(queryset, user)
def get_channels(self):
if self.secret_token:
return self.secret_token.channels.filter(deleted=False)
def save(self, *args, **kwargs):
if self._state.adding:
self.on_create()
super(ChannelSet, self).save()
def on_create(self):
if not self.secret_token:
self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token())
def delete(self, *args, **kwargs):
super(ChannelSet, self).delete(*args, **kwargs)
if self.secret_token:
self.secret_token.delete()
class ContentTag(models.Model):
id = UUIDField(primary_key=True, default=uuid.uuid4)
tag_name = models.CharField(max_length=50)
channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
objects = CustomManager()
def __str__(self):
return self.tag_name
class Meta:
unique_together = ['tag_name', 'channel']
class License(models.Model):
"""
Normalize the license of ContentNode model
"""
license_name = models.CharField(max_length=50)
license_url = models.URLField(blank=True)
license_description = models.TextField(blank=True)
copyright_holder_required = models.BooleanField(default=True)
is_custom = models.BooleanField(default=False)
exists = models.BooleanField(
default=False,
verbose_name="license exists",
help_text="Tells whether or not a content item is licensed to share",
)
@classmethod
def validate_name(cls, name):
if cls.objects.filter(license_name=name).count() == 0:
raise ValidationError('License `{}` does not exist'.format(name))
def __str__(self):
return self.license_name
NODE_ID_INDEX_NAME = "node_id_idx"
NODE_MODIFIED_INDEX_NAME = "node_modified_idx"
NODE_MODIFIED_DESC_INDEX_NAME = "node_modified_desc_idx"
CONTENTNODE_TREE_ID_CACHE_KEY = "contentnode_{pk}__tree_id"
class ContentNode(MPTTModel, models.Model):
"""
By default, all nodes have a title and can be used as a topic.
"""
# Random id used internally on Studio (See `node_id` for id used in Kolibri)
id = UUIDField(primary_key=True, default=uuid.uuid4)
# the content_id is used for tracking a user's interaction with a piece of
# content, in the face of possibly many copies of that content. When a user
# interacts with a piece of content, all substantially similar pieces of
# content should be marked as such as well. We track these "substantially
# similar" types of content by having them have the same content_id.
content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True)
# Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
# TODO: disallow nulls once existing models have been set
original_channel_id = UUIDField(primary_key=False, editable=False, null=True,
db_index=True) # Original channel copied from
source_channel_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate channel copied from
# Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary)
original_source_node_id = UUIDField(primary_key=False, editable=False, null=True,
db_index=True)
source_node_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate node_id of node copied from
# Fields specific to content generated by Ricecooker
source_id = models.CharField(max_length=200, blank=True, null=True)
source_domain = models.CharField(max_length=300, blank=True, null=True)
title = models.CharField(max_length=200, blank=True)
description = models.TextField(blank=True)
kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL)
license_description = models.CharField(max_length=400, null=True, blank=True)
prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of',
through='PrerequisiteContentRelationship', symmetrical=False, blank=True)
is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship',
symmetrical=False, blank=True)
language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)
tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)
# No longer used
sort_order = models.FloatField(max_length=50, default=1, verbose_name="sort order",
help_text="Ascending, lowest number shown first")
copyright_holder = models.CharField(max_length=200, null=True, blank=True, default="",
help_text="Organization of person who holds the essential rights")
# legacy field...
original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates')
cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones')
thumbnail_encoding = models.TextField(blank=True, null=True)
created = models.DateTimeField(default=timezone.now, verbose_name="created")
modified = models.DateTimeField(auto_now=True, verbose_name="modified")
published = models.BooleanField(default=False)
publishing = models.BooleanField(default=False)
complete = models.BooleanField(null=True)
changed = models.BooleanField(default=True)
"""
Extra fields for exercises:
- type: mastery model to use to determine completion
- m: m value for M out of N mastery criteria
- n: n value for M out of N mastery criteria
"""
extra_fields = JSONField(default=dict, blank=True, null=True)
author = models.CharField(max_length=200, blank=True, default="", help_text="Who created this content?",
null=True)
aggregator = models.CharField(max_length=200, blank=True, default="", help_text="Who gathered this content together?",
null=True)
provider = models.CharField(max_length=200, blank=True, default="", help_text="Who distributed this content?",
null=True)
role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER)
freeze_authoring_data = models.BooleanField(default=False)
# Fields for metadata labels
# These fields use a map to store applied labels
# {
# "<label_id1>": true,
# "<label_id2>": true,
# }
grade_levels = models.JSONField(blank=True, null=True)
resource_types = models.JSONField(blank=True, null=True)
learning_activities = models.JSONField(blank=True, null=True)
accessibility_labels = models.JSONField(blank=True, null=True)
categories = models.JSONField(blank=True, null=True)
learner_needs = models.JSONField(blank=True, null=True)
# A field for storing a suggested duration for the content node
# this duration should be in seconds.
suggested_duration = models.IntegerField(blank=True, null=True, help_text="Suggested duration for the content node (in seconds)")
objects = CustomContentNodeTreeManager()
# Track all updates and ignore a blacklist of attributes
# when we check for changes
_field_updates = FieldTracker()
_permission_filter = Q(tree_id=OuterRef("tree_id"))
@classmethod
def _annotate_channel_id(cls, queryset):
# Annotate channel id
return queryset.annotate(
channel_id=Subquery(
Channel.objects.filter(
main_tree__tree_id=OuterRef("tree_id")
).values_list("id", flat=True)[:1]
)
)
@classmethod
def filter_by_pk(cls, pk):
"""
When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always
returns a queryset filtered by pk.
When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode
for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. If
a ContentNode does not exist for `pk` then an empty queryset is returned.
"""
query = ContentNode.objects.filter(pk=pk)
if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:
tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))
if tree_id:
query = query.filter(tree_id=tree_id)
else:
tree_id = ContentNode.objects.filter(pk=pk).values_list("tree_id", flat=True).first()
if tree_id:
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None)
query = query.filter(tree_id=tree_id)
else:
query = query.none()
return query
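# Usage sketch (hypothetical pk): on a partitioned table the tree_id is needed
# to prune partitions, so it is looked up once and cached indefinitely per pk.
#
# node = ContentNode.filter_by_pk("00000000000000000000000000000001").first()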
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(
edit=edit_cte.exists(cls._permission_filter),
)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(
public=Exists(
Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef("tree_id")
).values("pk")
),
)
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
edit=edit_cte.exists(cls._permission_filter),
view=view_cte.exists(cls._permission_filter),
)
if user.is_admin:
return queryset
return queryset.filter(
Q(view=True)
| Q(edit=True)
| Q(public=True)
)
@raise_if_unsaved
def get_root(self):
# Only topics can be root nodes
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
return self
return super(ContentNode, self).get_root()
@raise_if_unsaved
def get_root_id(self):
# Only topics can be root nodes
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
return self.pk
return ContentNode.objects.values_list('pk', flat=True).get(
tree_id=self._mpttfield('tree_id'),
parent=None,
)
def get_tree_data(self, levels=float('inf')):
"""
Returns `levels`-deep tree information starting at current node.
Args:
levels (int): depth of tree hierarchy to return
Returns:
tree (dict): starting with self, with a children list containing either
just the children's `node_id`s or the full recursive tree.
"""
if self.kind_id == content_kinds.TOPIC:
node_data = {
"title": self.title,
"kind": self.kind_id,
"node_id": self.node_id,
"studio_id": self.id,
}
children = self.children.all()
if levels > 0:
node_data["children"] = [c.get_tree_data(levels=levels - 1) for c in children]
return node_data
if self.kind_id == content_kinds.EXERCISE:
return {
"title": self.title,
"kind": self.kind_id,
"count": self.assessment_items.count(),
"node_id": self.node_id,
"studio_id": self.id,
}
return {
"title": self.title,
"kind": self.kind_id,
"file_size": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],
"node_id": self.node_id,
"studio_id": self.id,
}
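# Illustrative return value for a topic node with levels=1 (values hypothetical):
#
# {
# "title": "Mathematics",
# "kind": "topic",
# "node_id": "…",
# "studio_id": "…",
# "children": [{"title": "Algebra", "kind": "topic", "node_id": "…", "studio_id": "…"}],
# }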
def get_original_node(self):
original_node = self.original_node or self
if self.original_channel_id and self.original_source_node_id:
original_tree_id = Channel.objects.select_related("main_tree").get(pk=self.original_channel_id).main_tree.tree_id
original_node = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first() or \
ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first() or self
return original_node
def get_associated_presets(self):
key = "associated_presets_{}".format(self.kind_id)
cached_data = cache.get(key)
if cached_data:
return cached_data
presets = list(FormatPreset.objects.filter(kind=self.kind).values())
cache.set(key, presets, None)
return presets
def get_prerequisites(self):
prerequisite_mapping = {}
prerequisites = self.prerequisite.all()
prereqlist = list(prerequisites)
for prereq in prerequisites:
prlist, prereqmapping = prereq.get_prerequisites()
prerequisite_mapping.update({prereq.pk: prereqmapping})
prereqlist.extend(prlist)
return prereqlist, prerequisite_mapping
def get_postrequisites(self):
postrequisite_mapping = {}
postrequisites = self.is_prerequisite_of.all()
postreqlist = list(postrequisites)
for postreq in postrequisites:
prlist, postreqmapping = postreq.get_postrequisites()
postrequisite_mapping.update({postreq.pk: postreqmapping})
postreqlist.extend(prlist)
return postreqlist, postrequisite_mapping
def get_channel_id(self):
if hasattr(self, "channel_id"):
return self.channel_id
channel = self.get_channel()
if channel:
return channel.id
return None
def get_channel(self):
try:
root = self.get_root()
if not root:
return None
return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(previous_tree=root)).first()
except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
return None
def get_thumbnail(self):
# thumbnail_encoding may not be strict JSON, so parse it with load_json_string to get a dict
if self.thumbnail_encoding:
thumbnail_data = load_json_string(self.thumbnail_encoding)
if type(thumbnail_data) is dict and thumbnail_data.get("base64"):
return thumbnail_data["base64"]
thumbnail = self.files.filter(preset__thumbnail=True).first()
if thumbnail:
return generate_storage_url(str(thumbnail))
return ""
@classmethod
def get_nodes_with_title(cls, title, limit_to_children_of=None):
"""
Returns all ContentNodes with a given title. If limit_to_children_of
is passed in with an id, only look at all the children of the node with that id.
"""
if limit_to_children_of:
root = cls.objects.get(id=limit_to_children_of)
return root.get_descendants().filter(title=title)
return cls.objects.filter(title=title)
def get_details(self, channel_id=None):
"""
Returns information about the node and its children, including total size, languages, files, etc.
:return: A dictionary with detailed statistics and information about the node.
"""
from contentcuration.viewsets.common import SQArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.common import SQRelatedArrayAgg
from contentcuration.viewsets.common import SQSum
from contentcuration.viewsets.common import SQJSONBKeyArrayAgg
node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id).order_by()
descendants = (
self.get_descendants()
.values("id")
)
if channel_id:
channel = Channel.objects.filter(id=channel_id)[0]
else:
channel = self.get_channel()
if not descendants.exists():
data = {
"last_update": pytz.utc.localize(datetime.now()).strftime(
settings.DATE_TIME_FORMAT
),
"created": self.created.strftime(settings.DATE_TIME_FORMAT),
"resource_count": 0,
"resource_size": 0,
"includes": {"coach_content": 0, "exercises": 0},
"kind_count": [],
"languages": [],
"accessible_languages": [],
"licenses": [],
"tags": [],
"copyright_holders": [],
"authors": [],
"aggregators": [],
"providers": [],
"sample_pathway": [],
"original_channels": [],
"sample_nodes": [],
"levels": [],
"categories": [],
}
# Set cache with latest data
cache.set("details_{}".format(self.node_id), json.dumps(data), None)
return data
# Get resources
resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()
nodes = With(
File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
.values("checksum", "file_size")
.order_by(),
name="nodes",
)
file_query = (
nodes.queryset().with_cte(nodes).values("checksum", "file_size").distinct()
)
l_nodes = With(
File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
.values("language_id", "preset_id")
.order_by(),
name="l_nodes",
)
accessible_languages_query = (
l_nodes.queryset()
.filter(preset_id=format_presets.VIDEO_SUBTITLE)
.with_cte(l_nodes)
.values("language__native_name")
.distinct()
)
tags_query = str(
ContentTag.objects.filter(
tagged_content__pk__in=descendants.values_list("pk", flat=True)
)
.values("tag_name")
.annotate(count=Count("tag_name"))
.query
).replace("topic", "'topic'")
kind_count_query = str(
resources.values("kind_id").annotate(count=Count("kind_id")).query
).replace("topic", "'topic'")
node = node.annotate(
resource_count=SQCount(resources, field="id"),
resource_size=SQSum(file_query, field="file_size"),
copyright_holders=SQArrayAgg(
resources.distinct("copyright_holder").order_by("copyright_holder"),
field="copyright_holder",
),
authors=SQArrayAgg(
resources.distinct("author").order_by("author"), field="author"
),
aggregators=SQArrayAgg(
resources.distinct("aggregator").order_by("aggregator"),
field="aggregator",
),
providers=SQArrayAgg(
resources.distinct("provider").order_by("provider"), field="provider"
),
languages=SQRelatedArrayAgg(
descendants.exclude(language=None)
.distinct("language__native_name")
.order_by(),
field="language__native_name",
fieldname="native_name",
),
accessible_languages=SQRelatedArrayAgg(
accessible_languages_query,
field="language__native_name",
fieldname="native_name",
),
licenses=SQRelatedArrayAgg(
resources.exclude(license=None)
.distinct("license__license_name")
.order_by("license__license_name"),
field="license__license_name",
fieldname="license_name",
),
kind_count=RawSQL(
"SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(
kind_count_query
),
(),
),
tags_list=RawSQL(
"SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(tags_query), ()
),
coach_content=SQCount(
resources.filter(role_visibility=roles.COACH), field="id"
),
exercises=SQCount(
resources.filter(kind_id=content_kinds.EXERCISE), field="id"
),
levels=SQJSONBKeyArrayAgg(
descendants.exclude(grade_levels__isnull=True),
field="grade_levels",
),
all_categories=SQJSONBKeyArrayAgg(
descendants.exclude(categories__isnull=True),
field="categories",
),
)
# Get sample pathway by getting longest path
# Using resources.aggregate adds a lot of time, use values that have already been fetched
max_level = max(
resources.values_list("level", flat=True).order_by().distinct() or [0]
)
m_nodes = With(
resources.values("id", "level", "tree_id", "lft").order_by(),
name="m_nodes",
)
deepest_node_record = (
m_nodes.queryset()
.with_cte(m_nodes)
.filter(level=max_level)
.values("id")
.order_by("tree_id", "lft")
.first()
)
if deepest_node_record:
deepest_node = ContentNode.objects.get(pk=deepest_node_record["id"])
pathway = (
list(
deepest_node.get_ancestors()
.order_by()
.exclude(parent=None)
.values("title", "node_id", "kind_id")
.order_by()
)
if deepest_node_record
else []
)
sample_nodes = (
[
{
"node_id": n.node_id,
"title": n.title,
"description": n.description,
"thumbnail": n.get_thumbnail(),
"kind": n.kind_id,
}
for n in deepest_node.get_siblings(include_self=True)[0:4]
]
if deepest_node_record
else []
)
# Get list of channels nodes were originally imported from (omitting the current channel)
channel_id = channel and channel.id
originals = (
resources.values("original_channel_id")
.annotate(count=Count("original_channel_id"))
.order_by("original_channel_id")
)
originals = {c["original_channel_id"]: c["count"] for c in originals}
original_channels = (
Channel.objects.exclude(pk=channel_id)
.filter(pk__in=originals.keys(), deleted=False)
.order_by()
)
original_channels = [
{
"id": c.id,
"name": "{}{}".format(
c.name, _(" (Original)") if channel_id == c.id else ""
),
"thumbnail": c.get_thumbnail(),
"count": originals[c.id],
}
for c in original_channels
]
node = (
node.order_by()
.values(
"id",
"resource_count",
"resource_size",
"copyright_holders",
"authors",
"aggregators",
"providers",
"languages",
"accessible_languages",
"coach_content",
"licenses",
"tags_list",
"kind_count",
"exercises",
"levels",
"all_categories",
)
.first()
)
for_educators = {
"coach_content": node["coach_content"],
"exercises": node["exercises"],
}
# Serialize data
data = {
"last_update": pytz.utc.localize(datetime.now()).strftime(
settings.DATE_TIME_FORMAT
),
"created": self.created.strftime(settings.DATE_TIME_FORMAT),
"resource_count": node.get("resource_count", 0),
"resource_size": node.get("resource_size", 0),
"includes": for_educators,
"kind_count": node.get("kind_count") or [],
"languages": node.get("languages") or [],
"accessible_languages": node.get("accessible_languages") or [],
"licenses": node.get("licenses") or [],
"tags": node.get("tags_list") or [],
"original_channels": original_channels,
"sample_pathway": pathway,
"sample_nodes": sample_nodes,
# source model fields for the below default to an empty string, but can also be null
"authors": list(filter(bool, node["authors"])),
"aggregators": list(filter(bool, node["aggregators"])),
"providers": list(filter(bool, node["providers"])),
"copyright_holders": list(filter(bool, node["copyright_holders"])),
"levels": node.get("levels") or [],
"categories": node.get("all_categories") or [],
}
# Set cache with latest data
cache.set("details_{}".format(self.node_id), json.dumps(data), None)
return data
def has_changes(self):
mptt_opts = self._mptt_meta
# Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods.
blacklist = {
'changed',
'modified',
'publishing',
mptt_opts.tree_id_attr,
mptt_opts.left_attr,
mptt_opts.right_attr,
mptt_opts.level_attr,
}
original_values = self._field_updates.changed()
return any(field not in blacklist for field in original_values)
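# Hedged, commented-out sketch of the dirty-tracking contract above (assumes a
# saved ContentNode instance `node`; nothing here runs at import time):
#
#   node.title = "Renamed"
#   node.has_changes()  # -> True: 'title' is tracked and not blacklisted
#   node.lft = 42       # mptt bookkeeping fields alone do not count
#   # has_changes() would still report True only because of 'title'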
def recalculate_editors_storage(self):
from contentcuration.utils.user import calculate_user_storage
for editor in self.files.values_list('uploaded_by_id', flat=True).distinct():
calculate_user_storage(editor)
def mark_complete(self): # noqa C901
errors = []
# A missing title is an error, except on root nodes (which have no parent).
if not (bool(self.title) or self.parent_id is None):
errors.append("Empty title")
if self.kind_id != content_kinds.TOPIC:
if not self.license:
errors.append("Missing license")
if self.license and self.license.is_custom and not self.license_description:
errors.append("Missing license description for custom license")
if self.license and self.license.copyright_holder_required and not self.copyright_holder:
errors.append("Missing required copyright holder")
if self.kind_id != content_kinds.EXERCISE and not self.files.filter(preset__supplementary=False).exists():
errors.append("Missing default file")
if self.kind_id == content_kinds.EXERCISE:
# Check to see if the exercise has at least one assessment item that has:
if not self.assessment_items.filter(
# Item with non-blank raw data
~Q(raw_data="") | (
# A non-blank question
~Q(question='')
# Non-blank answers
& ~Q(answers='[]')
# With either an input question or one answer marked as correct
& (Q(type=exercises.INPUT_QUESTION) | Q(answers__iregex=r'"correct":\s*true'))
)
).exists():
errors.append("No questions with question text and complete answers")
# Check that it has a mastery model set
# Either check the previous location for the mastery model, or rely on our completion
# criteria validation to guarantee that, if it has been set, it has been set correctly.
criterion = self.extra_fields.get("options", {}).get("completion_criteria")
if not (self.extra_fields.get("mastery_model") or criterion):
errors.append("Missing mastery criterion")
if criterion:
try:
completion_criteria.validate(criterion, kind=content_kinds.EXERCISE)
except completion_criteria.ValidationError:
errors.append("Mastery criterion is defined but is invalid")
self.complete = not errors
return errors
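# Hedged, commented-out usage sketch (assumes an exercise node `node` and a
# configured logger; the names here are illustrative, not from this module):
#
#   errors = node.mark_complete()
#   if errors:
#       logger.warning("Node %s incomplete: %s", node.id, errors)
#   node.save()  # mark_complete() only sets self.complete; it does not save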
def make_content_id_unique(self):
"""
If self is NOT an original contentnode (in other words, it is a copied contentnode)
and a contentnode with the same content_id exists, then we update self's content_id.
"""
is_node_original = self.original_source_node_id is None or self.original_source_node_id == self.node_id
node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(content_id=self.content_id)
if (not is_node_original) and node_same_content_id.exists():
ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.uuid4().hex)
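# Commented-out sketch of the intended effect (assumes `copy` was produced by
# copy_to(), so its original_source_node_id differs from its node_id):
#
#   copy.content_id == original.content_id   # True right after copying
#   copy.make_content_id_unique()
#   copy.refresh_from_db()
#   copy.content_id == original.content_id   # now False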
def on_create(self):
self.changed = True
self.recalculate_editors_storage()
self.set_default_learning_activity()
def on_update(self):
self.changed = self.changed or self.has_changes()
def move_to(self, target, *args, **kwargs):
parent_was_trashtree = self.parent.channel_trash.exists()
super(ContentNode, self).move_to(target, *args, **kwargs)
self.save()
# Update tree_id cache when node is moved to another tree
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.tree_id, None)
# Recalculate storage if node was moved to or from the trash tree
if target.channel_trash.exists() or parent_was_trashtree:
self.recalculate_editors_storage()
def set_default_learning_activity(self):
if self.learning_activities is None:
if self.kind in kind_activity_map:
self.learning_activities = {
kind_activity_map[self.kind]: True
}
def save(self, skip_lock=False, *args, **kwargs):
if self._state.adding:
self.on_create()
else:
self.on_update()
# Logic borrowed from mptt - do a simple check to see if we have changed
# the parent of the node. We use the mptt specific cached fields here
# because these get updated by the mptt move methods, and so will be up to
# date, meaning we can avoid locking the DB twice when the fields have already
# been updated in the database.
# If most moves are being done independently of just changing the parent
# and then calling a save, locking within the save method itself should rarely
# be triggered - meaning updates to contentnode metadata should only rarely
# trigger a write lock on mptt fields.
old_parent_id = self._field_updates.changed().get("parent_id")
if self._state.adding and (self.parent_id or self.parent):
same_order = False
elif old_parent_id is DeferredAttribute:
same_order = True
else:
same_order = old_parent_id == self.parent_id
if not same_order:
changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id])))
else:
changed_ids = []
if not same_order and not skip_lock:
# Lock the mptt fields for the trees of the old and new parent
with ContentNode.objects.lock_mptt(*ContentNode.objects
.filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid])
.values_list('tree_id', flat=True).distinct()):
super(ContentNode, self).save(*args, **kwargs)
# Always write to the database for the parent change updates, as we have
# no persistent object references for the original and new parent to modify
if changed_ids:
ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
else:
super(ContentNode, self).save(*args, **kwargs)
# Always write to the database for the parent change updates, as we have
# no persistent object references for the original and new parent to modify
if changed_ids:
ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
# Copied from MPTT
save.alters_data = True
def delete(self, *args, **kwargs):
parent = self.parent or self._field_updates.changed().get('parent')
if parent:
parent.changed = True
parent.save()
self.recalculate_editors_storage()
# Lock the mptt fields for the tree of this node
with ContentNode.objects.lock_mptt(self.tree_id):
return super(ContentNode, self).delete(*args, **kwargs)
# Copied from MPTT
delete.alters_data = True
def copy_to(
self,
target=None,
position="last-child",
pk=None,
mods=None,
excluded_descendants=None,
can_edit_source_channel=None,
batch_size=None,
progress_tracker=None
):
return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0]
def copy(self):
return self.copy_to()
def is_publishable(self):
return self.complete and self.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists()
class Meta:
verbose_name = "Topic"
verbose_name_plural = "Topics"
# Do not allow two nodes with the same name on the same level
# unique_together = ('parent', 'title')
indexes = [
models.Index(fields=["node_id"], name=NODE_ID_INDEX_NAME),
models.Index(fields=["-modified"], name=NODE_MODIFIED_DESC_INDEX_NAME),
]
class ContentKind(models.Model):
kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices)
def __str__(self):
return self.kind
class FileFormat(models.Model):
extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices)
mimetype = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.extension
class FormatPreset(models.Model):
id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices)
readable_name = models.CharField(max_length=400)
multi_language = models.BooleanField(default=False)
supplementary = models.BooleanField(default=False)
thumbnail = models.BooleanField(default=False)
subtitle = models.BooleanField(default=False)
display = models.BooleanField(default=True) # Render on client side
order = models.IntegerField(default=0)
kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL)
allowed_formats = models.ManyToManyField(FileFormat, blank=True)
def __str__(self):
return self.id
@classmethod
def guess_format_preset(cls, filename):
"""
Guess the format preset of a filename based on its extension.
Return None if format is unknown.
"""
_, ext = os.path.splitext(filename)
ext = ext.lstrip(".")
f = FormatPreset.objects.filter(
allowed_formats__extension=ext,
display=True
)
return f.first()
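# Hedged, commented-out example (the preset returned depends on the
# FormatPreset/FileFormat rows loaded in the database):
#
#   FormatPreset.guess_format_preset("lecture.mp4")  # e.g. a video preset
#   FormatPreset.guess_format_preset("notes.xyz")    # -> None for unknown extensions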
@classmethod
def get_preset(cls, preset_name):
"""
Get the FormatPreset object with that exact name.
Returns None if that format preset is not found.
"""
try:
return FormatPreset.objects.get(id=preset_name)
except FormatPreset.DoesNotExist:
return None
class Language(models.Model):
id = models.CharField(max_length=14, primary_key=True)
lang_code = models.CharField(max_length=3, db_index=True)
lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)
readable_name = models.CharField(max_length=100, blank=True)
native_name = models.CharField(max_length=100, blank=True)
lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])
def ietf_name(self):
return "{code}-{subcode}".format(code=self.lang_code,
subcode=self.lang_subcode) if self.lang_subcode else self.lang_code
def __str__(self):
return self.ietf_name()
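# Example: a Language row with lang_code="pt" and lang_subcode="BR" renders as
# "pt-BR", while one with lang_code="en" and no subcode renders as "en".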
ASSESSMENT_ID_INDEX_NAME = "assessment_id_idx"
class AssessmentItem(models.Model):
type = models.CharField(max_length=50, default="multiplechoice")
question = models.TextField(blank=True)
hints = models.TextField(default="[]")
answers = models.TextField(default="[]")
order = models.IntegerField(default=1)
contentnode = models.ForeignKey('ContentNode', related_name="assessment_items", blank=True, null=True,
db_index=True, on_delete=models.CASCADE)
# Note this field is indexed, but we are using the Index API to give it an explicit name; see the model Meta
assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
raw_data = models.TextField(blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
randomize = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
objects = CustomManager()
# Track all updates
_field_updates = FieldTracker()
def has_changes(self):
return bool(self._field_updates.changed())
class Meta:
indexes = [
models.Index(fields=["assessment_id"], name=ASSESSMENT_ID_INDEX_NAME),
]
unique_together = ['contentnode', 'assessment_id']
_permission_filter = Q(tree_id=OuterRef("contentnode__tree_id"))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(
edit=edit_cte.exists(cls._permission_filter),
)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(
public=Exists(
Channel.objects.filter(
public=True, main_tree__tree_id=OuterRef("contentnode__tree_id")
).values("pk")
),
)
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
edit=edit_cte.exists(cls._permission_filter),
view=view_cte.exists(cls._permission_filter),
)
if user.is_admin:
return queryset
return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
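# The pattern above recurs across models in this module: build a CTE of channel
# ids the user can edit/view, annotate rows with EXISTS subqueries joined on
# tree_id, then filter on those flags (admins bypass the filter). Hedged,
# commented-out call-site sketch (assumes a `request` with an authenticated user):
#
#   qs = AssessmentItem.filter_view_queryset(AssessmentItem.objects.all(), request.user)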
def on_create(self):
"""
When an exercise is added to a contentnode, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def on_update(self):
"""
When an exercise of a contentnode is updated, update the node's content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
def delete(self, *args, **kwargs):
"""
When an exercise is deleted from a contentnode, update its content_id
if it's a copied contentnode.
"""
self.contentnode.make_content_id_unique()
return super(AssessmentItem, self).delete(*args, **kwargs)
class SlideshowSlide(models.Model):
contentnode = models.ForeignKey('ContentNode', related_name="slideshow_slides", blank=True, null=True,
db_index=True, on_delete=models.CASCADE)
sort_order = models.FloatField(default=1.0)
metadata = JSONField(default=dict)
class StagedFile(models.Model):
"""
Keeps track of files uploaded through Ricecooker to avoid users going over their disk quota limit
"""
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE)
FILE_DISTINCT_INDEX_NAME = "file_checksum_file_size_idx"
FILE_MODIFIED_DESC_INDEX_NAME = "file_modified_desc_idx"
FILE_DURATION_CONSTRAINT = "file_media_duration_int"
MEDIA_PRESETS = [
format_presets.AUDIO,
format_presets.AUDIO_DEPENDENCY,
format_presets.VIDEO_HIGH_RES,
format_presets.VIDEO_LOW_RES,
format_presets.VIDEO_DEPENDENCY,
]
class File(models.Model):
"""
The bottom layer of the contentDB schema; defines the basic building block for content.
Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
"""
id = UUIDField(primary_key=True, default=uuid.uuid4)
checksum = models.CharField(max_length=400, blank=True, db_index=True)
file_size = models.IntegerField(blank=True, null=True)
file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500,
blank=True)
contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
slideshow_slide = models.ForeignKey(SlideshowSlide, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
original_filename = models.CharField(max_length=255, blank=True)
source_url = models.CharField(max_length=400, blank=True, null=True)
uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
modified = models.DateTimeField(auto_now=True, verbose_name="modified", null=True)
duration = models.IntegerField(blank=True, null=True)
objects = CustomManager()
_permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q(tree_id=OuterRef("assessment_item__contentnode__tree_id"))
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter))
if user.is_admin:
return queryset
return queryset.filter(
Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
)
@classmethod
def filter_view_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
queryset = queryset.annotate(
public=Exists(
Channel.objects.filter(public=True).filter(
Q(main_tree__tree_id=OuterRef("contentnode__tree_id"))
| Q(main_tree__tree_id=OuterRef("assessment_item__contentnode__tree_id"))
).values("pk")
),
)
if not user_id:
return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
edit_cte = PermissionCTE.editable_channels(user_id)
view_cte = PermissionCTE.view_only_channels(user_id)
queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
edit=edit_cte.exists(cls._permission_filter),
view=view_cte.exists(cls._permission_filter),
)
if user.is_admin:
return queryset
return queryset.filter(
Q(view=True)
| Q(edit=True)
| Q(public=True)
| Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
)
class Admin:
pass
def __str__(self):
return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension)
def filename(self):
"""
Returns just the filename of the File in storage, without the path
e.g. abcd.mp4
"""
# TODO(aron): write tests for this
return os.path.basename(self.file_on_disk.name)
def update_contentnode_content_id(self):
"""
If the file is attached to a contentnode and is not a thumbnail
then update that contentnode's content_id if it's a copied contentnode.
"""
if self.contentnode and self.preset.thumbnail is False:
self.contentnode.make_content_id_unique()
def on_update(self):
# since modified was added later as a nullable field to File, we don't use a default but
# instead we'll just make sure it's always updated through our serializers
self.modified = timezone.now()
self.update_contentnode_content_id()
def save(self, set_by_file_on_disk=True, *args, **kwargs):
"""
Override the default save method.
If the file_on_disk FileField gets passed a content copy:
1. generate the MD5 from the content copy
2. fill the other fields accordingly
"""
from contentcuration.utils.user import calculate_user_storage
# check if the file format exists in file_formats.choices
if self.file_format_id:
if self.file_format_id not in dict(file_formats.choices):
raise ValidationError("Invalid file_format")
if set_by_file_on_disk and self.file_on_disk:  # if file_on_disk is supplied, hash the file contents
if self.checksum is None or self.checksum == "":
md5 = hashlib.md5()
for chunk in self.file_on_disk.chunks():
md5.update(chunk)
self.checksum = md5.hexdigest()
if not self.file_size:
self.file_size = self.file_on_disk.size
if not self.file_format_id:
ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
if ext in list(dict(file_formats.choices).keys()):
self.file_format_id = ext
else:
raise ValueError("Files of type `{}` are not supported.".format(ext))
super(File, self).save(*args, **kwargs)
if self.uploaded_by_id:
calculate_user_storage(self.uploaded_by_id)
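# Hedged, commented-out sketch of the checksum flow above (assumes Django is
# configured and ContentFile is imported from django.core.files.base; "txt"
# being a registered extension is an assumption, not verified here):
#
#   f = File(file_on_disk=ContentFile(b"hello", name="greeting.txt"))
#   f.save()
#   # f.checksum == hashlib.md5(b"hello").hexdigest(), f.file_size == 5,
#   # and f.file_format_id == "txt" if registered, else ValueError is raised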
class Meta:
indexes = [
models.Index(fields=['checksum', 'file_size'], name=FILE_DISTINCT_INDEX_NAME),
models.Index(fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME),
]
constraints = [
# Enforces that duration is null for non-media presets. For media presets the
# duration may be null, but if it is not null it must be greater than 0.
models.CheckConstraint(
check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)),
name=FILE_DURATION_CONSTRAINT
)
]
@receiver(models.signals.post_delete, sender=File)
def auto_delete_file_on_delete(sender, instance, **kwargs):
"""
Signal handler run when a `File` object is deleted. The file on disk is not
removed here, since other File rows may reference the same checksum; currently
this handler only recalculates the uploader's storage.
Be careful! We don't know if this will work when performing a bulk delete on File objects.
"""
# Recalculate storage
from contentcuration.utils.user import calculate_user_storage
if instance.uploaded_by_id:
calculate_user_storage(instance.uploaded_by_id)
def delete_empty_file_reference(checksum, extension):
filename = checksum + '.' + extension
if not File.objects.filter(checksum=checksum).exists() and not Channel.objects.filter(thumbnail=filename).exists():
storage_path = generate_object_storage_name(checksum, filename)
if default_storage.exists(storage_path):
default_storage.delete(storage_path)
class PrerequisiteContentRelationship(models.Model):
"""
Predefine the prerequisite relationship between two ContentNode objects.
"""
target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)
class Meta:
unique_together = ['target_node', 'prerequisite']
def clean(self, *args, **kwargs):
# self reference exception
if self.target_node == self.prerequisite:
raise IntegrityError('Cannot self reference as prerequisite.')
# immediate cyclic exception
if PrerequisiteContentRelationship.objects.using(self._state.db) \
.filter(target_node=self.prerequisite, prerequisite=self.target_node):
raise IntegrityError(
'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
% (self.target_node, self.prerequisite))
# distant cyclic exception
# elif <this is a nice to have exception, may implement in the future when the priority raises.>
# raise Exception('Note: Prerequisite relationship is acyclic! %s and %s forms a closed loop!' % (
# self.target_node, self.prerequisite
# ))
super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)
def save(self, *args, **kwargs):
self.full_clean()
super(PrerequisiteContentRelationship, self).save(*args, **kwargs)
def __unicode__(self):
return u'%s' % (self.pk)
class RelatedContentRelationship(models.Model):
"""
Predefine the related relationship between two ContentNode objects.
"""
contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE)
class Meta:
unique_together = ['contentnode_1', 'contentnode_2']
def save(self, *args, **kwargs):
# self reference exception
if self.contentnode_1 == self.contentnode_2:
raise IntegrityError('Cannot self reference as related.')
# handle immediate cyclic
if RelatedContentRelationship.objects.using(self._state.db) \
.filter(contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1):
return # silently cancel the save
super(RelatedContentRelationship, self).save(*args, **kwargs)
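# Because the reverse pair is silently skipped above, the relation behaves as
# an undirected edge even though the two FK columns make it look directional:
# saving (a, b) and then (b, a) leaves only the (a, b) row in the table.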
class Invitation(models.Model):
""" Invitation to edit channel """
id = UUIDField(primary_key=True, default=uuid.uuid4)
accepted = models.BooleanField(default=False)
declined = models.BooleanField(default=False)
revoked = models.BooleanField(default=False)
invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to')
share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
email = models.EmailField(max_length=100, null=True)
sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE)
channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE)
first_name = models.CharField(max_length=100, blank=True)
last_name = models.CharField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = "Invitation"
verbose_name_plural = "Invitations"
def accept(self):
user = User.objects.filter(email__iexact=self.email).first()
if self.channel:
# channel is a nullable field, so check that it exists.
if self.share_mode == VIEW_ACCESS:
self.channel.editors.remove(user)
self.channel.viewers.add(user)
else:
self.channel.viewers.remove(user)
self.channel.editors.add(user)
@classmethod
def filter_edit_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(
Q(email__iexact=user.email)
| Q(sender=user)
| Q(channel__editors=user)
).distinct()
@classmethod
def filter_view_queryset(cls, queryset, user):
if user.is_anonymous:
return queryset.none()
if user.is_admin:
return queryset
return queryset.filter(
Q(email__iexact=user.email)
| Q(sender=user)
| Q(channel__editors=user)
| Q(channel__viewers=user)
).distinct()
class Change(models.Model):
server_rev = models.BigAutoField(primary_key=True)
# We need to store the user who is applying this change
# so that we can validate they have permissions to do so.
# Allowed to be null so that we don't lose changes if a user
# account is hard deleted.
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name="changes_by_user")
# Almost all changes are related to channels, but some are specific only to users
# so we allow this to be nullable for these edge cases.
# Indexed by default because it's a ForeignKey field.
channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=models.CASCADE)
# For those changes related to users, store a user value instead of channel
# this may be different to created_by, as changes to invitations affect individual users.
# Indexed by default because it's a ForeignKey field.
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE, related_name="changes_about_user")
# Use client_rev to keep track of changes coming from the client side
# but let it be blank or null for changes we generate on the server side
client_rev = models.IntegerField(null=True, blank=True)
# client_rev numbers are per session; we add the session key here for bookkeeping
# to allow a check within the same session to return whether a change has been applied
# or not, and hence remove it from the frontend
session = models.ForeignKey(Session, null=True, blank=True, on_delete=models.SET_NULL)
table = models.CharField(max_length=32)
change_type = models.IntegerField()
# Use the DRF JSONEncoder class as the encoder here
# so that we can handle anything that has been deserialized by DRF
# or that will later be serialized by DRF
kwargs = JSONField(encoder=JSONEncoder)
applied = models.BooleanField(default=False)
errored = models.BooleanField(default=False)
@classmethod
def _create_from_change(cls, created_by_id=None, channel_id=None, user_id=None, session_key=None, applied=False, table=None, rev=None, **data):
change_type = data.pop("type")
if table is None or table not in ALL_TABLES:
raise TypeError("table is a required argument for creating changes and must be a valid table name")
if change_type is None or change_type not in ALL_CHANGES:
raise TypeError("change_type is a required argument for creating changes and must be a valid change type integer")
return cls(
session_id=session_key,
created_by_id=created_by_id,
channel_id=channel_id,
user_id=user_id,
client_rev=rev,
table=table,
change_type=change_type,
kwargs=data,
applied=applied
)
@classmethod
def create_changes(cls, changes, created_by_id=None, session_key=None, applied=False):
change_models = []
for change in changes:
change_models.append(cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change))
cls.objects.bulk_create(change_models)
return change_models
@classmethod
def create_change(cls, change, created_by_id=None, session_key=None, applied=False):
obj = cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change)
obj.save()
return obj
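# Hedged, commented-out sketch; CREATED and "contentnode" are assumed stand-ins
# for real values from the change-event constants, not verified names:
#
#   Change.create_change(
#       {"type": CREATED, "table": "contentnode", "rev": 1, "key": node_id},
#       created_by_id=user.id,
#       session_key=request.session.session_key,
#   )
#   # any extra keys ("key" here) are stored in the JSON `kwargs` field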
@classmethod
def serialize(cls, change):
datum = get_attribute(change, ["kwargs"]).copy()
datum.update({
"server_rev": get_attribute(change, ["server_rev"]),
"table": get_attribute(change, ["table"]),
"type": get_attribute(change, ["change_type"]),
"channel_id": get_attribute(change, ["channel_id"]),
"user_id": get_attribute(change, ["user_id"]),
"created_by_id": get_attribute(change, ["created_by_id"])
})
return datum
def serialize_to_change_dict(self):
return self.serialize(self)
class TaskResultCustom(object):
"""
Custom fields to add to django_celery_results's TaskResult model
If adding fields to this class, run `makemigrations` then move the generated migration from the
`django_celery_results` app to the `contentcuration` app and override the constructor to change
the app_label. See `0141_add_task_signature` for an example
"""
# user shouldn't be null, but in order to append the field, this needs to be allowed
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="tasks", on_delete=models.CASCADE, null=True)
channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)
progress = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0), MaxValueValidator(100)])
# a hash of the task name and kwargs for identifying repeat tasks
signature = models.CharField(null=True, blank=False, max_length=32)
super_as_dict = TaskResult.as_dict
def as_dict(self):
"""
:return: A dictionary representation
"""
super_dict = self.super_as_dict()
super_dict.update(
user_id=self.user_id,
channel_id=self.channel_id,
progress=self.progress,
)
return super_dict
@classmethod
def contribute_to_class(cls, model_class=TaskResult):
"""
Adds fields to model, by default TaskResult
:param model_class: TaskResult model
"""
for field in dir(cls):
if not field.startswith("_") and field not in ('contribute_to_class', 'Meta'):
model_class.add_to_class(field, getattr(cls, field))
# manually add Meta afterwards
setattr(model_class._meta, 'indexes', getattr(model_class._meta, 'indexes', []) + cls.Meta.indexes)
class Meta:
indexes = [
# add index that matches query usage for signature
models.Index(
fields=['signature'],
name='task_result_signature_idx',
condition=Q(status__in=celery_states.UNREADY_STATES),
),
]
# trigger class contributions immediately
TaskResultCustom.contribute_to_class()
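# After the contribution above, the extra columns behave like native fields on
# TaskResult, e.g. (commented-out sketch):
#
#   TaskResult.objects.filter(channel_id=channel.id, user=user).values("progress")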
|
flexible
|
{
"blob_id": "32e904a39d03d3166369420b49db0b9b118110a3",
"index": 4179,
"step-1": "<mask token>\n\n\nclass ContentKind(models.Model):\n <mask token>\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=\n file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=\n format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True)\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets',\n null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n _, ext = os.path.splitext(filename)\n ext = ext.lstrip('.')\n f = FormatPreset.objects.filter(allowed_formats__extension=ext,\n display=True)\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=\n True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.\n LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.\n lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\n<mask token>\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default='multiplechoice')\n question = models.TextField(blank=True)\n hints = models.TextField(default='[]')\n answers = models.TextField(default='[]')\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'assessment_items', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,\n editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n objects = CustomManager()\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n\n class Meta:\n indexes = [models.Index(fields=['assessment_id'], name=\n ASSESSMENT_ID_INDEX_NAME)]\n unique_together = ['contentnode', 'assessment_id']\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = 
not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'\n )).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n \"\"\"\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n \"\"\"\n When an exercise is updated of a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n When an exercise is deleted from a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, **kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'slideshow_slides', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n \"\"\"\n Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit\n \"\"\"\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files',\n blank=True, null=True, on_delete=models.CASCADE)\n\n\n<mask token>\n\n\nclass File(models.Model):\n \"\"\"\n The bottom layer of the contentDB schema, defines the basic building brick for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=\n default_storage, max_length=500, blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files',\n blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n file_format = models.ForeignKey(FileFormat, related_name='files', blank\n =True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, 
related_name='files', blank=\n True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n modified = models.DateTimeField(auto_now=True, verbose_name='modified',\n null=True)\n duration = models.IntegerField(blank=True, null=True)\n objects = CustomManager()\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(\n tree_id=OuterRef('assessment_item__contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.\n _permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(edit=True) | Q(uploaded_by=user,\n contentnode__isnull=True, assessment_item__isnull=True))\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True).filter(Q(main_tree__tree_id=OuterRef(\n 'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(\n 'assessment_item__contentnode__tree_id'))).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |\n Q(uploaded_by=user, contentnode__isnull=True,\n assessment_item__isnull=True))\n\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum,\n extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. abcd.mp4\n \"\"\"\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Overrider the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. 
fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError('Invalid file_format')\n if set_by_file_on_disk and self.file_on_disk:\n if self.checksum is None or self.checksum == '':\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError('Files of type `{}` are not supported.'\n .format(ext))\n super(File, self).save(*args, **kwargs)\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n\n class Meta:\n indexes = [models.Index(fields=['checksum', 'file_size'], name=\n FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],\n name=FILE_MODIFIED_DESC_INDEX_NAME)]\n constraints = [models.CheckConstraint(check=Q(preset__in=\n MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name\n =FILE_DURATION_CONSTRAINT)]\n\n\n<mask token>\n\n\nclass PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n if PrerequisiteContentRelationship.objects.using(self._state.db\n ).filter(target_node=self.prerequisite, prerequisite=self.\n target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! 
%s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n if RelatedContentRelationship.objects.using(self._state.db).filter(\n contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1\n ):\n return\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\n 'sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name=\n 'pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = models.CharField(max_length=100, blank=True, null=True)\n\n\n class Meta:\n verbose_name = 'Invitation'\n verbose_name_plural = 'Invitations'\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user)).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user) | Q(channel__viewers=user)).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,\n blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=\n models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=\n True, on_delete=models.CASCADE, related_name='changes_about_user')\n client_rev = models.IntegerField(null=True, blank=True)\n session = models.ForeignKey(Session, null=True, blank=True, 
on_delete=\n models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None,\n user_id=None, session_key=None, applied=False, table=None, rev=None,\n **data):\n change_type = data.pop('type')\n if table is None or table not in ALL_TABLES:\n raise TypeError(\n 'table is a required argument for creating changes and must be a valid table name'\n )\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\n 'change_type is a required argument for creating changes and must be a valid change type integer'\n )\n return cls(session_id=session_key, created_by_id=created_by_id,\n channel_id=channel_id, user_id=user_id, client_rev=rev, table=\n table, change_type=change_type, kwargs=data, applied=applied)\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None,\n applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=\n created_by_id, session_key=session_key, applied=applied, **\n change))\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None,\n applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id,\n session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, ['kwargs']).copy()\n datum.update({'server_rev': get_attribute(change, ['server_rev']),\n 'table': get_attribute(change, ['table']), 'type':\n get_attribute(change, ['change_type']), 'channel_id':\n get_attribute(change, ['channel_id']), 'user_id': get_attribute\n (change, ['user_id']), 'created_by_id': get_attribute(change, [\n 'created_by_id'])})\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. 
See `0141_add_task_signature` for an example\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',\n on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[\n MinValueValidator(0), MaxValueValidator(100)])\n signature = models.CharField(null=True, blank=False, max_length=32)\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(user_id=self.user_id, channel_id=self.channel_id,\n progress=self.progress)\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith('_') and field not in (\n 'contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n setattr(model_class._meta, 'indexes', getattr(model_class._meta,\n 'indexes', []) + cls.Meta.indexes)\n\n\n class Meta:\n indexes = [models.Index(fields=['signature'], name=\n 'task_result_signature_idx', condition=Q(status__in=\n celery_states.UNREADY_STATES))]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass License(models.Model):\n <mask token>\n license_name = models.CharField(max_length=50)\n license_url = models.URLField(blank=True)\n license_description = models.TextField(blank=True)\n copyright_holder_required = models.BooleanField(default=True)\n is_custom = models.BooleanField(default=False)\n exists = models.BooleanField(default=False, verbose_name=\n 'license exists', help_text=\n 'Tells whether or not a content item is licensed to share')\n\n @classmethod\n def validate_name(cls, name):\n if cls.objects.filter(license_name=name).count() == 0:\n raise ValidationError('License `{}` does not exist'.format(name))\n\n def __str__(self):\n return self.license_name\n\n\n<mask token>\n\n\nclass ContentNode(MPTTModel, models.Model):\n \"\"\"\n By default, all nodes have a title and can be used as a topic.\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=\n False, db_index=True)\n node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n original_channel_id = UUIDField(primary_key=False, editable=False, null\n =True, db_index=True)\n source_channel_id = UUIDField(primary_key=False, editable=False, null=True)\n original_source_node_id = UUIDField(primary_key=False, editable=False,\n null=True, db_index=True)\n source_node_id = UUIDField(primary_key=False, editable=False, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n title = models.CharField(max_length=200, blank=True)\n description = models.TextField(blank=True)\n kind = models.ForeignKey('ContentKind', related_name='contentnodes',\n db_index=True, null=True, blank=True, on_delete=models.SET_NULL)\n license = models.ForeignKey('License', null=True, blank=True, on_delete\n =models.SET_NULL)\n license_description = models.CharField(max_length=400, null=True, blank\n =True)\n prerequisite = models.ManyToManyField('self', related_name=\n 'is_prerequisite_of', through='PrerequisiteContentRelationship',\n symmetrical=False, blank=True)\n is_related = models.ManyToManyField('self', related_name='relate_to',\n through='RelatedContentRelationship', symmetrical=False, blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='content_language', on_delete=models.SET_NULL)\n parent = TreeForeignKey('self', null=True, blank=True, related_name=\n 'children', db_index=True, on_delete=models.CASCADE)\n tags = models.ManyToManyField(ContentTag, symmetrical=False,\n related_name='tagged_content', blank=True)\n sort_order = models.FloatField(max_length=50, default=1, verbose_name=\n 'sort order', help_text='Ascending, lowest number shown first')\n copyright_holder = models.CharField(max_length=200, null=True, blank=\n True, default='', help_text=\n 'Organization of person who holds the essential rights')\n original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='duplicates')\n cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='clones')\n thumbnail_encoding = models.TextField(blank=True, null=True)\n created = models.DateTimeField(default=timezone.now, verbose_name='created'\n )\n modified = models.DateTimeField(auto_now=True, verbose_name='modified')\n published = models.BooleanField(default=False)\n publishing = models.BooleanField(default=False)\n complete = 
models.BooleanField(null=True)\n changed = models.BooleanField(default=True)\n \"\"\"\n Extra fields for exercises:\n - type: mastery model to use to determine completion\n - m: m value for M out of N mastery criteria\n - n: n value for M out of N mastery criteria\n \"\"\"\n extra_fields = JSONField(default=dict, blank=True, null=True)\n author = models.CharField(max_length=200, blank=True, default='',\n help_text='Who created this content?', null=True)\n aggregator = models.CharField(max_length=200, blank=True, default='',\n help_text='Who gathered this content together?', null=True)\n provider = models.CharField(max_length=200, blank=True, default='',\n help_text='Who distributed this content?', null=True)\n role_visibility = models.CharField(max_length=50, choices=roles.choices,\n default=roles.LEARNER)\n freeze_authoring_data = models.BooleanField(default=False)\n grade_levels = models.JSONField(blank=True, null=True)\n resource_types = models.JSONField(blank=True, null=True)\n learning_activities = models.JSONField(blank=True, null=True)\n accessibility_labels = models.JSONField(blank=True, null=True)\n categories = models.JSONField(blank=True, null=True)\n learner_needs = models.JSONField(blank=True, null=True)\n suggested_duration = models.IntegerField(blank=True, null=True,\n help_text='Suggested duration for the content node (in seconds)')\n objects = CustomContentNodeTreeManager()\n _field_updates = FieldTracker()\n _permission_filter = Q(tree_id=OuterRef('tree_id'))\n\n @classmethod\n def _annotate_channel_id(cls, queryset):\n return queryset.annotate(channel_id=Subquery(Channel.objects.filter\n (main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat\n =True)[:1]))\n\n @classmethod\n def filter_by_pk(cls, pk):\n \"\"\"\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always\n returns a queryset filtered by pk.\n\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode\n for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. 
If\n a ContentNode does not exist for `pk` then an empty queryset is returned.\n \"\"\"\n query = ContentNode.objects.filter(pk=pk)\n if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:\n tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n if tree_id:\n query = query.filter(tree_id=tree_id)\n else:\n tree_id = ContentNode.objects.filter(pk=pk).values_list(\n 'tree_id', flat=True).first()\n if tree_id:\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),\n tree_id, None)\n query = query.filter(tree_id=tree_id)\n else:\n query = query.none()\n return query\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n @raise_if_unsaved\n def get_root(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return super(ContentNode, self).get_root()\n\n @raise_if_unsaved\n def get_root_id(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return ContentNode.objects.values_list('pk', flat=True).get(tree_id\n =self._mpttfield('tree_id'), parent=None)\n\n def get_tree_data(self, levels=float('inf')):\n \"\"\"\n Returns `levels`-deep tree information starting at current node.\n Args:\n levels (int): depth of tree hierarchy to return\n Returns:\n tree (dict): starting with self, with children list containing either\n the just the children's `node_id`s or full recusive tree.\n \"\"\"\n if self.kind_id == content_kinds.TOPIC:\n node_data = {'title': self.title, 'kind': self.kind_id,\n 'node_id': self.node_id, 'studio_id': self.id}\n children = self.children.all()\n if levels > 0:\n node_data['children'] = [c.get_tree_data(levels=levels - 1) for\n c in children]\n return node_data\n if self.kind_id == content_kinds.EXERCISE:\n return {'title': self.title, 'kind': self.kind_id, 'count':\n self.assessment_items.count(), 'node_id': self.node_id,\n 'studio_id': self.id}\n return {'title': self.title, 'kind': self.kind_id, 'file_size':\n self.files.values('file_size').aggregate(size=Sum('file_size'))\n ['size'], 'node_id': self.node_id, 'studio_id': self.id}\n\n def get_original_node(self):\n original_node = self.original_node or self\n if self.original_channel_id and self.original_source_node_id:\n original_tree_id = Channel.objects.select_related('main_tree').get(\n pk=self.original_channel_id).main_tree.tree_id\n original_node = ContentNode.objects.filter(tree_id=\n original_tree_id, node_id=self.original_source_node_id).first(\n ) or 
ContentNode.objects.filter(tree_id=original_tree_id,\n content_id=self.content_id).first() or self\n return original_node\n\n def get_associated_presets(self):\n key = 'associated_presets_{}'.format(self.kind_id)\n cached_data = cache.get(key)\n if cached_data:\n return cached_data\n presets = list(FormatPreset.objects.filter(kind=self.kind).values())\n cache.set(key, presets, None)\n return presets\n\n def get_prerequisites(self):\n prerequisite_mapping = {}\n prerequisites = self.prerequisite.all()\n prereqlist = list(prerequisites)\n for prereq in prerequisites:\n prlist, prereqmapping = prereq.get_prerequisites()\n prerequisite_mapping.update({prereq.pk: prereqmapping})\n prereqlist.extend(prlist)\n return prereqlist, prerequisite_mapping\n\n def get_postrequisites(self):\n postrequisite_mapping = {}\n postrequisites = self.is_prerequisite_of.all()\n postreqlist = list(postrequisites)\n for postreq in postrequisites:\n prlist, postreqmapping = postreq.get_postrequisites()\n postrequisite_mapping.update({postreq.pk: postreqmapping})\n postreqlist.extend(prlist)\n return postreqlist, postrequisite_mapping\n\n def get_channel_id(self):\n if hasattr(self, 'channel_id'):\n return self.channel_id\n channel = self.get_channel()\n if channel:\n return channel.id\n return None\n\n def get_channel(self):\n try:\n root = self.get_root()\n if not root:\n return None\n return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=\n root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(\n previous_tree=root)).first()\n except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):\n return None\n\n def get_thumbnail(self):\n if self.thumbnail_encoding:\n thumbnail_data = load_json_string(self.thumbnail_encoding)\n if type(thumbnail_data) is dict and thumbnail_data.get('base64'):\n return thumbnail_data['base64']\n thumbnail = self.files.filter(preset__thumbnail=True).first()\n if thumbnail:\n return generate_storage_url(str(thumbnail))\n return ''\n\n @classmethod\n def get_nodes_with_title(cls, title, limit_to_children_of=None):\n \"\"\"\n Returns all ContentNodes with a given title. 
If limit_to_children_of\n is passed in with an id, only look at all the children of the node with that id.\n \"\"\"\n if limit_to_children_of:\n root = cls.objects.get(id=limit_to_children_of)\n return root.get_descendants().filter(title=title)\n return cls.objects.filter(title=title)\n\n def get_details(self, channel_id=None):\n \"\"\"\n Returns information about the node and its children, including total size, languages, files, etc.\n\n :return: A dictionary with detailed statistics and information about the node.\n \"\"\"\n from contentcuration.viewsets.common import SQArrayAgg\n from contentcuration.viewsets.common import SQCount\n from contentcuration.viewsets.common import SQRelatedArrayAgg\n from contentcuration.viewsets.common import SQSum\n from contentcuration.viewsets.common import SQJSONBKeyArrayAgg\n node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id\n ).order_by()\n descendants = self.get_descendants().values('id')\n if channel_id:\n channel = Channel.objects.filter(id=channel_id)[0]\n else:\n channel = self.get_channel()\n if not descendants.exists():\n data = {'last_update': pytz.utc.localize(datetime.now()).\n strftime(settings.DATE_TIME_FORMAT), 'created': self.\n created.strftime(settings.DATE_TIME_FORMAT),\n 'resource_count': 0, 'resource_size': 0, 'includes': {\n 'coach_content': 0, 'exercises': 0}, 'kind_count': [],\n 'languages': [], 'accessible_languages': [], 'licenses': [],\n 'tags': [], 'copyright_holders': [], 'authors': [],\n 'aggregators': [], 'providers': [], 'sample_pathway': [],\n 'original_channels': [], 'sample_nodes': [], 'levels': [],\n 'categories': []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None\n )\n return data\n resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()\n nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('checksum', 'file_size').\n order_by(), name='nodes')\n file_query = nodes.queryset().with_cte(nodes).values('checksum',\n 'file_size').distinct()\n l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('language_id', 'preset_id').\n order_by(), name='l_nodes')\n accessible_languages_query = l_nodes.queryset().filter(preset_id=\n format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(\n 'language__native_name').distinct()\n tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=\n descendants.values_list('pk', flat=True)).values('tag_name').\n annotate(count=Count('tag_name')).query).replace('topic', \"'topic'\"\n )\n kind_count_query = str(resources.values('kind_id').annotate(count=\n Count('kind_id')).query).replace('topic', \"'topic'\")\n node = node.annotate(resource_count=SQCount(resources, field='id'),\n resource_size=SQSum(file_query, field='file_size'),\n copyright_holders=SQArrayAgg(resources.distinct(\n 'copyright_holder').order_by('copyright_holder'), field=\n 'copyright_holder'), authors=SQArrayAgg(resources.distinct(\n 'author').order_by('author'), field='author'), aggregators=\n SQArrayAgg(resources.distinct('aggregator').order_by(\n 'aggregator'), field='aggregator'), providers=SQArrayAgg(\n resources.distinct('provider').order_by('provider'), field=\n 'provider'), languages=SQRelatedArrayAgg(descendants.exclude(\n language=None).distinct('language__native_name').order_by(),\n field='language__native_name', fieldname='native_name'),\n accessible_languages=SQRelatedArrayAgg(\n accessible_languages_query, field='language__native_name',\n fieldname='native_name'), 
licenses=SQRelatedArrayAgg(resources.\n exclude(license=None).distinct('license__license_name').\n order_by('license__license_name'), field=\n 'license__license_name', fieldname='license_name'), kind_count=\n RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format\n (kind_count_query), ()), tags_list=RawSQL(\n 'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(\n tags_query), ()), coach_content=SQCount(resources.filter(\n role_visibility=roles.COACH), field='id'), exercises=SQCount(\n resources.filter(kind_id=content_kinds.EXERCISE), field='id'),\n levels=SQJSONBKeyArrayAgg(descendants.exclude(\n grade_levels__isnull=True), field='grade_levels'),\n all_categories=SQJSONBKeyArrayAgg(descendants.exclude(\n categories__isnull=True), field='categories'))\n max_level = max(resources.values_list('level', flat=True).order_by(\n ).distinct() or [0])\n m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').\n order_by(), name='m_nodes')\n deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level\n =max_level).values('id').order_by('tree_id', 'lft').first()\n if deepest_node_record:\n deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']\n )\n pathway = list(deepest_node.get_ancestors().order_by().exclude(\n parent=None).values('title', 'node_id', 'kind_id').order_by()\n ) if deepest_node_record else []\n sample_nodes = [{'node_id': n.node_id, 'title': n.title,\n 'description': n.description, 'thumbnail': n.get_thumbnail(),\n 'kind': n.kind_id} for n in deepest_node.get_siblings(\n include_self=True)[0:4]] if deepest_node_record else []\n channel_id = channel and channel.id\n originals = resources.values('original_channel_id').annotate(count=\n Count('original_channel_id')).order_by('original_channel_id')\n originals = {c['original_channel_id']: c['count'] for c in originals}\n original_channels = Channel.objects.exclude(pk=channel_id).filter(\n pk__in=originals.keys(), deleted=False).order_by()\n original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(\n ' (Original)') if channel_id == c.id else ''), 'thumbnail': c.\n get_thumbnail(), 'count': originals[c.id]} for c in\n original_channels]\n node = node.order_by().values('id', 'resource_count',\n 'resource_size', 'copyright_holders', 'authors', 'aggregators',\n 'providers', 'languages', 'accessible_languages',\n 'coach_content', 'licenses', 'tags_list', 'kind_count',\n 'exercises', 'levels', 'all_categories').first()\n for_educators = {'coach_content': node['coach_content'],\n 'exercises': node['exercises']}\n data = {'last_update': pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT), 'created': self.created.strftime(\n settings.DATE_TIME_FORMAT), 'resource_count': node.get(\n 'resource_count', 0), 'resource_size': node.get('resource_size',\n 0), 'includes': for_educators, 'kind_count': node.get(\n 'kind_count') or [], 'languages': node.get('languages') or [],\n 'accessible_languages': node.get('accessible_languages') or [],\n 'licenses': node.get('licenses') or [], 'tags': node.get(\n 'tags_list') or [], 'original_channels': original_channels,\n 'sample_pathway': pathway, 'sample_nodes': sample_nodes,\n 'authors': list(filter(bool, node['authors'])), 'aggregators':\n list(filter(bool, node['aggregators'])), 'providers': list(\n filter(bool, node['providers'])), 'copyright_holders': list(\n filter(bool, node['copyright_holders'])), 'levels': node.get(\n 'levels') or [], 'categories': node.get('all_categories') or []}\n cache.set('details_{}'.format(self.node_id), 
json.dumps(data), None)\n return data\n\n def has_changes(self):\n mptt_opts = self._mptt_meta\n blacklist = set(['changed', 'modified', 'publishing', mptt_opts.\n tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,\n mptt_opts.level_attr])\n original_values = self._field_updates.changed()\n return any(True for field in original_values if field not in blacklist)\n\n def recalculate_editors_storage(self):\n from contentcuration.utils.user import calculate_user_storage\n for editor in self.files.values_list('uploaded_by_id', flat=True\n ).distinct():\n calculate_user_storage(editor)\n\n def mark_complete(self):\n errors = []\n if not (bool(self.title) or self.parent_id is None):\n errors.append('Empty title')\n if self.kind_id != content_kinds.TOPIC:\n if not self.license:\n errors.append('Missing license')\n if (self.license and self.license.is_custom and not self.\n license_description):\n errors.append('Missing license description for custom license')\n if (self.license and self.license.copyright_holder_required and\n not self.copyright_holder):\n errors.append('Missing required copyright holder')\n if (self.kind_id != content_kinds.EXERCISE and not self.files.\n filter(preset__supplementary=False).exists()):\n errors.append('Missing default file')\n if self.kind_id == content_kinds.EXERCISE:\n if not self.assessment_items.filter(~Q(raw_data='') | ~Q(\n question='') & ~Q(answers='[]') & (Q(type=exercises.\n INPUT_QUESTION) | Q(answers__iregex='\"correct\":\\\\s*true'))\n ).exists():\n errors.append(\n 'No questions with question text and complete answers')\n criterion = self.extra_fields.get('options', {}).get(\n 'completion_criteria')\n if not (self.extra_fields.get('mastery_model') or criterion):\n errors.append('Missing mastery criterion')\n if criterion:\n try:\n completion_criteria.validate(criterion, kind=\n content_kinds.EXERCISE)\n except completion_criteria.ValidationError:\n errors.append(\n 'Mastery criterion is defined but is invalid')\n self.complete = not errors\n return errors\n\n def make_content_id_unique(self):\n \"\"\"\n If self is NOT an original contentnode (in other words, a copied contentnode)\n and a contentnode with same content_id exists then we update self's content_id.\n \"\"\"\n is_node_original = (self.original_source_node_id is None or self.\n original_source_node_id == self.node_id)\n node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(\n content_id=self.content_id)\n if not is_node_original and node_same_content_id.exists():\n ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.\n uuid4().hex)\n\n def on_create(self):\n self.changed = True\n self.recalculate_editors_storage()\n self.set_default_learning_activity()\n\n def on_update(self):\n self.changed = self.changed or self.has_changes()\n\n def move_to(self, target, *args, **kwargs):\n parent_was_trashtree = self.parent.channel_trash.exists()\n super(ContentNode, self).move_to(target, *args, **kwargs)\n self.save()\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.\n tree_id, None)\n if target.channel_trash.exists() or parent_was_trashtree:\n self.recalculate_editors_storage()\n\n def set_default_learning_activity(self):\n if self.learning_activities is None:\n if self.kind in kind_activity_map:\n self.learning_activities = {kind_activity_map[self.kind]: True}\n\n def save(self, skip_lock=False, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n old_parent_id = self._field_updates.changed().get('parent_id')\n if 
self._state.adding and (self.parent_id or self.parent):\n same_order = False\n elif old_parent_id is DeferredAttribute:\n same_order = True\n else:\n same_order = old_parent_id == self.parent_id\n if not same_order:\n changed_ids = list(filter(lambda x: x is not None, set([\n old_parent_id, self.parent_id])))\n else:\n changed_ids = []\n if not same_order and not skip_lock:\n with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(\n id__in=[pid for pid in [old_parent_id, self.parent_id] if\n pid]).values_list('tree_id', flat=True).distinct()):\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(\n changed=True)\n else:\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed\n =True)\n save.alters_data = True\n\n def delete(self, *args, **kwargs):\n parent = self.parent or self._field_updates.changed().get('parent')\n if parent:\n parent.changed = True\n parent.save()\n self.recalculate_editors_storage()\n with ContentNode.objects.lock_mptt(self.tree_id):\n return super(ContentNode, self).delete(*args, **kwargs)\n delete.alters_data = True\n\n def copy_to(self, target=None, position='last-child', pk=None, mods=\n None, excluded_descendants=None, can_edit_source_channel=None,\n batch_size=None, progress_tracker=None):\n return self._tree_manager.copy_node(self, target, position, pk,\n mods, excluded_descendants, can_edit_source_channel, batch_size,\n progress_tracker)[0]\n\n def copy(self):\n return self.copy_to()\n\n def is_publishable(self):\n return self.complete and self.get_descendants(include_self=True\n ).exclude(kind_id=content_kinds.TOPIC).exists()\n\n\n class Meta:\n verbose_name = 'Topic'\n verbose_name_plural = 'Topics'\n indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME\n ), models.Index(fields=['-modified'], name=\n NODE_MODIFIED_DESC_INDEX_NAME)]\n\n\nclass ContentKind(models.Model):\n kind = models.CharField(primary_key=True, max_length=200, choices=\n content_kinds.choices)\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=\n file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=\n format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True)\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets',\n null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n _, ext = os.path.splitext(filename)\n ext = ext.lstrip('.')\n f = FormatPreset.objects.filter(allowed_formats__extension=ext,\n display=True)\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format 
preset is not found.\n """\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=\n True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.\n LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.\n lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\n<mask token>\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default='multiplechoice')\n question = models.TextField(blank=True)\n hints = models.TextField(default='[]')\n answers = models.TextField(default='[]')\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'assessment_items', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,\n editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n objects = CustomManager()\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n\n class Meta:\n indexes = [models.Index(fields=['assessment_id'], name=\n ASSESSMENT_ID_INDEX_NAME)]\n unique_together = ['contentnode', 'assessment_id']\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'\n )).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n """\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n """\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n """\n When an exercise is updated on a contentnode, update its content_id\n if it's a copied contentnode.\n """\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n """\n When an exercise is deleted from a 
contentnode, update its content_id\n if it's a copied contentnode.\n """\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, **kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'slideshow_slides', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n """\n Keeps track of files uploaded through Ricecooker to avoid a user going over the disk quota limit\n """\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files',\n blank=True, null=True, on_delete=models.CASCADE)\n\n\n<mask token>\n\n\nclass File(models.Model):\n """\n The bottom layer of the contentDB schema, defining the basic building block for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n """\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=\n default_storage, max_length=500, blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files',\n blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n file_format = models.ForeignKey(FileFormat, related_name='files', blank\n =True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=\n True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n modified = models.DateTimeField(auto_now=True, verbose_name='modified',\n null=True)\n duration = models.IntegerField(blank=True, null=True)\n objects = CustomManager()\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(\n tree_id=OuterRef('assessment_item__contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.\n _permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(edit=True) | Q(uploaded_by=user,\n contentnode__isnull=True, assessment_item__isnull=True))\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True).filter(Q(main_tree__tree_id=OuterRef(\n 'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(\n 
'assessment_item__contentnode__tree_id'))).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |\n Q(uploaded_by=user, contentnode__isnull=True,\n assessment_item__isnull=True))\n\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum,\n extension='.' + self.file_format.extension)\n\n def filename(self):\n """\n Returns just the filename of the File in storage, without the path\n\n e.g. abcd.mp4\n """\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n """\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n """\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n """\n Override the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n """\n from contentcuration.utils.user import calculate_user_storage\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError('Invalid file_format')\n if set_by_file_on_disk and self.file_on_disk:\n if self.checksum is None or self.checksum == '':\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError('Files of type `{}` are not supported.'\n .format(ext))\n super(File, self).save(*args, **kwargs)\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n\n class Meta:\n indexes = [models.Index(fields=['checksum', 'file_size'], name=\n FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],\n name=FILE_MODIFIED_DESC_INDEX_NAME)]\n constraints = [models.CheckConstraint(check=Q(preset__in=\n MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name\n =FILE_DURATION_CONSTRAINT)]\n\n\n<mask token>\n\n\nclass PrerequisiteContentRelationship(models.Model):\n """\n Predefine the prerequisite relationship between two ContentNode objects.\n """\n target_node = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n if PrerequisiteContentRelationship.objects.using(self._state.db\n 
).filter(target_node=self.prerequisite, prerequisite=self.\n target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n if RelatedContentRelationship.objects.using(self._state.db).filter(\n contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1\n ):\n return\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\n 'sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name=\n 'pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = models.CharField(max_length=100, blank=True, null=True)\n\n\n class Meta:\n verbose_name = 'Invitation'\n verbose_name_plural = 'Invitations'\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user)).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user) | Q(channel__viewers=user)).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,\n blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=\n models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=\n True, on_delete=models.CASCADE, 
related_name='changes_about_user')\n client_rev = models.IntegerField(null=True, blank=True)\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=\n models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None,\n user_id=None, session_key=None, applied=False, table=None, rev=None,\n **data):\n change_type = data.pop('type')\n if table is None or table not in ALL_TABLES:\n raise TypeError(\n 'table is a required argument for creating changes and must be a valid table name'\n )\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\n 'change_type is a required argument for creating changes and must be a valid change type integer'\n )\n return cls(session_id=session_key, created_by_id=created_by_id,\n channel_id=channel_id, user_id=user_id, client_rev=rev, table=\n table, change_type=change_type, kwargs=data, applied=applied)\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None,\n applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=\n created_by_id, session_key=session_key, applied=applied, **\n change))\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None,\n applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id,\n session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, ['kwargs']).copy()\n datum.update({'server_rev': get_attribute(change, ['server_rev']),\n 'table': get_attribute(change, ['table']), 'type':\n get_attribute(change, ['change_type']), 'channel_id':\n get_attribute(change, ['channel_id']), 'user_id': get_attribute\n (change, ['user_id']), 'created_by_id': get_attribute(change, [\n 'created_by_id'])})\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. 
See `0141_add_task_signature` for an example\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',\n on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[\n MinValueValidator(0), MaxValueValidator(100)])\n signature = models.CharField(null=True, blank=False, max_length=32)\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(user_id=self.user_id, channel_id=self.channel_id,\n progress=self.progress)\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith('_') and field not in (\n 'contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n setattr(model_class._meta, 'indexes', getattr(model_class._meta,\n 'indexes', []) + cls.Meta.indexes)\n\n\n class Meta:\n indexes = [models.Index(fields=['signature'], name=\n 'task_result_signature_idx', condition=Q(status__in=\n celery_states.UNREADY_STATES))]\n\n\n<mask token>\n",
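A quick aside on the `contribute_to_class` pattern used by `TaskResultCustom` above: rather than subclassing the third-party `TaskResult` model, extra attributes are copied onto it at import time. The sketch below is a minimal, self-contained illustration of that injection idea using plain Python classes; `ThirdPartyResult` and `ResultExtras` are hypothetical names, not part of this codebase, and the sketch is simplified (the real method also merges `Meta.indexes` onto the model via `model_class._meta`).

class ThirdPartyResult(object):
    """Stand-in for a class that ships with an external package."""


class ResultExtras(object):
    # Attributes we want the third-party class to gain.
    progress = 0
    channel_id = None

    @classmethod
    def contribute_to_class(cls, model_class=ThirdPartyResult):
        # Mirror TaskResultCustom: walk dir(cls) and copy every public,
        # non-hook attribute onto the target class.
        for field in dir(cls):
            if not field.startswith('_') and field != 'contribute_to_class':
                setattr(model_class, field, getattr(cls, field))


ResultExtras.contribute_to_class()
assert ThirdPartyResult.progress == 0  # the injected attribute now lives on the target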
"step-3": "<mask token>\n\n\nclass SecretToken(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def exists(cls, token):\n \"\"\"\n Return true when the token string given by string already exists.\n Returns false otherwise.\n \"\"\"\n return cls.objects.filter(token=token).exists()\n\n @classmethod\n def generate_new_token(cls):\n \"\"\"\n Creates a primary secret token for the current channel using a proquint\n string. Creates a secondary token containing the channel id.\n\n These tokens can be used to refer to the channel to download its content\n database.\n \"\"\"\n token = proquint.generate()\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n else:\n raise ValueError('Cannot generate new token')\n return token\n\n def __str__(self):\n return '{}-{}'.format(self.token[:5], self.token[5:])\n\n\n<mask token>\n\n\nclass PermissionCTE(With):\n tree_id_fields = ['channel__{}__tree_id'.format(tree_name) for\n tree_name in CHANNEL_TREES]\n\n def __init__(self, model, user_id, **kwargs):\n queryset = model.objects.filter(user_id=user_id).annotate(tree_id=\n Unnest(ArrayRemove(Array(*self.tree_id_fields), None),\n output_field=models.IntegerField()))\n super(PermissionCTE, self).__init__(queryset=queryset.values(\n 'user_id', 'channel_id', 'tree_id'), **kwargs)\n\n @classmethod\n def editable_channels(cls, user_id):\n return PermissionCTE(User.editable_channels.through, user_id, name=\n 'editable_channels_cte')\n\n @classmethod\n def view_only_channels(cls, user_id):\n return PermissionCTE(User.view_only_channels.through, user_id, name\n ='view_only_channels_cte')\n\n def exists(self, *filters):\n return Exists(self.queryset().filter(*filters).values('user_id'))\n\n\nclass Channel(models.Model):\n \"\"\" Permissions come from association with organizations \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n tagline = models.CharField(max_length=150, blank=True, null=True)\n version = models.IntegerField(default=0)\n thumbnail = models.TextField(blank=True, null=True)\n thumbnail_encoding = JSONField(default=dict)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='editable_channels', verbose_name='editors', help_text=\n 'Users with edit rights', blank=True)\n viewers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='view_only_channels', verbose_name='viewers', help_text=\n 'Users with view only rights', blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='channel_language', on_delete=models.SET_NULL)\n trash_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_trash', on_delete=models.SET_NULL)\n clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_clipboard', on_delete=models.SET_NULL)\n main_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_main', on_delete=models.SET_NULL)\n staging_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_staging', on_delete=models.SET_NULL)\n chef_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_chef', on_delete=models.SET_NULL)\n previous_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_previous', on_delete=models.SET_NULL)\n 
bookmarked_by = models.ManyToManyField(settings.AUTH_USER_MODEL,\n related_name='bookmarked_channels', verbose_name='bookmarked by')\n deleted = models.BooleanField(default=False, db_index=True)\n public = models.BooleanField(default=False, db_index=True)\n preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)\n content_defaults = JSONField(default=dict)\n priority = models.IntegerField(default=0, help_text=\n 'Order to display public channels')\n last_published = models.DateTimeField(blank=True, null=True)\n secret_tokens = models.ManyToManyField(SecretToken, related_name=\n 'channels', verbose_name='secret tokens', blank=True)\n source_url = models.CharField(max_length=200, blank=True, null=True)\n demo_server_url = models.CharField(max_length=200, blank=True, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n ricecooker_version = models.CharField(max_length=100, blank=True, null=True\n )\n published_data = JSONField(default=dict)\n icon_encoding = models.TextField(blank=True, null=True)\n total_resource_count = models.IntegerField(default=0)\n published_kind_count = models.TextField(blank=True, null=True)\n published_size = models.FloatField(default=0)\n included_languages = models.ManyToManyField('Language', related_name=\n 'channels', verbose_name='languages', blank=True)\n _field_updates = FieldTracker(fields=['description', 'language_id',\n 'thumbnail', 'name', 'thumbnail_encoding', 'deleted', 'public',\n 'main_tree_id', 'version'])\n\n @classmethod\n def get_editable(cls, user, channel_id):\n return cls.filter_edit_queryset(cls.objects.all(), user).get(id=\n channel_id)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit = Exists(User.editable_channels.through.objects.filter(user_id\n =user_id, channel_id=OuterRef('id')))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n user_email = not user.is_anonymous and user.email\n if user_id:\n filters = dict(user_id=user_id, channel_id=OuterRef('id'))\n edit = Exists(User.editable_channels.through.objects.filter(**\n filters).values('user_id'))\n view = Exists(User.view_only_channels.through.objects.filter(**\n filters).values('user_id'))\n else:\n edit = boolean_val(False)\n view = boolean_val(False)\n queryset = queryset.annotate(edit=edit, view=view)\n if user_id and user.is_admin:\n return queryset\n permission_filter = Q()\n if user_id:\n pending_channels = Invitation.objects.filter(email=user_email,\n revoked=False, declined=False, accepted=False).values_list(\n 'channel_id', flat=True)\n permission_filter = Q(view=True) | Q(edit=True) | Q(deleted=\n False, id__in=pending_channels)\n return queryset.filter(permission_filter | Q(deleted=False, public=\n True))\n\n @classmethod\n def get_all_channels(cls):\n return cls.objects.select_related('main_tree').prefetch_related(\n 'editors', 'viewers').distinct()\n\n def resource_size_key(self):\n return '{}_resource_size'.format(self.pk)\n\n def get_resource_size(self):\n cached_data = cache.get(self.resource_size_key())\n if cached_data:\n return cached_data\n tree_id = self.main_tree.tree_id\n files = File.objects.select_related('contentnode', 'assessment_item'\n 
).filter(contentnode__tree_id=tree_id).values('checksum',\n 'file_size').distinct().aggregate(resource_size=Sum('file_size'))\n cache.set(self.resource_size_key(), files['resource_size'] or 0, None)\n return files['resource_size'] or 0\n\n def on_create(self):\n record_channel_stats(self, None)\n if not self.content_defaults:\n self.content_defaults = DEFAULT_CONTENT_DEFAULTS\n if not self.main_tree:\n self.main_tree = ContentNode.objects.create(title=self.name,\n kind_id=content_kinds.TOPIC, content_id=self.id, node_id=\n self.id, original_channel_id=self.id, source_channel_id=\n self.id, changed=True, complete=True)\n if settings.DEBUG:\n if ContentNode.objects.filter(parent=None, tree_id=self.\n main_tree.tree_id).count() != 1:\n raise AssertionError\n if not self.trash_tree:\n self.trash_tree = ContentNode.objects.create(title=self.name,\n kind_id=content_kinds.TOPIC, content_id=self.id, node_id=\n self.id)\n if self.public and (self.main_tree and self.main_tree.published):\n delete_public_channel_cache_keys()\n\n def on_update(self):\n from contentcuration.utils.user import calculate_user_storage\n original_values = self._field_updates.changed()\n record_channel_stats(self, original_values)\n blacklist = set(['public', 'main_tree_id', 'version'])\n if self.main_tree and original_values and any(True for field in\n original_values if field not in blacklist):\n self.main_tree.changed = True\n if 'thumbnail' in original_values and original_values['thumbnail'\n ] and 'static' not in original_values['thumbnail']:\n filename, ext = os.path.splitext(original_values['thumbnail'])\n delete_empty_file_reference(filename, ext[1:])\n if 'deleted' in original_values:\n for editor in self.editors.all():\n calculate_user_storage(editor.pk)\n if 'deleted' in original_values and not original_values['deleted']:\n self.pending_editors.all().delete()\n export_db_storage_path = os.path.join(settings.DB_ROOT,\n '{channel_id}.sqlite3'.format(channel_id=self.id))\n if default_storage.exists(export_db_storage_path):\n default_storage.delete(export_db_storage_path)\n if self.main_tree:\n self.main_tree.published = False\n if self.main_tree and self.main_tree._field_updates.changed():\n self.main_tree.save()\n if 'public' in original_values and (self.main_tree and self.\n main_tree.published):\n delete_public_channel_cache_keys()\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n super(Channel, self).save(*args, **kwargs)\n\n def get_thumbnail(self):\n return get_channel_thumbnail(self)\n\n def has_changes(self):\n return self.main_tree.get_descendants(include_self=True).filter(changed\n =True).exists()\n\n def get_date_modified(self):\n return self.main_tree.get_descendants(include_self=True).aggregate(\n last_modified=Max('modified'))['last_modified']\n\n def get_resource_count(self):\n return self.main_tree.get_descendants().exclude(kind_id=\n content_kinds.TOPIC).order_by('content_id').distinct('content_id'\n ).count()\n\n def get_human_token(self):\n return self.secret_tokens.get(is_primary=True)\n\n def get_channel_id_token(self):\n return self.secret_tokens.get(token=self.id)\n\n def make_token(self):\n token = self.secret_tokens.create(token=SecretToken.\n generate_new_token(), is_primary=True)\n self.secret_tokens.get_or_create(token=self.id)\n return token\n\n def make_public(self, bypass_signals=False):\n \"\"\"\n Sets the current channel object to be public and viewable by anyone.\n\n If bypass_signals is True, update the model in such a way 
that we\n prevent any model signals from running due to the update.\n\n Returns the same channel object.\n \"\"\"\n if bypass_signals:\n self.public = True\n Channel.objects.filter(id=self.id).update(public=True)\n delete_public_channel_cache_keys()\n else:\n self.public = True\n self.save()\n return self\n\n def mark_created(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n CREATION)\n\n def mark_publishing(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n PUBLICATION)\n self.main_tree.publishing = True\n self.main_tree.save()\n\n def mark_deleted(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n DELETION)\n self.deleted = True\n self.save()\n\n def mark_recovered(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n RECOVERY)\n self.deleted = False\n self.save()\n\n @property\n def deletion_history(self):\n return self.history.filter(action=channel_history.DELETION)\n\n @property\n def publishing_history(self):\n return self.history.filter(action=channel_history.PUBLICATION)\n\n @classmethod\n def get_public_channels(cls, defer_nonmain_trees=False):\n \"\"\"\n Get all public channels.\n\n If defer_nonmain_trees is True, defer the loading of all\n trees except for the main_tree.\"\"\"\n if defer_nonmain_trees:\n c = Channel.objects.filter(public=True).exclude(deleted=True\n ).select_related('main_tree').prefetch_related('editors'\n ).defer('trash_tree', 'clipboard_tree', 'staging_tree',\n 'chef_tree', 'previous_tree', 'viewers')\n else:\n c = Channel.objects.filter(public=True).exclude(deleted=True)\n return c\n\n\n class Meta:\n verbose_name = 'Channel'\n verbose_name_plural = 'Channels'\n indexes = [models.Index(fields=['name'], name=CHANNEL_NAME_INDEX_NAME)]\n index_together = [['deleted', 'public']]\n\n\n<mask token>\n\n\nclass ChannelHistory(models.Model):\n \"\"\"\n Model for tracking certain actions performed on a channel\n \"\"\"\n channel = models.ForeignKey('Channel', null=False, blank=False,\n related_name='history', on_delete=models.CASCADE)\n actor = models.ForeignKey('User', null=False, blank=False, related_name\n ='channel_history', on_delete=models.CASCADE)\n performed = models.DateTimeField(default=timezone.now)\n action = models.CharField(max_length=50, choices=channel_history.choices)\n\n @classmethod\n def prune(cls):\n \"\"\"\n Prunes history records by keeping the most recent actions for each channel and type,\n and deleting all other older actions\n \"\"\"\n keep_ids = cls.objects.distinct('channel_id', 'action').order_by(\n 'channel_id', 'action', '-performed').values_list('id', flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()\n\n\n class Meta:\n verbose_name = 'Channel history'\n verbose_name_plural = 'Channel histories'\n indexes = [models.Index(fields=['channel_id'], name=\n CHANNEL_HISTORY_CHANNEL_INDEX_NAME)]\n\n\nclass UserHistory(models.Model):\n \"\"\"\n Model that stores the user's action history.\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=\n False, related_name='history', on_delete=models.CASCADE)\n action = models.CharField(max_length=32, choices=user_history.choices)\n performed_at = models.DateTimeField(default=timezone.now)\n\n\nclass ChannelSet(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n public = models.BooleanField(default=False, 
db_index=True)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='channel_sets', verbose_name='editors', help_text=\n 'Users with edit rights', blank=True)\n secret_token = models.ForeignKey('SecretToken', null=True, blank=True,\n related_name='channel_sets', on_delete=models.SET_NULL)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n user_id = not user.is_anonymous and user.id\n edit = Exists(User.channel_sets.through.objects.filter(user_id=\n user_id, channelset_id=OuterRef('id')))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n return cls.filter_edit_queryset(queryset, user)\n\n def get_channels(self):\n if self.secret_token:\n return self.secret_token.channels.filter(deleted=False)\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n super(ChannelSet, self).save()\n\n def on_create(self):\n if not self.secret_token:\n self.secret_token = SecretToken.objects.create(token=\n SecretToken.generate_new_token())\n\n def delete(self, *args, **kwargs):\n super(ChannelSet, self).delete(*args, **kwargs)\n if self.secret_token:\n self.secret_token.delete()\n\n\nclass ContentTag(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n tag_name = models.CharField(max_length=50)\n channel = models.ForeignKey('Channel', related_name='tags', blank=True,\n null=True, db_index=True, on_delete=models.SET_NULL)\n objects = CustomManager()\n\n def __str__(self):\n return self.tag_name\n\n\n class Meta:\n unique_together = ['tag_name', 'channel']\n\n\nclass License(models.Model):\n \"\"\"\n Normalize the license of ContentNode model\n \"\"\"\n license_name = models.CharField(max_length=50)\n license_url = models.URLField(blank=True)\n license_description = models.TextField(blank=True)\n copyright_holder_required = models.BooleanField(default=True)\n is_custom = models.BooleanField(default=False)\n exists = models.BooleanField(default=False, verbose_name=\n 'license exists', help_text=\n 'Tells whether or not a content item is licensed to share')\n\n @classmethod\n def validate_name(cls, name):\n if cls.objects.filter(license_name=name).count() == 0:\n raise ValidationError('License `{}` does not exist'.format(name))\n\n def __str__(self):\n return self.license_name\n\n\n<mask token>\n\n\nclass ContentNode(MPTTModel, models.Model):\n \"\"\"\n By default, all nodes have a title and can be used as a topic.\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=\n False, db_index=True)\n node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n original_channel_id = UUIDField(primary_key=False, editable=False, null\n =True, db_index=True)\n source_channel_id = UUIDField(primary_key=False, editable=False, null=True)\n original_source_node_id = UUIDField(primary_key=False, editable=False,\n null=True, db_index=True)\n source_node_id = UUIDField(primary_key=False, editable=False, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n title = models.CharField(max_length=200, blank=True)\n description = models.TextField(blank=True)\n kind = models.ForeignKey('ContentKind', related_name='contentnodes',\n db_index=True, null=True, blank=True, 
on_delete=models.SET_NULL)\n license = models.ForeignKey('License', null=True, blank=True, on_delete\n =models.SET_NULL)\n license_description = models.CharField(max_length=400, null=True, blank\n =True)\n prerequisite = models.ManyToManyField('self', related_name=\n 'is_prerequisite_of', through='PrerequisiteContentRelationship',\n symmetrical=False, blank=True)\n is_related = models.ManyToManyField('self', related_name='relate_to',\n through='RelatedContentRelationship', symmetrical=False, blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='content_language', on_delete=models.SET_NULL)\n parent = TreeForeignKey('self', null=True, blank=True, related_name=\n 'children', db_index=True, on_delete=models.CASCADE)\n tags = models.ManyToManyField(ContentTag, symmetrical=False,\n related_name='tagged_content', blank=True)\n sort_order = models.FloatField(max_length=50, default=1, verbose_name=\n 'sort order', help_text='Ascending, lowest number shown first')\n copyright_holder = models.CharField(max_length=200, null=True, blank=\n True, default='', help_text=\n 'Organization of person who holds the essential rights')\n original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='duplicates')\n cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='clones')\n thumbnail_encoding = models.TextField(blank=True, null=True)\n created = models.DateTimeField(default=timezone.now, verbose_name='created'\n )\n modified = models.DateTimeField(auto_now=True, verbose_name='modified')\n published = models.BooleanField(default=False)\n publishing = models.BooleanField(default=False)\n complete = models.BooleanField(null=True)\n changed = models.BooleanField(default=True)\n \"\"\"\n Extra fields for exercises:\n - type: mastery model to use to determine completion\n - m: m value for M out of N mastery criteria\n - n: n value for M out of N mastery criteria\n \"\"\"\n extra_fields = JSONField(default=dict, blank=True, null=True)\n author = models.CharField(max_length=200, blank=True, default='',\n help_text='Who created this content?', null=True)\n aggregator = models.CharField(max_length=200, blank=True, default='',\n help_text='Who gathered this content together?', null=True)\n provider = models.CharField(max_length=200, blank=True, default='',\n help_text='Who distributed this content?', null=True)\n role_visibility = models.CharField(max_length=50, choices=roles.choices,\n default=roles.LEARNER)\n freeze_authoring_data = models.BooleanField(default=False)\n grade_levels = models.JSONField(blank=True, null=True)\n resource_types = models.JSONField(blank=True, null=True)\n learning_activities = models.JSONField(blank=True, null=True)\n accessibility_labels = models.JSONField(blank=True, null=True)\n categories = models.JSONField(blank=True, null=True)\n learner_needs = models.JSONField(blank=True, null=True)\n suggested_duration = models.IntegerField(blank=True, null=True,\n help_text='Suggested duration for the content node (in seconds)')\n objects = CustomContentNodeTreeManager()\n _field_updates = FieldTracker()\n _permission_filter = Q(tree_id=OuterRef('tree_id'))\n\n @classmethod\n def _annotate_channel_id(cls, queryset):\n return queryset.annotate(channel_id=Subquery(Channel.objects.filter\n (main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat\n =True)[:1]))\n\n @classmethod\n def filter_by_pk(cls, pk):\n \"\"\"\n When 
`settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always\n returns a queryset filtered by pk.\n\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode\n for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. If\n a ContentNode does not exist for `pk` then an empty queryset is returned.\n """\n query = ContentNode.objects.filter(pk=pk)\n if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:\n tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n if tree_id:\n query = query.filter(tree_id=tree_id)\n else:\n tree_id = ContentNode.objects.filter(pk=pk).values_list(\n 'tree_id', flat=True).first()\n if tree_id:\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),\n tree_id, None)\n query = query.filter(tree_id=tree_id)\n else:\n query = query.none()\n return query\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n @raise_if_unsaved\n def get_root(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return super(ContentNode, self).get_root()\n\n @raise_if_unsaved\n def get_root_id(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return ContentNode.objects.values_list('pk', flat=True).get(tree_id\n =self._mpttfield('tree_id'), parent=None)\n\n def get_tree_data(self, levels=float('inf')):\n """\n Returns `levels`-deep tree information starting at the current node.\n Args:\n levels (int): depth of tree hierarchy to return\n Returns:\n tree (dict): starting with self, with children list containing either\n just the children's `node_id`s or the full recursive tree.\n """\n if self.kind_id == content_kinds.TOPIC:\n node_data = {'title': self.title, 'kind': self.kind_id,\n 'node_id': self.node_id, 'studio_id': self.id}\n children = self.children.all()\n if levels > 0:\n node_data['children'] = [c.get_tree_data(levels=levels - 1) for\n c in children]\n return node_data\n if self.kind_id == content_kinds.EXERCISE:\n return {'title': self.title, 'kind': self.kind_id, 'count':\n self.assessment_items.count(), 'node_id': self.node_id,\n 'studio_id': self.id}\n return {'title': self.title, 'kind': self.kind_id, 'file_size':\n self.files.values('file_size').aggregate(size=Sum('file_size'))\n ['size'], 'node_id': self.node_id, 'studio_id': self.id}\n\n def get_original_node(self):\n original_node = self.original_node or self\n if self.original_channel_id and self.original_source_node_id:\n 
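# Descriptive note: the lookup below first resolves the original channel's
# main tree, then prefers an exact node_id match for the source node, falls
# back to any node in that tree sharing this node's content_id, and finally
# falls back to self if neither exists.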
original_tree_id = Channel.objects.select_related('main_tree').get(\n pk=self.original_channel_id).main_tree.tree_id\n original_node = ContentNode.objects.filter(tree_id=\n original_tree_id, node_id=self.original_source_node_id).first(\n ) or ContentNode.objects.filter(tree_id=original_tree_id,\n content_id=self.content_id).first() or self\n return original_node\n\n def get_associated_presets(self):\n key = 'associated_presets_{}'.format(self.kind_id)\n cached_data = cache.get(key)\n if cached_data:\n return cached_data\n presets = list(FormatPreset.objects.filter(kind=self.kind).values())\n cache.set(key, presets, None)\n return presets\n\n def get_prerequisites(self):\n prerequisite_mapping = {}\n prerequisites = self.prerequisite.all()\n prereqlist = list(prerequisites)\n for prereq in prerequisites:\n prlist, prereqmapping = prereq.get_prerequisites()\n prerequisite_mapping.update({prereq.pk: prereqmapping})\n prereqlist.extend(prlist)\n return prereqlist, prerequisite_mapping\n\n def get_postrequisites(self):\n postrequisite_mapping = {}\n postrequisites = self.is_prerequisite_of.all()\n postreqlist = list(postrequisites)\n for postreq in postrequisites:\n prlist, postreqmapping = postreq.get_postrequisites()\n postrequisite_mapping.update({postreq.pk: postreqmapping})\n postreqlist.extend(prlist)\n return postreqlist, postrequisite_mapping\n\n def get_channel_id(self):\n if hasattr(self, 'channel_id'):\n return self.channel_id\n channel = self.get_channel()\n if channel:\n return channel.id\n return None\n\n def get_channel(self):\n try:\n root = self.get_root()\n if not root:\n return None\n return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=\n root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(\n previous_tree=root)).first()\n except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):\n return None\n\n def get_thumbnail(self):\n if self.thumbnail_encoding:\n thumbnail_data = load_json_string(self.thumbnail_encoding)\n if type(thumbnail_data) is dict and thumbnail_data.get('base64'):\n return thumbnail_data['base64']\n thumbnail = self.files.filter(preset__thumbnail=True).first()\n if thumbnail:\n return generate_storage_url(str(thumbnail))\n return ''\n\n @classmethod\n def get_nodes_with_title(cls, title, limit_to_children_of=None):\n \"\"\"\n Returns all ContentNodes with a given title. 
If limit_to_children_of\n is passed in with an id, only look at all the children of the node with that id.\n \"\"\"\n if limit_to_children_of:\n root = cls.objects.get(id=limit_to_children_of)\n return root.get_descendants().filter(title=title)\n return cls.objects.filter(title=title)\n\n def get_details(self, channel_id=None):\n \"\"\"\n Returns information about the node and its children, including total size, languages, files, etc.\n\n :return: A dictionary with detailed statistics and information about the node.\n \"\"\"\n from contentcuration.viewsets.common import SQArrayAgg\n from contentcuration.viewsets.common import SQCount\n from contentcuration.viewsets.common import SQRelatedArrayAgg\n from contentcuration.viewsets.common import SQSum\n from contentcuration.viewsets.common import SQJSONBKeyArrayAgg\n node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id\n ).order_by()\n descendants = self.get_descendants().values('id')\n if channel_id:\n channel = Channel.objects.filter(id=channel_id)[0]\n else:\n channel = self.get_channel()\n if not descendants.exists():\n data = {'last_update': pytz.utc.localize(datetime.now()).\n strftime(settings.DATE_TIME_FORMAT), 'created': self.\n created.strftime(settings.DATE_TIME_FORMAT),\n 'resource_count': 0, 'resource_size': 0, 'includes': {\n 'coach_content': 0, 'exercises': 0}, 'kind_count': [],\n 'languages': [], 'accessible_languages': [], 'licenses': [],\n 'tags': [], 'copyright_holders': [], 'authors': [],\n 'aggregators': [], 'providers': [], 'sample_pathway': [],\n 'original_channels': [], 'sample_nodes': [], 'levels': [],\n 'categories': []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None\n )\n return data\n resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()\n nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('checksum', 'file_size').\n order_by(), name='nodes')\n file_query = nodes.queryset().with_cte(nodes).values('checksum',\n 'file_size').distinct()\n l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('language_id', 'preset_id').\n order_by(), name='l_nodes')\n accessible_languages_query = l_nodes.queryset().filter(preset_id=\n format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(\n 'language__native_name').distinct()\n tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=\n descendants.values_list('pk', flat=True)).values('tag_name').\n annotate(count=Count('tag_name')).query).replace('topic', \"'topic'\"\n )\n kind_count_query = str(resources.values('kind_id').annotate(count=\n Count('kind_id')).query).replace('topic', \"'topic'\")\n node = node.annotate(resource_count=SQCount(resources, field='id'),\n resource_size=SQSum(file_query, field='file_size'),\n copyright_holders=SQArrayAgg(resources.distinct(\n 'copyright_holder').order_by('copyright_holder'), field=\n 'copyright_holder'), authors=SQArrayAgg(resources.distinct(\n 'author').order_by('author'), field='author'), aggregators=\n SQArrayAgg(resources.distinct('aggregator').order_by(\n 'aggregator'), field='aggregator'), providers=SQArrayAgg(\n resources.distinct('provider').order_by('provider'), field=\n 'provider'), languages=SQRelatedArrayAgg(descendants.exclude(\n language=None).distinct('language__native_name').order_by(),\n field='language__native_name', fieldname='native_name'),\n accessible_languages=SQRelatedArrayAgg(\n accessible_languages_query, field='language__native_name',\n fieldname='native_name'), 
licenses=SQRelatedArrayAgg(resources.\n exclude(license=None).distinct('license__license_name').\n order_by('license__license_name'), field=\n 'license__license_name', fieldname='license_name'), kind_count=\n RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format\n (kind_count_query), ()), tags_list=RawSQL(\n 'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(\n tags_query), ()), coach_content=SQCount(resources.filter(\n role_visibility=roles.COACH), field='id'), exercises=SQCount(\n resources.filter(kind_id=content_kinds.EXERCISE), field='id'),\n levels=SQJSONBKeyArrayAgg(descendants.exclude(\n grade_levels__isnull=True), field='grade_levels'),\n all_categories=SQJSONBKeyArrayAgg(descendants.exclude(\n categories__isnull=True), field='categories'))\n max_level = max(resources.values_list('level', flat=True).order_by(\n ).distinct() or [0])\n m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').\n order_by(), name='m_nodes')\n deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level\n =max_level).values('id').order_by('tree_id', 'lft').first()\n if deepest_node_record:\n deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']\n )\n pathway = list(deepest_node.get_ancestors().order_by().exclude(\n parent=None).values('title', 'node_id', 'kind_id').order_by()\n ) if deepest_node_record else []\n sample_nodes = [{'node_id': n.node_id, 'title': n.title,\n 'description': n.description, 'thumbnail': n.get_thumbnail(),\n 'kind': n.kind_id} for n in deepest_node.get_siblings(\n include_self=True)[0:4]] if deepest_node_record else []\n channel_id = channel and channel.id\n originals = resources.values('original_channel_id').annotate(count=\n Count('original_channel_id')).order_by('original_channel_id')\n originals = {c['original_channel_id']: c['count'] for c in originals}\n original_channels = Channel.objects.exclude(pk=channel_id).filter(\n pk__in=originals.keys(), deleted=False).order_by()\n original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(\n ' (Original)') if channel_id == c.id else ''), 'thumbnail': c.\n get_thumbnail(), 'count': originals[c.id]} for c in\n original_channels]\n node = node.order_by().values('id', 'resource_count',\n 'resource_size', 'copyright_holders', 'authors', 'aggregators',\n 'providers', 'languages', 'accessible_languages',\n 'coach_content', 'licenses', 'tags_list', 'kind_count',\n 'exercises', 'levels', 'all_categories').first()\n for_educators = {'coach_content': node['coach_content'],\n 'exercises': node['exercises']}\n data = {'last_update': pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT), 'created': self.created.strftime(\n settings.DATE_TIME_FORMAT), 'resource_count': node.get(\n 'resource_count', 0), 'resource_size': node.get('resource_size',\n 0), 'includes': for_educators, 'kind_count': node.get(\n 'kind_count') or [], 'languages': node.get('languages') or [],\n 'accessible_languages': node.get('accessible_languages') or [],\n 'licenses': node.get('licenses') or [], 'tags': node.get(\n 'tags_list') or [], 'original_channels': original_channels,\n 'sample_pathway': pathway, 'sample_nodes': sample_nodes,\n 'authors': list(filter(bool, node['authors'])), 'aggregators':\n list(filter(bool, node['aggregators'])), 'providers': list(\n filter(bool, node['providers'])), 'copyright_holders': list(\n filter(bool, node['copyright_holders'])), 'levels': node.get(\n 'levels') or [], 'categories': node.get('all_categories') or []}\n cache.set('details_{}'.format(self.node_id), 
json.dumps(data), None)\n return data\n\n def has_changes(self):\n mptt_opts = self._mptt_meta\n blacklist = set(['changed', 'modified', 'publishing', mptt_opts.\n tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,\n mptt_opts.level_attr])\n original_values = self._field_updates.changed()\n return any(True for field in original_values if field not in blacklist)\n\n def recalculate_editors_storage(self):\n from contentcuration.utils.user import calculate_user_storage\n for editor in self.files.values_list('uploaded_by_id', flat=True\n ).distinct():\n calculate_user_storage(editor)\n\n def mark_complete(self):\n errors = []\n if not (bool(self.title) or self.parent_id is None):\n errors.append('Empty title')\n if self.kind_id != content_kinds.TOPIC:\n if not self.license:\n errors.append('Missing license')\n if (self.license and self.license.is_custom and not self.\n license_description):\n errors.append('Missing license description for custom license')\n if (self.license and self.license.copyright_holder_required and\n not self.copyright_holder):\n errors.append('Missing required copyright holder')\n if (self.kind_id != content_kinds.EXERCISE and not self.files.\n filter(preset__supplementary=False).exists()):\n errors.append('Missing default file')\n if self.kind_id == content_kinds.EXERCISE:\n if not self.assessment_items.filter(~Q(raw_data='') | ~Q(\n question='') & ~Q(answers='[]') & (Q(type=exercises.\n INPUT_QUESTION) | Q(answers__iregex='\"correct\":\\\\s*true'))\n ).exists():\n errors.append(\n 'No questions with question text and complete answers')\n criterion = self.extra_fields.get('options', {}).get(\n 'completion_criteria')\n if not (self.extra_fields.get('mastery_model') or criterion):\n errors.append('Missing mastery criterion')\n if criterion:\n try:\n completion_criteria.validate(criterion, kind=\n content_kinds.EXERCISE)\n except completion_criteria.ValidationError:\n errors.append(\n 'Mastery criterion is defined but is invalid')\n self.complete = not errors\n return errors\n\n def make_content_id_unique(self):\n \"\"\"\n If self is NOT an original contentnode (in other words, a copied contentnode)\n and a contentnode with same content_id exists then we update self's content_id.\n \"\"\"\n is_node_original = (self.original_source_node_id is None or self.\n original_source_node_id == self.node_id)\n node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(\n content_id=self.content_id)\n if not is_node_original and node_same_content_id.exists():\n ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.\n uuid4().hex)\n\n def on_create(self):\n self.changed = True\n self.recalculate_editors_storage()\n self.set_default_learning_activity()\n\n def on_update(self):\n self.changed = self.changed or self.has_changes()\n\n def move_to(self, target, *args, **kwargs):\n parent_was_trashtree = self.parent.channel_trash.exists()\n super(ContentNode, self).move_to(target, *args, **kwargs)\n self.save()\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.\n tree_id, None)\n if target.channel_trash.exists() or parent_was_trashtree:\n self.recalculate_editors_storage()\n\n def set_default_learning_activity(self):\n if self.learning_activities is None:\n if self.kind in kind_activity_map:\n self.learning_activities = {kind_activity_map[self.kind]: True}\n\n def save(self, skip_lock=False, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n old_parent_id = self._field_updates.changed().get('parent_id')\n if 
self._state.adding and (self.parent_id or self.parent):\n same_order = False\n elif old_parent_id is DeferredAttribute:\n same_order = True\n else:\n same_order = old_parent_id == self.parent_id\n if not same_order:\n changed_ids = list(filter(lambda x: x is not None, set([\n old_parent_id, self.parent_id])))\n else:\n changed_ids = []\n if not same_order and not skip_lock:\n with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(\n id__in=[pid for pid in [old_parent_id, self.parent_id] if\n pid]).values_list('tree_id', flat=True).distinct()):\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(\n changed=True)\n else:\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed\n =True)\n save.alters_data = True\n\n def delete(self, *args, **kwargs):\n parent = self.parent or self._field_updates.changed().get('parent')\n if parent:\n parent.changed = True\n parent.save()\n self.recalculate_editors_storage()\n with ContentNode.objects.lock_mptt(self.tree_id):\n return super(ContentNode, self).delete(*args, **kwargs)\n delete.alters_data = True\n\n def copy_to(self, target=None, position='last-child', pk=None, mods=\n None, excluded_descendants=None, can_edit_source_channel=None,\n batch_size=None, progress_tracker=None):\n return self._tree_manager.copy_node(self, target, position, pk,\n mods, excluded_descendants, can_edit_source_channel, batch_size,\n progress_tracker)[0]\n\n def copy(self):\n return self.copy_to()\n\n def is_publishable(self):\n return self.complete and self.get_descendants(include_self=True\n ).exclude(kind_id=content_kinds.TOPIC).exists()\n\n\n class Meta:\n verbose_name = 'Topic'\n verbose_name_plural = 'Topics'\n indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME\n ), models.Index(fields=['-modified'], name=\n NODE_MODIFIED_DESC_INDEX_NAME)]\n\n\nclass ContentKind(models.Model):\n kind = models.CharField(primary_key=True, max_length=200, choices=\n content_kinds.choices)\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=\n file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=\n format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True)\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets',\n null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n _, ext = os.path.splitext(filename)\n ext = ext.lstrip('.')\n f = FormatPreset.objects.filter(allowed_formats__extension=ext,\n display=True)\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format 
preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=\n True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.\n LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.\n lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\n<mask token>\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default='multiplechoice')\n question = models.TextField(blank=True)\n hints = models.TextField(default='[]')\n answers = models.TextField(default='[]')\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'assessment_items', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,\n editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n objects = CustomManager()\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n\n class Meta:\n indexes = [models.Index(fields=['assessment_id'], name=\n ASSESSMENT_ID_INDEX_NAME)]\n unique_together = ['contentnode', 'assessment_id']\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'\n )).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n \"\"\"\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n \"\"\"\n When an exercise on a contentnode is updated, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n When an exercise is deleted from a 
contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, **kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'slideshow_slides', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n \"\"\"\n Keeps track of files uploaded through Ricecooker to avoid users going over their disk quota limit\n \"\"\"\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files',\n blank=True, null=True, on_delete=models.CASCADE)\n\n\n<mask token>\n\n\nclass File(models.Model):\n \"\"\"\n The bottom layer of the contentDB schema, defines the basic building brick for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=\n default_storage, max_length=500, blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files',\n blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n file_format = models.ForeignKey(FileFormat, related_name='files', blank\n =True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=\n True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n modified = models.DateTimeField(auto_now=True, verbose_name='modified',\n null=True)\n duration = models.IntegerField(blank=True, null=True)\n objects = CustomManager()\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(\n tree_id=OuterRef('assessment_item__contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.\n _permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(edit=True) | Q(uploaded_by=user,\n contentnode__isnull=True, assessment_item__isnull=True))\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True).filter(Q(main_tree__tree_id=OuterRef(\n 'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(\n 
'assessment_item__contentnode__tree_id'))).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |\n Q(uploaded_by=user, contentnode__isnull=True,\n assessment_item__isnull=True))\n\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum,\n extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. abcd.mp4\n \"\"\"\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Override the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError('Invalid file_format')\n if set_by_file_on_disk and self.file_on_disk:\n if self.checksum is None or self.checksum == '':\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError('Files of type `{}` are not supported.'\n .format(ext))\n super(File, self).save(*args, **kwargs)\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n\n class Meta:\n indexes = [models.Index(fields=['checksum', 'file_size'], name=\n FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],\n name=FILE_MODIFIED_DESC_INDEX_NAME)]\n constraints = [models.CheckConstraint(check=Q(preset__in=\n MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name\n =FILE_DURATION_CONSTRAINT)]\n\n\n<mask token>\n\n\nclass PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n if PrerequisiteContentRelationship.objects.using(self._state.db\n 
).filter(target_node=self.prerequisite, prerequisite=self.\n target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n if RelatedContentRelationship.objects.using(self._state.db).filter(\n contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1\n ):\n return\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\n 'sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name=\n 'pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = models.CharField(max_length=100, blank=True, null=True)\n\n\n class Meta:\n verbose_name = 'Invitation'\n verbose_name_plural = 'Invitations'\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user)).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user) | Q(channel__viewers=user)).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,\n blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=\n models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=\n True, on_delete=models.CASCADE, 
related_name='changes_about_user')\n client_rev = models.IntegerField(null=True, blank=True)\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=\n models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None,\n user_id=None, session_key=None, applied=False, table=None, rev=None,\n **data):\n change_type = data.pop('type')\n if table is None or table not in ALL_TABLES:\n raise TypeError(\n 'table is a required argument for creating changes and must be a valid table name'\n )\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\n 'change_type is a required argument for creating changes and must be a valid change type integer'\n )\n return cls(session_id=session_key, created_by_id=created_by_id,\n channel_id=channel_id, user_id=user_id, client_rev=rev, table=\n table, change_type=change_type, kwargs=data, applied=applied)\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None,\n applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=\n created_by_id, session_key=session_key, applied=applied, **\n change))\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None,\n applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id,\n session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, ['kwargs']).copy()\n datum.update({'server_rev': get_attribute(change, ['server_rev']),\n 'table': get_attribute(change, ['table']), 'type':\n get_attribute(change, ['change_type']), 'channel_id':\n get_attribute(change, ['channel_id']), 'user_id': get_attribute\n (change, ['user_id']), 'created_by_id': get_attribute(change, [\n 'created_by_id'])})\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. 
See `0141_add_task_signature` for an example\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',\n on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[\n MinValueValidator(0), MaxValueValidator(100)])\n signature = models.CharField(null=True, blank=False, max_length=32)\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(user_id=self.user_id, channel_id=self.channel_id,\n progress=self.progress)\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith('_') and field not in (\n 'contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n setattr(model_class._meta, 'indexes', getattr(model_class._meta,\n 'indexes', []) + cls.Meta.indexes)\n\n\n class Meta:\n indexes = [models.Index(fields=['signature'], name=\n 'task_result_signature_idx', condition=Q(status__in=\n celery_states.UNREADY_STATES))]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass UUIDField(models.CharField):\n\n def __init__(self, *args, **kwargs):\n kwargs['max_length'] = 32\n super(UUIDField, self).__init__(*args, **kwargs)\n <mask token>\n\n def get_default(self):\n result = super(UUIDField, self).get_default()\n if isinstance(result, uuid.UUID):\n result = result.hex\n return result\n <mask token>\n\n\nclass MPTTTreeIDManager(models.Model):\n \"\"\"\n Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,\n the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.\n\n As we are using this table only for the ID generation, it does not need any fields.\n\n We resolve this by creating a dummy table and using its ID as the tree index to take advantage of the db's\n concurrency-friendly way of generating sequential integer IDs. There is a custom migration that ensures\n that the number of records (and thus id) matches the max tree ID number when this table gets added.\n \"\"\"\n\n\n<mask token>\n\n\nclass FileOnDiskStorage(FileSystemStorage):\n \"\"\"\n Overrider FileSystemStorage's default save method to ignore duplicated file.\n \"\"\"\n\n def get_available_name(self, name):\n return name\n\n def _save(self, name, content):\n if self.exists(name):\n logging.warn('Content copy \"%s\" already exists!' % name)\n return name\n return super(FileOnDiskStorage, self)._save(name, content)\n\n\nclass SecretToken(models.Model):\n \"\"\"Tokens for channels\"\"\"\n token = models.CharField(max_length=100, unique=True)\n is_primary = models.BooleanField(default=False)\n\n @classmethod\n def exists(cls, token):\n \"\"\"\n Return true when the token string given by string already exists.\n Returns false otherwise.\n \"\"\"\n return cls.objects.filter(token=token).exists()\n\n @classmethod\n def generate_new_token(cls):\n \"\"\"\n Creates a primary secret token for the current channel using a proquint\n string. 
Creates a secondary token containing the channel id.\n\n These tokens can be used to refer to the channel to download its content\n database.\n \"\"\"\n token = proquint.generate()\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n else:\n raise ValueError('Cannot generate new token')\n return token\n\n def __str__(self):\n return '{}-{}'.format(self.token[:5], self.token[5:])\n\n\n<mask token>\n\n\nclass PermissionCTE(With):\n tree_id_fields = ['channel__{}__tree_id'.format(tree_name) for\n tree_name in CHANNEL_TREES]\n\n def __init__(self, model, user_id, **kwargs):\n queryset = model.objects.filter(user_id=user_id).annotate(tree_id=\n Unnest(ArrayRemove(Array(*self.tree_id_fields), None),\n output_field=models.IntegerField()))\n super(PermissionCTE, self).__init__(queryset=queryset.values(\n 'user_id', 'channel_id', 'tree_id'), **kwargs)\n\n @classmethod\n def editable_channels(cls, user_id):\n return PermissionCTE(User.editable_channels.through, user_id, name=\n 'editable_channels_cte')\n\n @classmethod\n def view_only_channels(cls, user_id):\n return PermissionCTE(User.view_only_channels.through, user_id, name\n ='view_only_channels_cte')\n\n def exists(self, *filters):\n return Exists(self.queryset().filter(*filters).values('user_id'))\n\n\nclass Channel(models.Model):\n \"\"\" Permissions come from association with organizations \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n tagline = models.CharField(max_length=150, blank=True, null=True)\n version = models.IntegerField(default=0)\n thumbnail = models.TextField(blank=True, null=True)\n thumbnail_encoding = JSONField(default=dict)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='editable_channels', verbose_name='editors', help_text=\n 'Users with edit rights', blank=True)\n viewers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='view_only_channels', verbose_name='viewers', help_text=\n 'Users with view only rights', blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='channel_language', on_delete=models.SET_NULL)\n trash_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_trash', on_delete=models.SET_NULL)\n clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_clipboard', on_delete=models.SET_NULL)\n main_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_main', on_delete=models.SET_NULL)\n staging_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_staging', on_delete=models.SET_NULL)\n chef_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_chef', on_delete=models.SET_NULL)\n previous_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_previous', on_delete=models.SET_NULL)\n bookmarked_by = models.ManyToManyField(settings.AUTH_USER_MODEL,\n related_name='bookmarked_channels', verbose_name='bookmarked by')\n deleted = models.BooleanField(default=False, db_index=True)\n public = models.BooleanField(default=False, db_index=True)\n preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)\n content_defaults = JSONField(default=dict)\n priority = models.IntegerField(default=0, help_text=\n 'Order to display public 
channels')\n last_published = models.DateTimeField(blank=True, null=True)\n secret_tokens = models.ManyToManyField(SecretToken, related_name=\n 'channels', verbose_name='secret tokens', blank=True)\n source_url = models.CharField(max_length=200, blank=True, null=True)\n demo_server_url = models.CharField(max_length=200, blank=True, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n ricecooker_version = models.CharField(max_length=100, blank=True, null=True\n )\n published_data = JSONField(default=dict)\n icon_encoding = models.TextField(blank=True, null=True)\n total_resource_count = models.IntegerField(default=0)\n published_kind_count = models.TextField(blank=True, null=True)\n published_size = models.FloatField(default=0)\n included_languages = models.ManyToManyField('Language', related_name=\n 'channels', verbose_name='languages', blank=True)\n _field_updates = FieldTracker(fields=['description', 'language_id',\n 'thumbnail', 'name', 'thumbnail_encoding', 'deleted', 'public',\n 'main_tree_id', 'version'])\n\n @classmethod\n def get_editable(cls, user, channel_id):\n return cls.filter_edit_queryset(cls.objects.all(), user).get(id=\n channel_id)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit = Exists(User.editable_channels.through.objects.filter(user_id\n =user_id, channel_id=OuterRef('id')))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n user_email = not user.is_anonymous and user.email\n if user_id:\n filters = dict(user_id=user_id, channel_id=OuterRef('id'))\n edit = Exists(User.editable_channels.through.objects.filter(**\n filters).values('user_id'))\n view = Exists(User.view_only_channels.through.objects.filter(**\n filters).values('user_id'))\n else:\n edit = boolean_val(False)\n view = boolean_val(False)\n queryset = queryset.annotate(edit=edit, view=view)\n if user_id and user.is_admin:\n return queryset\n permission_filter = Q()\n if user_id:\n pending_channels = Invitation.objects.filter(email=user_email,\n revoked=False, declined=False, accepted=False).values_list(\n 'channel_id', flat=True)\n permission_filter = Q(view=True) | Q(edit=True) | Q(deleted=\n False, id__in=pending_channels)\n return queryset.filter(permission_filter | Q(deleted=False, public=\n True))\n\n @classmethod\n def get_all_channels(cls):\n return cls.objects.select_related('main_tree').prefetch_related(\n 'editors', 'viewers').distinct()\n\n def resource_size_key(self):\n return '{}_resource_size'.format(self.pk)\n\n def get_resource_size(self):\n cached_data = cache.get(self.resource_size_key())\n if cached_data:\n return cached_data\n tree_id = self.main_tree.tree_id\n files = File.objects.select_related('contentnode', 'assessment_item'\n ).filter(contentnode__tree_id=tree_id).values('checksum',\n 'file_size').distinct().aggregate(resource_size=Sum('file_size'))\n cache.set(self.resource_size_key(), files['resource_size'] or 0, None)\n return files['resource_size'] or 0\n\n def on_create(self):\n record_channel_stats(self, None)\n if not self.content_defaults:\n self.content_defaults = DEFAULT_CONTENT_DEFAULTS\n if not self.main_tree:\n self.main_tree = ContentNode.objects.create(title=self.name,\n 
kind_id=content_kinds.TOPIC, content_id=self.id, node_id=\n self.id, original_channel_id=self.id, source_channel_id=\n self.id, changed=True, complete=True)\n if settings.DEBUG:\n if ContentNode.objects.filter(parent=None, tree_id=self.\n main_tree.tree_id).count() != 1:\n raise AssertionError\n if not self.trash_tree:\n self.trash_tree = ContentNode.objects.create(title=self.name,\n kind_id=content_kinds.TOPIC, content_id=self.id, node_id=\n self.id)\n if self.public and (self.main_tree and self.main_tree.published):\n delete_public_channel_cache_keys()\n\n def on_update(self):\n from contentcuration.utils.user import calculate_user_storage\n original_values = self._field_updates.changed()\n record_channel_stats(self, original_values)\n blacklist = set(['public', 'main_tree_id', 'version'])\n if self.main_tree and original_values and any(True for field in\n original_values if field not in blacklist):\n self.main_tree.changed = True\n if 'thumbnail' in original_values and original_values['thumbnail'\n ] and 'static' not in original_values['thumbnail']:\n filename, ext = os.path.splitext(original_values['thumbnail'])\n delete_empty_file_reference(filename, ext[1:])\n if 'deleted' in original_values:\n for editor in self.editors.all():\n calculate_user_storage(editor.pk)\n if 'deleted' in original_values and not original_values['deleted']:\n self.pending_editors.all().delete()\n export_db_storage_path = os.path.join(settings.DB_ROOT,\n '{channel_id}.sqlite3'.format(channel_id=self.id))\n if default_storage.exists(export_db_storage_path):\n default_storage.delete(export_db_storage_path)\n if self.main_tree:\n self.main_tree.published = False\n if self.main_tree and self.main_tree._field_updates.changed():\n self.main_tree.save()\n if 'public' in original_values and (self.main_tree and self.\n main_tree.published):\n delete_public_channel_cache_keys()\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n super(Channel, self).save(*args, **kwargs)\n\n def get_thumbnail(self):\n return get_channel_thumbnail(self)\n\n def has_changes(self):\n return self.main_tree.get_descendants(include_self=True).filter(changed\n =True).exists()\n\n def get_date_modified(self):\n return self.main_tree.get_descendants(include_self=True).aggregate(\n last_modified=Max('modified'))['last_modified']\n\n def get_resource_count(self):\n return self.main_tree.get_descendants().exclude(kind_id=\n content_kinds.TOPIC).order_by('content_id').distinct('content_id'\n ).count()\n\n def get_human_token(self):\n return self.secret_tokens.get(is_primary=True)\n\n def get_channel_id_token(self):\n return self.secret_tokens.get(token=self.id)\n\n def make_token(self):\n token = self.secret_tokens.create(token=SecretToken.\n generate_new_token(), is_primary=True)\n self.secret_tokens.get_or_create(token=self.id)\n return token\n\n def make_public(self, bypass_signals=False):\n \"\"\"\n Sets the current channel object to be public and viewable by anyone.\n\n If bypass_signals is True, update the model in such a way that we\n prevent any model signals from running due to the update.\n\n Returns the same channel object.\n \"\"\"\n if bypass_signals:\n self.public = True\n Channel.objects.filter(id=self.id).update(public=True)\n delete_public_channel_cache_keys()\n else:\n self.public = True\n self.save()\n return self\n\n def mark_created(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n CREATION)\n\n def mark_publishing(self, user):\n 
self.history.create(actor_id=to_pk(user), action=channel_history.\n PUBLICATION)\n self.main_tree.publishing = True\n self.main_tree.save()\n\n def mark_deleted(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n DELETION)\n self.deleted = True\n self.save()\n\n def mark_recovered(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n RECOVERY)\n self.deleted = False\n self.save()\n\n @property\n def deletion_history(self):\n return self.history.filter(action=channel_history.DELETION)\n\n @property\n def publishing_history(self):\n return self.history.filter(action=channel_history.PUBLICATION)\n\n @classmethod\n def get_public_channels(cls, defer_nonmain_trees=False):\n \"\"\"\n Get all public channels.\n\n If defer_nonmain_trees is True, defer the loading of all\n trees except for the main_tree.\"\"\"\n if defer_nonmain_trees:\n c = Channel.objects.filter(public=True).exclude(deleted=True\n ).select_related('main_tree').prefetch_related('editors'\n ).defer('trash_tree', 'clipboard_tree', 'staging_tree',\n 'chef_tree', 'previous_tree', 'viewers')\n else:\n c = Channel.objects.filter(public=True).exclude(deleted=True)\n return c\n\n\n class Meta:\n verbose_name = 'Channel'\n verbose_name_plural = 'Channels'\n indexes = [models.Index(fields=['name'], name=CHANNEL_NAME_INDEX_NAME)]\n index_together = [['deleted', 'public']]\n\n\n<mask token>\n\n\nclass ChannelHistory(models.Model):\n \"\"\"\n Model for tracking certain actions performed on a channel\n \"\"\"\n channel = models.ForeignKey('Channel', null=False, blank=False,\n related_name='history', on_delete=models.CASCADE)\n actor = models.ForeignKey('User', null=False, blank=False, related_name\n ='channel_history', on_delete=models.CASCADE)\n performed = models.DateTimeField(default=timezone.now)\n action = models.CharField(max_length=50, choices=channel_history.choices)\n\n @classmethod\n def prune(cls):\n \"\"\"\n Prunes history records by keeping the most recent actions for each channel and type,\n and deleting all other older actions\n \"\"\"\n keep_ids = cls.objects.distinct('channel_id', 'action').order_by(\n 'channel_id', 'action', '-performed').values_list('id', flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()\n\n\n class Meta:\n verbose_name = 'Channel history'\n verbose_name_plural = 'Channel histories'\n indexes = [models.Index(fields=['channel_id'], name=\n CHANNEL_HISTORY_CHANNEL_INDEX_NAME)]\n\n\nclass UserHistory(models.Model):\n \"\"\"\n Model that stores the user's action history.\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=\n False, related_name='history', on_delete=models.CASCADE)\n action = models.CharField(max_length=32, choices=user_history.choices)\n performed_at = models.DateTimeField(default=timezone.now)\n\n\nclass ChannelSet(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n public = models.BooleanField(default=False, db_index=True)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='channel_sets', verbose_name='editors', help_text=\n 'Users with edit rights', blank=True)\n secret_token = models.ForeignKey('SecretToken', null=True, blank=True,\n related_name='channel_sets', on_delete=models.SET_NULL)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n user_id = not user.is_anonymous and 
user.id\n edit = Exists(User.channel_sets.through.objects.filter(user_id=\n user_id, channelset_id=OuterRef('id')))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n return cls.filter_edit_queryset(queryset, user)\n\n def get_channels(self):\n if self.secret_token:\n return self.secret_token.channels.filter(deleted=False)\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n super(ChannelSet, self).save()\n\n def on_create(self):\n if not self.secret_token:\n self.secret_token = SecretToken.objects.create(token=\n SecretToken.generate_new_token())\n\n def delete(self, *args, **kwargs):\n super(ChannelSet, self).delete(*args, **kwargs)\n if self.secret_token:\n self.secret_token.delete()\n\n\nclass ContentTag(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n tag_name = models.CharField(max_length=50)\n channel = models.ForeignKey('Channel', related_name='tags', blank=True,\n null=True, db_index=True, on_delete=models.SET_NULL)\n objects = CustomManager()\n\n def __str__(self):\n return self.tag_name\n\n\n class Meta:\n unique_together = ['tag_name', 'channel']\n\n\nclass License(models.Model):\n \"\"\"\n Normalize the license of ContentNode model\n \"\"\"\n license_name = models.CharField(max_length=50)\n license_url = models.URLField(blank=True)\n license_description = models.TextField(blank=True)\n copyright_holder_required = models.BooleanField(default=True)\n is_custom = models.BooleanField(default=False)\n exists = models.BooleanField(default=False, verbose_name=\n 'license exists', help_text=\n 'Tells whether or not a content item is licensed to share')\n\n @classmethod\n def validate_name(cls, name):\n if cls.objects.filter(license_name=name).count() == 0:\n raise ValidationError('License `{}` does not exist'.format(name))\n\n def __str__(self):\n return self.license_name\n\n\n<mask token>\n\n\nclass ContentNode(MPTTModel, models.Model):\n \"\"\"\n By default, all nodes have a title and can be used as a topic.\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=\n False, db_index=True)\n node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n original_channel_id = UUIDField(primary_key=False, editable=False, null\n =True, db_index=True)\n source_channel_id = UUIDField(primary_key=False, editable=False, null=True)\n original_source_node_id = UUIDField(primary_key=False, editable=False,\n null=True, db_index=True)\n source_node_id = UUIDField(primary_key=False, editable=False, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n title = models.CharField(max_length=200, blank=True)\n description = models.TextField(blank=True)\n kind = models.ForeignKey('ContentKind', related_name='contentnodes',\n db_index=True, null=True, blank=True, on_delete=models.SET_NULL)\n license = models.ForeignKey('License', null=True, blank=True, on_delete\n =models.SET_NULL)\n license_description = models.CharField(max_length=400, null=True, blank\n =True)\n prerequisite = models.ManyToManyField('self', related_name=\n 'is_prerequisite_of', through='PrerequisiteContentRelationship',\n symmetrical=False, blank=True)\n is_related = models.ManyToManyField('self', related_name='relate_to',\n 
through='RelatedContentRelationship', symmetrical=False, blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='content_language', on_delete=models.SET_NULL)\n parent = TreeForeignKey('self', null=True, blank=True, related_name=\n 'children', db_index=True, on_delete=models.CASCADE)\n tags = models.ManyToManyField(ContentTag, symmetrical=False,\n related_name='tagged_content', blank=True)\n sort_order = models.FloatField(max_length=50, default=1, verbose_name=\n 'sort order', help_text='Ascending, lowest number shown first')\n copyright_holder = models.CharField(max_length=200, null=True, blank=\n True, default='', help_text=\n 'Organization of person who holds the essential rights')\n original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='duplicates')\n cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='clones')\n thumbnail_encoding = models.TextField(blank=True, null=True)\n created = models.DateTimeField(default=timezone.now, verbose_name='created'\n )\n modified = models.DateTimeField(auto_now=True, verbose_name='modified')\n published = models.BooleanField(default=False)\n publishing = models.BooleanField(default=False)\n complete = models.BooleanField(null=True)\n changed = models.BooleanField(default=True)\n \"\"\"\n Extra fields for exercises:\n - type: mastery model to use to determine completion\n - m: m value for M out of N mastery criteria\n - n: n value for M out of N mastery criteria\n \"\"\"\n extra_fields = JSONField(default=dict, blank=True, null=True)\n author = models.CharField(max_length=200, blank=True, default='',\n help_text='Who created this content?', null=True)\n aggregator = models.CharField(max_length=200, blank=True, default='',\n help_text='Who gathered this content together?', null=True)\n provider = models.CharField(max_length=200, blank=True, default='',\n help_text='Who distributed this content?', null=True)\n role_visibility = models.CharField(max_length=50, choices=roles.choices,\n default=roles.LEARNER)\n freeze_authoring_data = models.BooleanField(default=False)\n grade_levels = models.JSONField(blank=True, null=True)\n resource_types = models.JSONField(blank=True, null=True)\n learning_activities = models.JSONField(blank=True, null=True)\n accessibility_labels = models.JSONField(blank=True, null=True)\n categories = models.JSONField(blank=True, null=True)\n learner_needs = models.JSONField(blank=True, null=True)\n suggested_duration = models.IntegerField(blank=True, null=True,\n help_text='Suggested duration for the content node (in seconds)')\n objects = CustomContentNodeTreeManager()\n _field_updates = FieldTracker()\n _permission_filter = Q(tree_id=OuterRef('tree_id'))\n\n @classmethod\n def _annotate_channel_id(cls, queryset):\n return queryset.annotate(channel_id=Subquery(Channel.objects.filter\n (main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat\n =True)[:1]))\n\n @classmethod\n def filter_by_pk(cls, pk):\n \"\"\"\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always\n returns a queryset filtered by pk.\n\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode\n for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. 
If\n a ContentNode does not exist for `pk` then an empty queryset is returned.\n \"\"\"\n query = ContentNode.objects.filter(pk=pk)\n if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:\n tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n if tree_id:\n query = query.filter(tree_id=tree_id)\n else:\n tree_id = ContentNode.objects.filter(pk=pk).values_list(\n 'tree_id', flat=True).first()\n if tree_id:\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),\n tree_id, None)\n query = query.filter(tree_id=tree_id)\n else:\n query = query.none()\n return query\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n @raise_if_unsaved\n def get_root(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return super(ContentNode, self).get_root()\n\n @raise_if_unsaved\n def get_root_id(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return ContentNode.objects.values_list('pk', flat=True).get(tree_id\n =self._mpttfield('tree_id'), parent=None)\n\n def get_tree_data(self, levels=float('inf')):\n \"\"\"\n Returns `levels`-deep tree information starting at current node.\n Args:\n levels (int): depth of tree hierarchy to return\n Returns:\n tree (dict): starting with self, with children list containing either\n just the children's `node_id`s or the full recursive tree.\n \"\"\"\n if self.kind_id == content_kinds.TOPIC:\n node_data = {'title': self.title, 'kind': self.kind_id,\n 'node_id': self.node_id, 'studio_id': self.id}\n children = self.children.all()\n if levels > 0:\n node_data['children'] = [c.get_tree_data(levels=levels - 1) for\n c in children]\n return node_data\n if self.kind_id == content_kinds.EXERCISE:\n return {'title': self.title, 'kind': self.kind_id, 'count':\n self.assessment_items.count(), 'node_id': self.node_id,\n 'studio_id': self.id}\n return {'title': self.title, 'kind': self.kind_id, 'file_size':\n self.files.values('file_size').aggregate(size=Sum('file_size'))\n ['size'], 'node_id': self.node_id, 'studio_id': self.id}\n\n def get_original_node(self):\n original_node = self.original_node or self\n if self.original_channel_id and self.original_source_node_id:\n original_tree_id = Channel.objects.select_related('main_tree').get(\n pk=self.original_channel_id).main_tree.tree_id\n original_node = ContentNode.objects.filter(tree_id=\n original_tree_id, node_id=self.original_source_node_id).first(\n ) or 
ContentNode.objects.filter(tree_id=original_tree_id,\n content_id=self.content_id).first() or self\n return original_node\n\n def get_associated_presets(self):\n key = 'associated_presets_{}'.format(self.kind_id)\n cached_data = cache.get(key)\n if cached_data:\n return cached_data\n presets = list(FormatPreset.objects.filter(kind=self.kind).values())\n cache.set(key, presets, None)\n return presets\n\n def get_prerequisites(self):\n prerequisite_mapping = {}\n prerequisites = self.prerequisite.all()\n prereqlist = list(prerequisites)\n for prereq in prerequisites:\n prlist, prereqmapping = prereq.get_prerequisites()\n prerequisite_mapping.update({prereq.pk: prereqmapping})\n prereqlist.extend(prlist)\n return prereqlist, prerequisite_mapping\n\n def get_postrequisites(self):\n postrequisite_mapping = {}\n postrequisites = self.is_prerequisite_of.all()\n postreqlist = list(postrequisites)\n for postreq in postrequisites:\n prlist, postreqmapping = postreq.get_postrequisites()\n postrequisite_mapping.update({postreq.pk: postreqmapping})\n postreqlist.extend(prlist)\n return postreqlist, postrequisite_mapping\n\n def get_channel_id(self):\n if hasattr(self, 'channel_id'):\n return self.channel_id\n channel = self.get_channel()\n if channel:\n return channel.id\n return None\n\n def get_channel(self):\n try:\n root = self.get_root()\n if not root:\n return None\n return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=\n root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(\n previous_tree=root)).first()\n except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):\n return None\n\n def get_thumbnail(self):\n if self.thumbnail_encoding:\n thumbnail_data = load_json_string(self.thumbnail_encoding)\n if type(thumbnail_data) is dict and thumbnail_data.get('base64'):\n return thumbnail_data['base64']\n thumbnail = self.files.filter(preset__thumbnail=True).first()\n if thumbnail:\n return generate_storage_url(str(thumbnail))\n return ''\n\n @classmethod\n def get_nodes_with_title(cls, title, limit_to_children_of=None):\n \"\"\"\n Returns all ContentNodes with a given title. 
If limit_to_children_of\n is passed in with an id, only look at all the children of the node with that id.\n \"\"\"\n if limit_to_children_of:\n root = cls.objects.get(id=limit_to_children_of)\n return root.get_descendants().filter(title=title)\n return cls.objects.filter(title=title)\n\n def get_details(self, channel_id=None):\n \"\"\"\n Returns information about the node and its children, including total size, languages, files, etc.\n\n :return: A dictionary with detailed statistics and information about the node.\n \"\"\"\n from contentcuration.viewsets.common import SQArrayAgg\n from contentcuration.viewsets.common import SQCount\n from contentcuration.viewsets.common import SQRelatedArrayAgg\n from contentcuration.viewsets.common import SQSum\n from contentcuration.viewsets.common import SQJSONBKeyArrayAgg\n node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id\n ).order_by()\n descendants = self.get_descendants().values('id')\n if channel_id:\n channel = Channel.objects.filter(id=channel_id)[0]\n else:\n channel = self.get_channel()\n if not descendants.exists():\n data = {'last_update': pytz.utc.localize(datetime.now()).\n strftime(settings.DATE_TIME_FORMAT), 'created': self.\n created.strftime(settings.DATE_TIME_FORMAT),\n 'resource_count': 0, 'resource_size': 0, 'includes': {\n 'coach_content': 0, 'exercises': 0}, 'kind_count': [],\n 'languages': [], 'accessible_languages': [], 'licenses': [],\n 'tags': [], 'copyright_holders': [], 'authors': [],\n 'aggregators': [], 'providers': [], 'sample_pathway': [],\n 'original_channels': [], 'sample_nodes': [], 'levels': [],\n 'categories': []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None\n )\n return data\n resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()\n nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('checksum', 'file_size').\n order_by(), name='nodes')\n file_query = nodes.queryset().with_cte(nodes).values('checksum',\n 'file_size').distinct()\n l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('language_id', 'preset_id').\n order_by(), name='l_nodes')\n accessible_languages_query = l_nodes.queryset().filter(preset_id=\n format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(\n 'language__native_name').distinct()\n tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=\n descendants.values_list('pk', flat=True)).values('tag_name').\n annotate(count=Count('tag_name')).query).replace('topic', \"'topic'\"\n )\n kind_count_query = str(resources.values('kind_id').annotate(count=\n Count('kind_id')).query).replace('topic', \"'topic'\")\n node = node.annotate(resource_count=SQCount(resources, field='id'),\n resource_size=SQSum(file_query, field='file_size'),\n copyright_holders=SQArrayAgg(resources.distinct(\n 'copyright_holder').order_by('copyright_holder'), field=\n 'copyright_holder'), authors=SQArrayAgg(resources.distinct(\n 'author').order_by('author'), field='author'), aggregators=\n SQArrayAgg(resources.distinct('aggregator').order_by(\n 'aggregator'), field='aggregator'), providers=SQArrayAgg(\n resources.distinct('provider').order_by('provider'), field=\n 'provider'), languages=SQRelatedArrayAgg(descendants.exclude(\n language=None).distinct('language__native_name').order_by(),\n field='language__native_name', fieldname='native_name'),\n accessible_languages=SQRelatedArrayAgg(\n accessible_languages_query, field='language__native_name',\n fieldname='native_name'), 
licenses=SQRelatedArrayAgg(resources.\n exclude(license=None).distinct('license__license_name').\n order_by('license__license_name'), field=\n 'license__license_name', fieldname='license_name'), kind_count=\n RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format\n (kind_count_query), ()), tags_list=RawSQL(\n 'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(\n tags_query), ()), coach_content=SQCount(resources.filter(\n role_visibility=roles.COACH), field='id'), exercises=SQCount(\n resources.filter(kind_id=content_kinds.EXERCISE), field='id'),\n levels=SQJSONBKeyArrayAgg(descendants.exclude(\n grade_levels__isnull=True), field='grade_levels'),\n all_categories=SQJSONBKeyArrayAgg(descendants.exclude(\n categories__isnull=True), field='categories'))\n max_level = max(resources.values_list('level', flat=True).order_by(\n ).distinct() or [0])\n m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').\n order_by(), name='m_nodes')\n deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level\n =max_level).values('id').order_by('tree_id', 'lft').first()\n if deepest_node_record:\n deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']\n )\n pathway = list(deepest_node.get_ancestors().order_by().exclude(\n parent=None).values('title', 'node_id', 'kind_id').order_by()\n ) if deepest_node_record else []\n sample_nodes = [{'node_id': n.node_id, 'title': n.title,\n 'description': n.description, 'thumbnail': n.get_thumbnail(),\n 'kind': n.kind_id} for n in deepest_node.get_siblings(\n include_self=True)[0:4]] if deepest_node_record else []\n channel_id = channel and channel.id\n originals = resources.values('original_channel_id').annotate(count=\n Count('original_channel_id')).order_by('original_channel_id')\n originals = {c['original_channel_id']: c['count'] for c in originals}\n original_channels = Channel.objects.exclude(pk=channel_id).filter(\n pk__in=originals.keys(), deleted=False).order_by()\n original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(\n ' (Original)') if channel_id == c.id else ''), 'thumbnail': c.\n get_thumbnail(), 'count': originals[c.id]} for c in\n original_channels]\n node = node.order_by().values('id', 'resource_count',\n 'resource_size', 'copyright_holders', 'authors', 'aggregators',\n 'providers', 'languages', 'accessible_languages',\n 'coach_content', 'licenses', 'tags_list', 'kind_count',\n 'exercises', 'levels', 'all_categories').first()\n for_educators = {'coach_content': node['coach_content'],\n 'exercises': node['exercises']}\n data = {'last_update': pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT), 'created': self.created.strftime(\n settings.DATE_TIME_FORMAT), 'resource_count': node.get(\n 'resource_count', 0), 'resource_size': node.get('resource_size',\n 0), 'includes': for_educators, 'kind_count': node.get(\n 'kind_count') or [], 'languages': node.get('languages') or [],\n 'accessible_languages': node.get('accessible_languages') or [],\n 'licenses': node.get('licenses') or [], 'tags': node.get(\n 'tags_list') or [], 'original_channels': original_channels,\n 'sample_pathway': pathway, 'sample_nodes': sample_nodes,\n 'authors': list(filter(bool, node['authors'])), 'aggregators':\n list(filter(bool, node['aggregators'])), 'providers': list(\n filter(bool, node['providers'])), 'copyright_holders': list(\n filter(bool, node['copyright_holders'])), 'levels': node.get(\n 'levels') or [], 'categories': node.get('all_categories') or []}\n cache.set('details_{}'.format(self.node_id), 
json.dumps(data), None)\n return data\n\n def has_changes(self):\n mptt_opts = self._mptt_meta\n blacklist = set(['changed', 'modified', 'publishing', mptt_opts.\n tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,\n mptt_opts.level_attr])\n original_values = self._field_updates.changed()\n return any(True for field in original_values if field not in blacklist)\n\n def recalculate_editors_storage(self):\n from contentcuration.utils.user import calculate_user_storage\n for editor in self.files.values_list('uploaded_by_id', flat=True\n ).distinct():\n calculate_user_storage(editor)\n\n def mark_complete(self):\n errors = []\n if not (bool(self.title) or self.parent_id is None):\n errors.append('Empty title')\n if self.kind_id != content_kinds.TOPIC:\n if not self.license:\n errors.append('Missing license')\n if (self.license and self.license.is_custom and not self.\n license_description):\n errors.append('Missing license description for custom license')\n if (self.license and self.license.copyright_holder_required and\n not self.copyright_holder):\n errors.append('Missing required copyright holder')\n if (self.kind_id != content_kinds.EXERCISE and not self.files.\n filter(preset__supplementary=False).exists()):\n errors.append('Missing default file')\n if self.kind_id == content_kinds.EXERCISE:\n if not self.assessment_items.filter(~Q(raw_data='') | ~Q(\n question='') & ~Q(answers='[]') & (Q(type=exercises.\n INPUT_QUESTION) | Q(answers__iregex='\"correct\":\\\\s*true'))\n ).exists():\n errors.append(\n 'No questions with question text and complete answers')\n criterion = self.extra_fields.get('options', {}).get(\n 'completion_criteria')\n if not (self.extra_fields.get('mastery_model') or criterion):\n errors.append('Missing mastery criterion')\n if criterion:\n try:\n completion_criteria.validate(criterion, kind=\n content_kinds.EXERCISE)\n except completion_criteria.ValidationError:\n errors.append(\n 'Mastery criterion is defined but is invalid')\n self.complete = not errors\n return errors\n\n def make_content_id_unique(self):\n \"\"\"\n If self is NOT an original contentnode (in other words, a copied contentnode)\n and a contentnode with same content_id exists then we update self's content_id.\n \"\"\"\n is_node_original = (self.original_source_node_id is None or self.\n original_source_node_id == self.node_id)\n node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(\n content_id=self.content_id)\n if not is_node_original and node_same_content_id.exists():\n ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.\n uuid4().hex)\n\n def on_create(self):\n self.changed = True\n self.recalculate_editors_storage()\n self.set_default_learning_activity()\n\n def on_update(self):\n self.changed = self.changed or self.has_changes()\n\n def move_to(self, target, *args, **kwargs):\n parent_was_trashtree = self.parent.channel_trash.exists()\n super(ContentNode, self).move_to(target, *args, **kwargs)\n self.save()\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.\n tree_id, None)\n if target.channel_trash.exists() or parent_was_trashtree:\n self.recalculate_editors_storage()\n\n def set_default_learning_activity(self):\n if self.learning_activities is None:\n if self.kind in kind_activity_map:\n self.learning_activities = {kind_activity_map[self.kind]: True}\n\n def save(self, skip_lock=False, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n old_parent_id = self._field_updates.changed().get('parent_id')\n if 
self._state.adding and (self.parent_id or self.parent):\n same_order = False\n elif old_parent_id is DeferredAttribute:\n same_order = True\n else:\n same_order = old_parent_id == self.parent_id\n if not same_order:\n changed_ids = list(filter(lambda x: x is not None, set([\n old_parent_id, self.parent_id])))\n else:\n changed_ids = []\n if not same_order and not skip_lock:\n with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(\n id__in=[pid for pid in [old_parent_id, self.parent_id] if\n pid]).values_list('tree_id', flat=True).distinct()):\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(\n changed=True)\n else:\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed\n =True)\n save.alters_data = True\n\n def delete(self, *args, **kwargs):\n parent = self.parent or self._field_updates.changed().get('parent')\n if parent:\n parent.changed = True\n parent.save()\n self.recalculate_editors_storage()\n with ContentNode.objects.lock_mptt(self.tree_id):\n return super(ContentNode, self).delete(*args, **kwargs)\n delete.alters_data = True\n\n def copy_to(self, target=None, position='last-child', pk=None, mods=\n None, excluded_descendants=None, can_edit_source_channel=None,\n batch_size=None, progress_tracker=None):\n return self._tree_manager.copy_node(self, target, position, pk,\n mods, excluded_descendants, can_edit_source_channel, batch_size,\n progress_tracker)[0]\n\n def copy(self):\n return self.copy_to()\n\n def is_publishable(self):\n return self.complete and self.get_descendants(include_self=True\n ).exclude(kind_id=content_kinds.TOPIC).exists()\n\n\n class Meta:\n verbose_name = 'Topic'\n verbose_name_plural = 'Topics'\n indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME\n ), models.Index(fields=['-modified'], name=\n NODE_MODIFIED_DESC_INDEX_NAME)]\n\n\nclass ContentKind(models.Model):\n kind = models.CharField(primary_key=True, max_length=200, choices=\n content_kinds.choices)\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=\n file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=\n format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True)\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets',\n null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n _, ext = os.path.splitext(filename)\n ext = ext.lstrip('.')\n f = FormatPreset.objects.filter(allowed_formats__extension=ext,\n display=True)\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format 
preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=\n True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.\n LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.\n lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\n<mask token>\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default='multiplechoice')\n question = models.TextField(blank=True)\n hints = models.TextField(default='[]')\n answers = models.TextField(default='[]')\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'assessment_items', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,\n editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n objects = CustomManager()\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n\n class Meta:\n indexes = [models.Index(fields=['assessment_id'], name=\n ASSESSMENT_ID_INDEX_NAME)]\n unique_together = ['contentnode', 'assessment_id']\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'\n )).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n \"\"\"\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n \"\"\"\n When an exercise of a contentnode is updated, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n When an exercise is deleted from a 
contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, **kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'slideshow_slides', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n \"\"\"\n Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit\n \"\"\"\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files',\n blank=True, null=True, on_delete=models.CASCADE)\n\n\n<mask token>\n\n\nclass File(models.Model):\n \"\"\"\n The bottom layer of the contentDB schema, defines the basic building brick for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=\n default_storage, max_length=500, blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files',\n blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n file_format = models.ForeignKey(FileFormat, related_name='files', blank\n =True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=\n True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n modified = models.DateTimeField(auto_now=True, verbose_name='modified',\n null=True)\n duration = models.IntegerField(blank=True, null=True)\n objects = CustomManager()\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(\n tree_id=OuterRef('assessment_item__contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.\n _permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(edit=True) | Q(uploaded_by=user,\n contentnode__isnull=True, assessment_item__isnull=True))\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True).filter(Q(main_tree__tree_id=OuterRef(\n 'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(\n 
'assessment_item__contentnode__tree_id'))).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |\n Q(uploaded_by=user, contentnode__isnull=True,\n assessment_item__isnull=True))\n\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum,\n extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. abcd.mp4\n \"\"\"\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Override the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError('Invalid file_format')\n if set_by_file_on_disk and self.file_on_disk:\n if self.checksum is None or self.checksum == '':\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError('Files of type `{}` are not supported.'\n .format(ext))\n super(File, self).save(*args, **kwargs)\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n\n class Meta:\n indexes = [models.Index(fields=['checksum', 'file_size'], name=\n FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],\n name=FILE_MODIFIED_DESC_INDEX_NAME)]\n constraints = [models.CheckConstraint(check=Q(preset__in=\n MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name\n =FILE_DURATION_CONSTRAINT)]\n\n\n<mask token>\n\n\nclass PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n if PrerequisiteContentRelationship.objects.using(self._state.db\n 
).filter(target_node=self.prerequisite, prerequisite=self.\n target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n if RelatedContentRelationship.objects.using(self._state.db).filter(\n contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1\n ):\n return\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\n 'sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name=\n 'pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = models.CharField(max_length=100, blank=True, null=True)\n\n\n class Meta:\n verbose_name = 'Invitation'\n verbose_name_plural = 'Invitations'\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user)).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user) | Q(channel__viewers=user)).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,\n blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=\n models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=\n True, on_delete=models.CASCADE, 
related_name='changes_about_user')\n client_rev = models.IntegerField(null=True, blank=True)\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=\n models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None,\n user_id=None, session_key=None, applied=False, table=None, rev=None,\n **data):\n change_type = data.pop('type')\n if table is None or table not in ALL_TABLES:\n raise TypeError(\n 'table is a required argument for creating changes and must be a valid table name'\n )\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\n 'change_type is a required argument for creating changes and must be a valid change type integer'\n )\n return cls(session_id=session_key, created_by_id=created_by_id,\n channel_id=channel_id, user_id=user_id, client_rev=rev, table=\n table, change_type=change_type, kwargs=data, applied=applied)\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None,\n applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=\n created_by_id, session_key=session_key, applied=applied, **\n change))\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None,\n applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id,\n session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, ['kwargs']).copy()\n datum.update({'server_rev': get_attribute(change, ['server_rev']),\n 'table': get_attribute(change, ['table']), 'type':\n get_attribute(change, ['change_type']), 'channel_id':\n get_attribute(change, ['channel_id']), 'user_id': get_attribute\n (change, ['user_id']), 'created_by_id': get_attribute(change, [\n 'created_by_id'])})\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. 
See `0141_add_task_signature` for an example\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',\n on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[\n MinValueValidator(0), MaxValueValidator(100)])\n signature = models.CharField(null=True, blank=False, max_length=32)\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(user_id=self.user_id, channel_id=self.channel_id,\n progress=self.progress)\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith('_') and field not in (\n 'contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n setattr(model_class._meta, 'indexes', getattr(model_class._meta,\n 'indexes', []) + cls.Meta.indexes)\n\n\n class Meta:\n indexes = [models.Index(fields=['signature'], name=\n 'task_result_signature_idx', condition=Q(status__in=\n celery_states.UNREADY_STATES))]\n\n\n<mask token>\n",
"step-5": "import hashlib\nimport json\nimport logging\nimport os\nimport urllib.parse\nimport uuid\nfrom datetime import datetime\n\nimport pytz\nfrom celery import states as celery_states\nfrom django.conf import settings\nfrom django.contrib.auth.base_user import AbstractBaseUser\nfrom django.contrib.auth.base_user import BaseUserManager\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.contrib.sessions.models import Session\nfrom django.core.cache import cache\nfrom django.core.exceptions import MultipleObjectsReturned\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.storage import default_storage\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.mail import send_mail\nfrom django.core.validators import MaxValueValidator\nfrom django.core.validators import MinValueValidator\nfrom django.db import IntegrityError\nfrom django.db import models\nfrom django.db.models import Count\nfrom django.db.models import Exists\nfrom django.db.models import F\nfrom django.db.models import Index\nfrom django.db.models import JSONField\nfrom django.db.models import Max\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.db.models import UUIDField as DjangoUUIDField\nfrom django.db.models import Value\nfrom django.db.models.expressions import ExpressionList\nfrom django.db.models.expressions import RawSQL\nfrom django.db.models.functions import Lower\nfrom django.db.models.indexes import IndexExpression\nfrom django.db.models.query_utils import DeferredAttribute\nfrom django.db.models.sql import Query\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\nfrom django_celery_results.models import TaskResult\nfrom django_cte import With\nfrom le_utils import proquint\nfrom le_utils.constants import content_kinds\nfrom le_utils.constants import exercises\nfrom le_utils.constants import file_formats\nfrom le_utils.constants import format_presets\nfrom le_utils.constants import languages\nfrom le_utils.constants import roles\nfrom model_utils import FieldTracker\nfrom mptt.models import MPTTModel\nfrom mptt.models import raise_if_unsaved\nfrom mptt.models import TreeForeignKey\nfrom postmark.core import PMMailInactiveRecipientException\nfrom postmark.core import PMMailUnauthorizedException\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.fields import get_attribute\nfrom rest_framework.utils.encoders import JSONEncoder\n\nfrom contentcuration.constants import channel_history\nfrom contentcuration.constants import completion_criteria\nfrom contentcuration.constants import user_history\nfrom contentcuration.constants.contentnode import kind_activity_map\nfrom contentcuration.db.models.expressions import Array\nfrom contentcuration.db.models.functions import ArrayRemove\nfrom contentcuration.db.models.functions import Unnest\nfrom contentcuration.db.models.manager import CustomContentNodeTreeManager\nfrom contentcuration.db.models.manager import CustomManager\nfrom contentcuration.statistics import record_channel_stats\nfrom contentcuration.utils.cache import delete_public_channel_cache_keys\nfrom contentcuration.utils.parser import load_json_string\nfrom contentcuration.viewsets.sync.constants import ALL_CHANGES\nfrom 
contentcuration.viewsets.sync.constants import ALL_TABLES\n\n\nEDIT_ACCESS = \"edit\"\nVIEW_ACCESS = \"view\"\n\nDEFAULT_CONTENT_DEFAULTS = {\n 'license': None,\n 'language': None,\n 'author': None,\n 'aggregator': None,\n 'provider': None,\n 'copyright_holder': None,\n 'license_description': None,\n 'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5,\n 'm_value': 5,\n 'n_value': 5,\n 'auto_derive_video_thumbnail': True,\n 'auto_derive_audio_thumbnail': True,\n 'auto_derive_document_thumbnail': True,\n 'auto_derive_html5_thumbnail': True,\n 'auto_derive_exercise_thumbnail': True,\n 'auto_randomize_questions': True,\n}\nDEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False)\n\n\ndef to_pk(model_or_pk):\n if isinstance(model_or_pk, models.Model):\n return model_or_pk.pk\n return model_or_pk\n\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, email, first_name, last_name, password=None):\n if not email:\n raise ValueError('Email address not specified')\n\n new_user = self.model(\n email=self.normalize_email(email),\n )\n\n new_user.set_password(password)\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.save(using=self._db)\n return new_user\n\n def create_superuser(self, email, first_name, last_name, password=None):\n new_user = self.create_user(email, first_name, last_name, password=password)\n new_user.is_admin = True\n new_user.save(using=self._db)\n return new_user\n\n\nclass UniqueActiveUserIndex(Index):\n def create_sql(self, model, schema_editor, using='', **kwargs):\n \"\"\"\n This is a vendored and modified version of the Django create_sql method.\n We do this so that we can monkey patch in the unique index statement onto the schema_editor\n while we create the statement for this index, and then revert it to normal.\n\n We should remove this as soon as Django natively supports UniqueConstraints with Expressions.\n This should hopefully be the case in Django 3.3.\n \"\"\"\n include = [model._meta.get_field(field_name).column for field_name in self.include]\n condition = self._get_condition_sql(model, schema_editor)\n if self.expressions:\n index_expressions = []\n for expression in self.expressions:\n index_expression = IndexExpression(expression)\n index_expression.set_wrapper_classes(schema_editor.connection)\n index_expressions.append(index_expression)\n expressions = ExpressionList(*index_expressions).resolve_expression(\n Query(model, alias_cols=False),\n )\n fields = None\n col_suffixes = None\n else:\n fields = [\n model._meta.get_field(field_name)\n for field_name, _ in self.fields_orders\n ]\n col_suffixes = [order[1] for order in self.fields_orders]\n expressions = None\n sql = \"CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(condition)s\"\n # Store the normal SQL statement for indexes\n old_create_index_sql = schema_editor.sql_create_index\n # Replace it with our own unique index so that this index actually adds a constraint\n schema_editor.sql_create_index = sql\n # Generate the SQL statement that we want to return\n return_statement = schema_editor._create_index_sql(\n model, fields=fields, name=self.name, using=using,\n db_tablespace=self.db_tablespace, col_suffixes=col_suffixes,\n opclasses=self.opclasses, condition=condition, include=include,\n expressions=expressions, **kwargs,\n )\n # Reinstate the previous index SQL statement so that we have done no harm\n schema_editor.sql_create_index = old_create_index_sql\n # Return our SQL statement\n return return_statement\n\n\nclass 
User(AbstractBaseUser, PermissionsMixin):\n email = models.EmailField(max_length=100, unique=True)\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n is_admin = models.BooleanField(default=False)\n is_active = models.BooleanField('active', default=False,\n help_text='Designates whether this user should be treated as active.')\n is_staff = models.BooleanField('staff status', default=False,\n help_text='Designates whether the user can log into this admin site.')\n date_joined = models.DateTimeField('date joined', default=timezone.now)\n clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL)\n preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)\n disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload')\n disk_space_used = models.FloatField(default=0, help_text='How many bytes a user has uploaded')\n\n information = JSONField(null=True)\n content_defaults = JSONField(default=dict)\n policies = JSONField(default=dict, null=True)\n feature_flags = JSONField(default=dict, null=True)\n\n deleted = models.BooleanField(default=False, db_index=True)\n\n _field_updates = FieldTracker(fields=[\n # Field to watch for changes\n \"disk_space\",\n ])\n\n objects = UserManager()\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name', 'last_name']\n\n def __unicode__(self):\n return self.email\n\n def delete(self):\n \"\"\"\n Soft deletes the user account.\n \"\"\"\n self.deleted = True\n # Deactivate the user to disallow authentication and also\n # to let the user verify the email again after recovery.\n self.is_active = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.DELETION)\n\n def recover(self):\n \"\"\"\n Use this method when we want to recover a user.\n \"\"\"\n self.deleted = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.RECOVERY)\n\n def hard_delete_user_related_data(self):\n \"\"\"\n Hard delete all user related data. 
But keeps the user record itself intact.\n\n User related data that gets hard deleted are:\n - sole editor non-public channels.\n - sole editor non-public channelsets.\n - sole editor non-public channels' content nodes and its underlying files that are not\n used by any other channel.\n - all user invitations.\n \"\"\"\n from contentcuration.viewsets.common import SQCount\n\n # Hard delete invitations associated to this account.\n self.sent_to.all().delete()\n self.sent_by.all().delete()\n\n editable_channels_user_query = (\n User.objects.filter(editable_channels__id=OuterRef('id'))\n .values_list('id', flat=True)\n .distinct()\n )\n non_public_channels_sole_editor = self.editable_channels.annotate(num_editors=SQCount(\n editable_channels_user_query, field=\"id\")).filter(num_editors=1, public=False)\n\n # Point sole editor non-public channels' contentnodes to orphan tree to let\n # our garbage collection delete the nodes and underlying files.\n ContentNode._annotate_channel_id(ContentNode.objects).filter(channel_id__in=list(\n non_public_channels_sole_editor.values_list(\"id\", flat=True))).update(parent_id=settings.ORPHANAGE_ROOT_ID)\n\n # Hard delete non-public channels associated with this user (if user is the only editor).\n non_public_channels_sole_editor.delete()\n\n # Hard delete non-public channel collections associated with this user (if user is the only editor).\n user_query = (\n User.objects.filter(channel_sets__id=OuterRef('id'))\n .values_list('id', flat=True)\n .distinct()\n )\n self.channel_sets.annotate(num_editors=SQCount(user_query, field=\"id\")).filter(num_editors=1, public=False).delete()\n\n # Create history!\n self.history.create(user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION)\n\n def can_edit(self, channel_id):\n return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists()\n\n def check_space(self, size, checksum):\n if self.is_admin:\n return True\n\n active_files = self.get_user_active_files()\n if active_files.filter(checksum=checksum).exists():\n return True\n\n space = self.get_available_space(active_files=active_files)\n if space < size:\n raise PermissionDenied(_(\"Not enough space. Check your storage under Settings page.\"))\n\n def check_channel_space(self, channel):\n active_files = self.get_user_active_files()\n staging_tree_id = channel.staging_tree.tree_id\n channel_files = self.files\\\n .filter(contentnode__tree_id=staging_tree_id)\\\n .values('checksum')\\\n .distinct()\\\n .exclude(checksum__in=active_files.values_list('checksum', flat=True))\n staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0)\n\n if self.get_available_space(active_files=active_files) < (staged_size):\n raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))\n\n def check_staged_space(self, size, checksum):\n if self.staged_files.filter(checksum=checksum).exists():\n return True\n space = self.get_available_staged_space()\n if space < size:\n raise PermissionDenied(_('Out of storage! 
Request more space under Settings > Storage.'))\n\n def get_available_staged_space(self):\n space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum(\"file_size\"))['size'] or 0\n return float(max(self.disk_space - space_used, 0))\n\n def get_available_space(self, active_files=None):\n return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0))\n\n def get_user_active_trees(self):\n return self.editable_channels.exclude(deleted=True)\\\n .values(tree_id=F(\"main_tree__tree_id\"))\n\n def get_user_active_files(self):\n cte = With(self.get_user_active_trees().distinct())\n\n return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\\\n .with_cte(cte)\\\n .values('checksum')\\\n .distinct()\n\n def get_space_used(self, active_files=None):\n active_files = active_files or self.get_user_active_files()\n files = active_files.aggregate(total_used=Sum('file_size'))\n return float(files['total_used'] or 0)\n\n def set_space_used(self):\n self.disk_space_used = self.get_space_used()\n self.save()\n return self.disk_space_used\n\n def get_space_used_by_kind(self):\n active_files = self.get_user_active_files()\n files = active_files.values('preset__kind_id')\\\n .annotate(space=Sum('file_size'))\\\n .order_by()\n\n kind_dict = {}\n for item in files:\n kind_dict[item['preset__kind_id']] = item['space']\n return kind_dict\n\n def email_user(self, subject, message, from_email=None, **kwargs):\n try:\n # msg = EmailMultiAlternatives(subject, message, from_email, [self.email])\n # msg.attach_alternative(kwargs[\"html_message\"],\"text/html\")\n # msg.send()\n send_mail(subject, message, from_email, [self.email], **kwargs)\n except (PMMailInactiveRecipientException, PMMailUnauthorizedException) as e:\n logging.error(str(e))\n\n def clean(self):\n super(User, self).clean()\n self.email = self.__class__.objects.normalize_email(self.email)\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_short_name(self):\n \"\"\"\n Returns the short name for the user.\n \"\"\"\n return self.first_name\n\n def get_token(self):\n token, _ = Token.objects.get_or_create(user=self)\n return token.key\n\n def save(self, *args, **kwargs):\n from contentcuration.utils.user import calculate_user_storage\n super(User, self).save(*args, **kwargs)\n\n if 'disk_space' in self._field_updates.changed():\n calculate_user_storage(self.pk)\n\n changed = False\n\n if not self.content_defaults:\n self.content_defaults = DEFAULT_CONTENT_DEFAULTS\n changed = True\n\n if not self.clipboard_tree:\n self.clipboard_tree = ContentNode.objects.create(title=self.email + \" clipboard\", kind_id=content_kinds.TOPIC)\n self.clipboard_tree.save()\n changed = True\n\n if changed:\n self.save()\n\n class Meta:\n verbose_name = \"User\"\n verbose_name_plural = \"Users\"\n indexes = [\n UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name=\"contentcura_email_d4d492_idx\")\n ]\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n\n if user.is_admin:\n return queryset\n\n # all shared editors\n all_editable = User.editable_channels.through.objects.all()\n editable = all_editable.filter(\n channel_id__in=all_editable.filter(user_id=user.pk).values_list(\"channel_id\", flat=True)\n )\n\n # all shared viewers\n all_view_only = 
User.view_only_channels.through.objects.all()\n view_only = all_view_only.filter(\n channel_id__in=all_view_only.filter(user_id=user.pk).values_list(\"channel_id\", flat=True)\n )\n\n return queryset.filter(\n Q(pk=user.pk)\n | Q(pk__in=editable.values_list(\"user_id\", flat=True))\n | Q(pk__in=view_only.values_list(\"user_id\", flat=True))\n )\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(pk=user.pk)\n\n @classmethod\n def get_for_email(cls, email, deleted=False, **filters):\n \"\"\"\n Returns the appropriate User record given an email, ordered by:\n - those with is_active=True first, of which there should only ever be one\n - otherwise by ID DESC so the most recent inactive should be returned\n\n Filters out deleted User records by default. To include both deleted and\n undeleted user records, pass None to the deleted argument.\n\n :param email: A string of the user's email\n :param filters: Additional filters to filter the User queryset\n :return: User or None\n \"\"\"\n user_qs = User.objects.filter(email__iexact=email.strip())\n if deleted is not None:\n user_qs = user_qs.filter(deleted=deleted)\n return user_qs.filter(**filters).order_by(\"-is_active\", \"-id\").first()\n\n\nclass UUIDField(models.CharField):\n\n def __init__(self, *args, **kwargs):\n kwargs['max_length'] = 32\n super(UUIDField, self).__init__(*args, **kwargs)\n\n def prepare_value(self, value):\n if isinstance(value, uuid.UUID):\n return value.hex\n return value\n\n def get_default(self):\n result = super(UUIDField, self).get_default()\n if isinstance(result, uuid.UUID):\n result = result.hex\n return result\n\n def to_python(self, value):\n if isinstance(value, uuid.UUID):\n return value.hex\n return value\n\n\nclass MPTTTreeIDManager(models.Model):\n \"\"\"\n Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,\n the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.\n\n As we are using this table only for the ID generation, it does not need any fields.\n\n We resolve this by creating a dummy table and using its ID as the tree index to take advantage of the db's\n concurrency-friendly way of generating sequential integer IDs. 
There is a custom migration that ensures\n that the number of records (and thus id) matches the max tree ID number when this table gets added.\n \"\"\"\n\n\ndef file_on_disk_name(instance, filename):\n \"\"\"\n Create a namespaced file path from the File object's checksum property.\n This path will be used to store the content copy\n :param instance: File (content File model)\n :param filename: str\n :return: str\n \"\"\"\n return generate_file_on_disk_name(instance.checksum, filename)\n\n\ndef generate_file_on_disk_name(checksum, filename):\n \"\"\" Separated from file_on_disk_name to allow a simple way to check if the file already exists \"\"\"\n h = checksum\n basename, ext = os.path.splitext(filename)\n directory = os.path.join(settings.STORAGE_ROOT, h[0], h[1])\n if not os.path.exists(directory):\n os.makedirs(directory)\n return os.path.join(directory, h + ext.lower())\n\n\ndef object_storage_name(instance, filename):\n \"\"\"\n Create a namespaced file path from the File object's checksum property.\n This path will be used to store the content copy\n\n :param instance: File (content File model)\n :param filename: str\n :return: str\n \"\"\"\n\n default_ext = ''\n if instance.file_format_id:\n default_ext = '.{}'.format(instance.file_format_id)\n\n return generate_object_storage_name(instance.checksum, filename, default_ext)\n\n\ndef generate_object_storage_name(checksum, filename, default_ext=''):\n \"\"\" Separated from object_storage_name to allow a simple way to check if the file already exists \"\"\"\n h = checksum\n basename, actual_ext = os.path.splitext(filename)\n ext = actual_ext if actual_ext else default_ext\n\n # Use / instead of os.path.join as Windows makes this \\\\\n directory = \"/\".join([settings.STORAGE_ROOT, h[0], h[1]])\n return os.path.join(directory, h + ext.lower())\n\n\ndef generate_storage_url(filename, request=None, *args):\n \"\"\"\n Generate a storage URL for the given content filename.\n \"\"\"\n\n path = generate_object_storage_name(os.path.splitext(filename)[0], filename)\n\n # There are two scenarios in which Studio might be run:\n #\n # 1. In normal kubernetes, nginx will proxy for us. We'll know we're in kubernetes when the\n # environment variable RUN_MODE=k8s\n #\n # 2. In Docker Compose and bare metal runserver, we'll be running in runserver, and minio\n # will be exposed on port 9000 on the host's localhost network.\n\n # Note (aron): returning the true storage URL (e.g. 
    # Note (aron): returning the true storage URL (e.g. https://storage.googleapis.com/storage/a.mp4)\n    # isn't too important, because we have a CDN in front of our servers, so it should be cached.\n    # But change the logic here in case there is a potential for bandwidth and latency improvement.\n\n    # Detect our current state first\n    run_mode = os.getenv(\"RUN_MODE\")\n\n    # if we're running inside k8s, then just serve the normal /content/{storage,databases} URL,\n    # and let nginx handle proper proxying.\n    if run_mode == \"k8s\":\n        url = \"/content/{path}\".format(\n            path=path,\n        )\n\n    # if we're in docker-compose or on bare metal, just return the object storage URL as localhost:9000\n    elif run_mode == \"docker-compose\" or run_mode is None:\n        # generate the minio storage URL, so we can get the GET parameters that give everyone\n        # access even if they don't need to log in\n        params = urllib.parse.urlparse(default_storage.url(path)).query\n        host = \"localhost\"\n        port = 9000  # hardcoded to the default minio port\n        url = \"http://{host}:{port}/{bucket}/{path}?{params}\".format(\n            host=host,\n            port=port,\n            bucket=settings.AWS_S3_BUCKET_NAME,\n            path=path,\n            params=params,\n        )\n\n    return url\n\n\nclass FileOnDiskStorage(FileSystemStorage):\n    \"\"\"\n    Override FileSystemStorage's default save method to ignore duplicate files.\n    \"\"\"\n\n    def get_available_name(self, name):\n        return name\n\n    def _save(self, name, content):\n        if self.exists(name):\n            # if the file exists, do not call the superclass's _save method\n            logging.warning('Content copy \"%s\" already exists!' % name)\n            return name\n        return super(FileOnDiskStorage, self)._save(name, content)\n\n\nclass SecretToken(models.Model):\n    \"\"\"Tokens for channels\"\"\"\n    token = models.CharField(max_length=100, unique=True)\n    is_primary = models.BooleanField(default=False)\n\n    @classmethod\n    def exists(cls, token):\n        \"\"\"\n        Return True when the given token string already exists.\n        Returns False otherwise.\n        \"\"\"\n        return cls.objects.filter(token=token).exists()\n\n    @classmethod\n    def generate_new_token(cls):\n        \"\"\"\n        Creates a primary secret token for the current channel using a proquint\n        string. Creates a secondary token containing the channel id.\n\n        These tokens can be used to refer to the channel to download its content\n        database.\n        \"\"\"\n        token = proquint.generate()\n\n        # Try 100 times to generate a unique token.\n        TRIALS = 100\n        for __ in range(TRIALS):\n            token = proquint.generate()\n            if SecretToken.exists(token):\n                continue\n            break\n        # if after TRIALS attempts we didn't get a unique token,\n        # just raise an error.\n        # See https://stackoverflow.com/a/9980160 on what the for-else loop does.\n        else:\n            raise ValueError(\"Cannot generate new token\")\n\n        # We found a unique token! 
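\n        # For reference, the for-else pattern used above in miniature (try_once\n        # is a hypothetical stand-in):\n        #\n        #     for attempt in range(3):\n        #         if try_once():\n        #             break      # success: the else clause is skipped\n        #     else:\n        #         raise RuntimeError(\"never succeeded\")  # runs only if no break\n        #\n        # 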
Save it\n return token\n\n def __str__(self):\n return \"{}-{}\".format(self.token[:5], self.token[5:])\n\n\ndef get_channel_thumbnail(channel):\n if not isinstance(channel, dict):\n channel = channel.__dict__\n if channel.get(\"thumbnail_encoding\"):\n thumbnail_data = channel.get(\"thumbnail_encoding\")\n if thumbnail_data.get(\"base64\"):\n return thumbnail_data[\"base64\"]\n\n if channel.get(\"thumbnail\") and 'static' not in channel.get(\"thumbnail\"):\n return generate_storage_url(channel.get(\"thumbnail\"))\n\n return '/static/img/kolibri_placeholder.png'\n\n\nCHANNEL_NAME_INDEX_NAME = \"channel_name_idx\"\n\n\n# A list of all the FKs from Channel object\n# to ContentNode trees\n# used for permissions filtering\nCHANNEL_TREES = (\n \"main_tree\",\n \"chef_tree\",\n \"trash_tree\",\n \"staging_tree\",\n \"previous_tree\",\n)\n\n\ndef boolean_val(val):\n return Value(val, output_field=models.BooleanField())\n\n\nclass PermissionCTE(With):\n tree_id_fields = [\n \"channel__{}__tree_id\".format(tree_name)\n for tree_name in CHANNEL_TREES\n ]\n\n def __init__(self, model, user_id, **kwargs):\n queryset = model.objects.filter(user_id=user_id)\\\n .annotate(\n tree_id=Unnest(ArrayRemove(Array(*self.tree_id_fields), None), output_field=models.IntegerField())\n )\n super(PermissionCTE, self).__init__(queryset=queryset.values(\"user_id\", \"channel_id\", \"tree_id\"), **kwargs)\n\n @classmethod\n def editable_channels(cls, user_id):\n return PermissionCTE(User.editable_channels.through, user_id, name=\"editable_channels_cte\")\n\n @classmethod\n def view_only_channels(cls, user_id):\n return PermissionCTE(User.view_only_channels.through, user_id, name=\"view_only_channels_cte\")\n\n def exists(self, *filters):\n return Exists(self.queryset().filter(*filters).values(\"user_id\"))\n\n\nclass Channel(models.Model):\n \"\"\" Permissions come from association with organizations \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n tagline = models.CharField(max_length=150, blank=True, null=True)\n version = models.IntegerField(default=0)\n thumbnail = models.TextField(blank=True, null=True)\n thumbnail_encoding = JSONField(default=dict)\n editors = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='editable_channels',\n verbose_name=\"editors\",\n help_text=\"Users with edit rights\",\n blank=True,\n )\n viewers = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='view_only_channels',\n verbose_name=\"viewers\",\n help_text=\"Users with view only rights\",\n blank=True,\n )\n language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL)\n trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL)\n clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL)\n main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL)\n staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL)\n chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL)\n previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', 
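\n        # previous_tree, like the main/chef/trash/staging trees above, is one of\n        # CHANNEL_TREES; PermissionCTE flattens these five FKs (Array + Unnest,\n        # dropping NULLs) into the set of tree_ids a user may view or edit.\n        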
on_delete=models.SET_NULL)\n bookmarked_by = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='bookmarked_channels',\n verbose_name=\"bookmarked by\",\n )\n deleted = models.BooleanField(default=False, db_index=True)\n public = models.BooleanField(default=False, db_index=True)\n preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)\n content_defaults = JSONField(default=dict)\n priority = models.IntegerField(default=0, help_text=\"Order to display public channels\")\n last_published = models.DateTimeField(blank=True, null=True)\n secret_tokens = models.ManyToManyField(\n SecretToken,\n related_name='channels',\n verbose_name=\"secret tokens\",\n blank=True,\n )\n source_url = models.CharField(max_length=200, blank=True, null=True)\n demo_server_url = models.CharField(max_length=200, blank=True, null=True)\n\n # Fields specific to content generated by Ricecooker\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n ricecooker_version = models.CharField(max_length=100, blank=True, null=True)\n\n # Fields to calculate when channel is published\n published_data = JSONField(default=dict)\n icon_encoding = models.TextField(blank=True, null=True)\n total_resource_count = models.IntegerField(default=0)\n published_kind_count = models.TextField(blank=True, null=True)\n published_size = models.FloatField(default=0)\n included_languages = models.ManyToManyField(\n \"Language\",\n related_name='channels',\n verbose_name=\"languages\",\n blank=True,\n )\n\n _field_updates = FieldTracker(fields=[\n # Field to watch for changes\n \"description\",\n \"language_id\",\n \"thumbnail\",\n \"name\",\n \"thumbnail_encoding\",\n # watch these fields for changes\n # but exclude them from setting changed\n # on the main tree\n \"deleted\",\n \"public\",\n \"main_tree_id\",\n \"version\",\n ])\n\n @classmethod\n def get_editable(cls, user, channel_id):\n return cls.filter_edit_queryset(cls.objects.all(), user).get(id=channel_id)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n # it won't return anything\n if not user_id:\n return queryset.none()\n\n edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef(\"id\")))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n user_email = not user.is_anonymous and user.email\n\n if user_id:\n filters = dict(user_id=user_id, channel_id=OuterRef(\"id\"))\n edit = Exists(User.editable_channels.through.objects.filter(**filters).values(\"user_id\"))\n view = Exists(User.view_only_channels.through.objects.filter(**filters).values(\"user_id\"))\n else:\n edit = boolean_val(False)\n view = boolean_val(False)\n\n queryset = queryset.annotate(\n edit=edit,\n view=view,\n )\n\n if user_id and user.is_admin:\n return queryset\n\n permission_filter = Q()\n if user_id:\n pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list(\n \"channel_id\", flat=True\n )\n permission_filter = (\n Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels)\n )\n\n return queryset.filter(permission_filter | Q(deleted=False, public=True))\n\n @classmethod\n def get_all_channels(cls):\n return 
cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct()\n\n def resource_size_key(self):\n return \"{}_resource_size\".format(self.pk)\n\n # Might be good to display resource size, but need to improve query time first\n\n def get_resource_size(self):\n cached_data = cache.get(self.resource_size_key())\n if cached_data:\n return cached_data\n tree_id = self.main_tree.tree_id\n files = File.objects.select_related('contentnode', 'assessment_item')\\\n .filter(contentnode__tree_id=tree_id)\\\n .values('checksum', 'file_size')\\\n .distinct()\\\n .aggregate(resource_size=Sum('file_size'))\n cache.set(self.resource_size_key(), files['resource_size'] or 0, None)\n return files['resource_size'] or 0\n\n def on_create(self):\n record_channel_stats(self, None)\n if not self.content_defaults:\n self.content_defaults = DEFAULT_CONTENT_DEFAULTS\n\n if not self.main_tree:\n self.main_tree = ContentNode.objects.create(\n title=self.name,\n kind_id=content_kinds.TOPIC,\n content_id=self.id,\n node_id=self.id,\n original_channel_id=self.id,\n source_channel_id=self.id,\n changed=True,\n complete=True,\n )\n # Ensure that locust or unit tests raise if there are any concurrency issues with tree ids.\n if settings.DEBUG:\n if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1:\n raise AssertionError\n\n if not self.trash_tree:\n self.trash_tree = ContentNode.objects.create(\n title=self.name,\n kind_id=content_kinds.TOPIC,\n content_id=self.id,\n node_id=self.id,\n )\n\n # if this change affects the published channel list, clear the channel cache\n if self.public and (self.main_tree and self.main_tree.published):\n delete_public_channel_cache_keys()\n\n def on_update(self):\n from contentcuration.utils.user import calculate_user_storage\n original_values = self._field_updates.changed()\n record_channel_stats(self, original_values)\n\n blacklist = set([\n \"public\",\n \"main_tree_id\",\n \"version\",\n ])\n\n if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)):\n # Changing channel metadata should also mark main_tree as changed\n self.main_tree.changed = True\n\n # Check if original thumbnail is no longer referenced\n if \"thumbnail\" in original_values and original_values[\"thumbnail\"] and 'static' not in original_values[\"thumbnail\"]:\n filename, ext = os.path.splitext(original_values[\"thumbnail\"])\n delete_empty_file_reference(filename, ext[1:])\n\n # Refresh storage for all editors on the channel\n if \"deleted\" in original_values:\n for editor in self.editors.all():\n calculate_user_storage(editor.pk)\n\n # Delete db if channel has been deleted and mark as unpublished\n if \"deleted\" in original_values and not original_values[\"deleted\"]:\n self.pending_editors.all().delete()\n export_db_storage_path = os.path.join(settings.DB_ROOT, \"{channel_id}.sqlite3\".format(channel_id=self.id))\n if default_storage.exists(export_db_storage_path):\n default_storage.delete(export_db_storage_path)\n if self.main_tree:\n self.main_tree.published = False\n\n if self.main_tree and self.main_tree._field_updates.changed():\n self.main_tree.save()\n\n # if this change affects the published channel list, clear the channel cache\n if \"public\" in original_values and (self.main_tree and self.main_tree.published):\n delete_public_channel_cache_keys()\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n\n super(Channel, self).save(*args, 
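\n            # (Illustrative lifecycle: the _state.adding check above routes the very\n            #  first save through on_create() and every later save through on_update(),\n            #  where _field_updates.changed() -- a model_utils FieldTracker -- maps each\n            #  changed field name to its previous value, e.g. {'name': 'Old name'}.)\n            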
**kwargs)\n\n def get_thumbnail(self):\n return get_channel_thumbnail(self)\n\n def has_changes(self):\n return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists()\n\n def get_date_modified(self):\n return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified']\n\n def get_resource_count(self):\n return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count()\n\n def get_human_token(self):\n return self.secret_tokens.get(is_primary=True)\n\n def get_channel_id_token(self):\n return self.secret_tokens.get(token=self.id)\n\n def make_token(self):\n token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True)\n self.secret_tokens.get_or_create(token=self.id)\n return token\n\n def make_public(self, bypass_signals=False):\n \"\"\"\n Sets the current channel object to be public and viewable by anyone.\n\n If bypass_signals is True, update the model in such a way that we\n prevent any model signals from running due to the update.\n\n Returns the same channel object.\n \"\"\"\n if bypass_signals:\n self.public = True # set this attribute still, so the object will be updated\n Channel.objects.filter(id=self.id).update(public=True)\n # clear the channel cache\n delete_public_channel_cache_keys()\n else:\n self.public = True\n self.save()\n\n return self\n\n def mark_created(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.CREATION)\n\n def mark_publishing(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.PUBLICATION)\n self.main_tree.publishing = True\n self.main_tree.save()\n\n def mark_deleted(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.DELETION)\n self.deleted = True\n self.save()\n\n def mark_recovered(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.RECOVERY)\n self.deleted = False\n self.save()\n\n @property\n def deletion_history(self):\n return self.history.filter(action=channel_history.DELETION)\n\n @property\n def publishing_history(self):\n return self.history.filter(action=channel_history.PUBLICATION)\n\n @classmethod\n def get_public_channels(cls, defer_nonmain_trees=False):\n \"\"\"\n Get all public channels.\n\n If defer_nonmain_trees is True, defer the loading of all\n trees except for the main_tree.\"\"\"\n if defer_nonmain_trees:\n c = (Channel.objects\n .filter(public=True)\n .exclude(deleted=True)\n .select_related('main_tree')\n .prefetch_related('editors')\n .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers'))\n else:\n c = Channel.objects.filter(public=True).exclude(deleted=True)\n\n return c\n\n class Meta:\n verbose_name = \"Channel\"\n verbose_name_plural = \"Channels\"\n\n indexes = [\n models.Index(fields=[\"name\"], name=CHANNEL_NAME_INDEX_NAME),\n ]\n index_together = [\n [\"deleted\", \"public\"]\n ]\n\n\nCHANNEL_HISTORY_CHANNEL_INDEX_NAME = \"idx_channel_history_channel_id\"\n\n\nclass ChannelHistory(models.Model):\n \"\"\"\n Model for tracking certain actions performed on a channel\n \"\"\"\n channel = models.ForeignKey('Channel', null=False, blank=False, related_name='history', on_delete=models.CASCADE)\n actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE)\n performed = models.DateTimeField(default=timezone.now)\n action = models.CharField(max_length=50, 
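\n        # Rows are written by the Channel.mark_created / mark_publishing /\n        # mark_deleted / mark_recovered helpers above -- e.g. channel.mark_deleted(user)\n        # stores one row with action=channel_history.DELETION -- and are read back\n        # through the deletion_history / publishing_history properties.\n        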
choices=channel_history.choices)\n\n @classmethod\n def prune(cls):\n \"\"\"\n Prunes history records by keeping the most recent actions for each channel and type,\n and deleting all other older actions\n \"\"\"\n keep_ids = cls.objects.distinct(\"channel_id\", \"action\").order_by(\"channel_id\", \"action\", \"-performed\").values_list(\"id\", flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()\n\n class Meta:\n verbose_name = \"Channel history\"\n verbose_name_plural = \"Channel histories\"\n\n indexes = [\n models.Index(fields=[\"channel_id\"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME),\n ]\n\n\nclass UserHistory(models.Model):\n \"\"\"\n Model that stores the user's action history.\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False, related_name=\"history\", on_delete=models.CASCADE)\n action = models.CharField(max_length=32, choices=user_history.choices)\n\n performed_at = models.DateTimeField(default=timezone.now)\n\n\nclass ChannelSet(models.Model):\n # NOTE: this is referred to as \"channel collections\" on the front-end, but we need to call it\n # something else as there is already a ChannelCollection model on the front-end\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n public = models.BooleanField(default=False, db_index=True)\n editors = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='channel_sets',\n verbose_name=\"editors\",\n help_text=\"Users with edit rights\",\n blank=True,\n )\n secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', on_delete=models.SET_NULL)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n user_id = not user.is_anonymous and user.id\n edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef(\"id\")))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n return cls.filter_edit_queryset(queryset, user)\n\n def get_channels(self):\n if self.secret_token:\n return self.secret_token.channels.filter(deleted=False)\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n\n super(ChannelSet, self).save()\n\n def on_create(self):\n if not self.secret_token:\n self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token())\n\n def delete(self, *args, **kwargs):\n super(ChannelSet, self).delete(*args, **kwargs)\n\n if self.secret_token:\n self.secret_token.delete()\n\n\nclass ContentTag(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n tag_name = models.CharField(max_length=50)\n channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)\n objects = CustomManager()\n\n def __str__(self):\n return self.tag_name\n\n class Meta:\n unique_together = ['tag_name', 'channel']\n\n\nclass License(models.Model):\n \"\"\"\n Normalize the license of ContentNode model\n \"\"\"\n license_name = models.CharField(max_length=50)\n license_url = models.URLField(blank=True)\n license_description = models.TextField(blank=True)\n copyright_holder_required = models.BooleanField(default=True)\n is_custom = models.BooleanField(default=False)\n exists = models.BooleanField(\n default=False,\n 
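# For illustration, these flags drive validation elsewhere; e.g. a\n        # (hypothetical) query for licenses that demand an explicit holder:\n        #     License.objects.filter(copyright_holder_required=True)\n        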
verbose_name=\"license exists\",\n help_text=\"Tells whether or not a content item is licensed to share\",\n )\n\n @classmethod\n def validate_name(cls, name):\n if cls.objects.filter(license_name=name).count() == 0:\n raise ValidationError('License `{}` does not exist'.format(name))\n\n def __str__(self):\n return self.license_name\n\n\nNODE_ID_INDEX_NAME = \"node_id_idx\"\nNODE_MODIFIED_INDEX_NAME = \"node_modified_idx\"\nNODE_MODIFIED_DESC_INDEX_NAME = \"node_modified_desc_idx\"\nCONTENTNODE_TREE_ID_CACHE_KEY = \"contentnode_{pk}__tree_id\"\n\n\nclass ContentNode(MPTTModel, models.Model):\n \"\"\"\n By default, all nodes have a title and can be used as a topic.\n \"\"\"\n # Random id used internally on Studio (See `node_id` for id used in Kolibri)\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n\n # the content_id is used for tracking a user's interaction with a piece of\n # content, in the face of possibly many copies of that content. When a user\n # interacts with a piece of content, all substantially similar pieces of\n # content should be marked as such as well. We track these \"substantially\n # similar\" types of content by having them have the same content_id.\n content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True)\n # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta\n node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n\n # TODO: disallow nulls once existing models have been set\n original_channel_id = UUIDField(primary_key=False, editable=False, null=True,\n db_index=True) # Original channel copied from\n source_channel_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate channel copied from\n # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary)\n original_source_node_id = UUIDField(primary_key=False, editable=False, null=True,\n db_index=True)\n source_node_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate node_id of node copied from\n\n # Fields specific to content generated by Ricecooker\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n\n title = models.CharField(max_length=200, blank=True)\n description = models.TextField(blank=True)\n kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL)\n license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL)\n license_description = models.CharField(max_length=400, null=True, blank=True)\n prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of',\n through='PrerequisiteContentRelationship', symmetrical=False, blank=True)\n is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship',\n symmetrical=False, blank=True)\n language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL)\n parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)\n tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)\n # No longer used\n sort_order = models.FloatField(max_length=50, default=1, verbose_name=\"sort order\",\n help_text=\"Ascending, lowest number shown first\")\n copyright_holder 
= models.CharField(max_length=200, null=True, blank=True, default=\"\",\n help_text=\"Organization of person who holds the essential rights\")\n # legacy field...\n original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates')\n cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones')\n\n thumbnail_encoding = models.TextField(blank=True, null=True)\n\n created = models.DateTimeField(default=timezone.now, verbose_name=\"created\")\n modified = models.DateTimeField(auto_now=True, verbose_name=\"modified\")\n published = models.BooleanField(default=False)\n publishing = models.BooleanField(default=False)\n complete = models.BooleanField(null=True)\n\n changed = models.BooleanField(default=True)\n \"\"\"\n Extra fields for exercises:\n - type: mastery model to use to determine completion\n - m: m value for M out of N mastery criteria\n - n: n value for M out of N mastery criteria\n \"\"\"\n extra_fields = JSONField(default=dict, blank=True, null=True)\n author = models.CharField(max_length=200, blank=True, default=\"\", help_text=\"Who created this content?\",\n null=True)\n aggregator = models.CharField(max_length=200, blank=True, default=\"\", help_text=\"Who gathered this content together?\",\n null=True)\n provider = models.CharField(max_length=200, blank=True, default=\"\", help_text=\"Who distributed this content?\",\n null=True)\n\n role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER)\n freeze_authoring_data = models.BooleanField(default=False)\n\n # Fields for metadata labels\n # These fields use a map to store applied labels\n # {\n # \"<label_id1>\": true,\n # \"<label_id2>\": true,\n # }\n grade_levels = models.JSONField(blank=True, null=True)\n resource_types = models.JSONField(blank=True, null=True)\n learning_activities = models.JSONField(blank=True, null=True)\n accessibility_labels = models.JSONField(blank=True, null=True)\n categories = models.JSONField(blank=True, null=True)\n learner_needs = models.JSONField(blank=True, null=True)\n\n # A field for storing a suggested duration for the content node\n # this duration should be in seconds.\n suggested_duration = models.IntegerField(blank=True, null=True, help_text=\"Suggested duration for the content node (in seconds)\")\n\n objects = CustomContentNodeTreeManager()\n\n # Track all updates and ignore a blacklist of attributes\n # when we check for changes\n _field_updates = FieldTracker()\n\n _permission_filter = Q(tree_id=OuterRef(\"tree_id\"))\n\n @classmethod\n def _annotate_channel_id(cls, queryset):\n # Annotate channel id\n return queryset.annotate(\n channel_id=Subquery(\n Channel.objects.filter(\n main_tree__tree_id=OuterRef(\"tree_id\")\n ).values_list(\"id\", flat=True)[:1]\n )\n )\n\n @classmethod\n def filter_by_pk(cls, pk):\n \"\"\"\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always\n returns a queryset filtered by pk.\n\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode\n for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. 
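\n\n        The tree_id lookup is memoized, e.g. (illustrative):\n\n            cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n\n        so repeated calls for the same pk avoid the extra tree_id query. 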
If\n        a ContentNode does not exist for `pk` then an empty queryset is returned.\n        \"\"\"\n        query = ContentNode.objects.filter(pk=pk)\n\n        if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:\n            tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n\n            if tree_id:\n                query = query.filter(tree_id=tree_id)\n            else:\n                tree_id = ContentNode.objects.filter(pk=pk).values_list(\"tree_id\", flat=True).first()\n                if tree_id:\n                    cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None)\n                    query = query.filter(tree_id=tree_id)\n                else:\n                    query = query.none()\n\n        return query\n\n    @classmethod\n    def filter_edit_queryset(cls, queryset, user):\n        user_id = not user.is_anonymous and user.id\n\n        if not user_id:\n            return queryset.none()\n\n        edit_cte = PermissionCTE.editable_channels(user_id)\n\n        queryset = queryset.with_cte(edit_cte).annotate(\n            edit=edit_cte.exists(cls._permission_filter),\n        )\n\n        if user.is_admin:\n            return queryset\n\n        return queryset.filter(edit=True)\n\n    @classmethod\n    def filter_view_queryset(cls, queryset, user):\n        user_id = not user.is_anonymous and user.id\n\n        queryset = queryset.annotate(\n            public=Exists(\n                Channel.objects.filter(\n                    public=True, main_tree__tree_id=OuterRef(\"tree_id\")\n                ).values(\"pk\")\n            ),\n        )\n\n        if not user_id:\n            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)\n\n        edit_cte = PermissionCTE.editable_channels(user_id)\n        view_cte = PermissionCTE.view_only_channels(user_id)\n\n        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(\n            edit=edit_cte.exists(cls._permission_filter),\n            view=view_cte.exists(cls._permission_filter),\n        )\n\n        if user.is_admin:\n            return queryset\n\n        return queryset.filter(\n            Q(view=True)\n            | Q(edit=True)\n            | Q(public=True)\n        )\n\n    @raise_if_unsaved\n    def get_root(self):\n        # Only topics can be root nodes\n        if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n            return self\n        return super(ContentNode, self).get_root()\n\n    @raise_if_unsaved\n    def get_root_id(self):\n        # Only topics can be root nodes\n        if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n            # a non-topic root is its own root, so return its own pk\n            return self.pk\n\n        return ContentNode.objects.values_list('pk', flat=True).get(\n            tree_id=self._mpttfield('tree_id'),\n            parent=None,\n        )\n\n    def get_tree_data(self, levels=float('inf')):\n        \"\"\"\n        Returns `levels`-deep tree information starting at current node.\n        Args:\n          levels (int): depth of tree hierarchy to return\n        Returns:\n          tree (dict): starting with self, with a children list containing either\n          just the children's `node_id`s or the full recursive tree.\n        \"\"\"\n        if self.kind_id == content_kinds.TOPIC:\n            node_data = {\n                \"title\": self.title,\n                \"kind\": self.kind_id,\n                \"node_id\": self.node_id,\n                \"studio_id\": self.id,\n            }\n            children = self.children.all()\n            if levels > 0:\n                node_data[\"children\"] = [c.get_tree_data(levels=levels - 1) for c in children]\n            return node_data\n        if self.kind_id == content_kinds.EXERCISE:\n            return {\n                \"title\": self.title,\n                \"kind\": self.kind_id,\n                \"count\": self.assessment_items.count(),\n                \"node_id\": self.node_id,\n                \"studio_id\": self.id,\n            }\n        return {\n            \"title\": self.title,\n            \"kind\": self.kind_id,\n            \"file_size\": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],\n            \"node_id\": self.node_id,\n            \"studio_id\": self.id,\n        }\n\n    def get_original_node(self):\n        original_node = self.original_node or self\n        if self.original_channel_id and self.original_source_node_id:\n            original_tree_id = 
Channel.objects.select_related(\"main_tree\").get(pk=self.original_channel_id).main_tree.tree_id\n original_node = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first() or \\\n ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first() or self\n return original_node\n\n def get_associated_presets(self):\n key = \"associated_presets_{}\".format(self.kind_id)\n cached_data = cache.get(key)\n if cached_data:\n return cached_data\n presets = list(FormatPreset.objects.filter(kind=self.kind).values())\n cache.set(key, presets, None)\n return presets\n\n def get_prerequisites(self):\n prerequisite_mapping = {}\n prerequisites = self.prerequisite.all()\n prereqlist = list(prerequisites)\n for prereq in prerequisites:\n prlist, prereqmapping = prereq.get_prerequisites()\n prerequisite_mapping.update({prereq.pk: prereqmapping})\n prereqlist.extend(prlist)\n return prereqlist, prerequisite_mapping\n\n def get_postrequisites(self):\n postrequisite_mapping = {}\n postrequisites = self.is_prerequisite_of.all()\n postreqlist = list(postrequisites)\n for postreq in postrequisites:\n prlist, postreqmapping = postreq.get_postrequisites()\n postrequisite_mapping.update({postreq.pk: postreqmapping})\n postreqlist.extend(prlist)\n return postreqlist, postrequisite_mapping\n\n def get_channel_id(self):\n if hasattr(self, \"channel_id\"):\n return self.channel_id\n channel = self.get_channel()\n if channel:\n return channel.id\n return None\n\n def get_channel(self):\n try:\n root = self.get_root()\n if not root:\n return None\n return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(previous_tree=root)).first()\n except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):\n return None\n\n def get_thumbnail(self):\n # Problems with json.loads, so use ast.literal_eval to get dict\n if self.thumbnail_encoding:\n thumbnail_data = load_json_string(self.thumbnail_encoding)\n if type(thumbnail_data) is dict and thumbnail_data.get(\"base64\"):\n return thumbnail_data[\"base64\"]\n\n thumbnail = self.files.filter(preset__thumbnail=True).first()\n if thumbnail:\n return generate_storage_url(str(thumbnail))\n\n return \"\"\n\n @classmethod\n def get_nodes_with_title(cls, title, limit_to_children_of=None):\n \"\"\"\n Returns all ContentNodes with a given title. 
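\n\n        For example (hypothetical call):\n\n            ContentNode.get_nodes_with_title(\"Addition\")\n\n        returns every node titled \"Addition\" across all trees. 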
If limit_to_children_of\n is passed in with an id, only look at all the children of the node with that id.\n \"\"\"\n if limit_to_children_of:\n root = cls.objects.get(id=limit_to_children_of)\n return root.get_descendants().filter(title=title)\n return cls.objects.filter(title=title)\n\n def get_details(self, channel_id=None):\n \"\"\"\n Returns information about the node and its children, including total size, languages, files, etc.\n\n :return: A dictionary with detailed statistics and information about the node.\n \"\"\"\n from contentcuration.viewsets.common import SQArrayAgg\n from contentcuration.viewsets.common import SQCount\n from contentcuration.viewsets.common import SQRelatedArrayAgg\n from contentcuration.viewsets.common import SQSum\n from contentcuration.viewsets.common import SQJSONBKeyArrayAgg\n\n node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id).order_by()\n\n descendants = (\n self.get_descendants()\n .values(\"id\")\n )\n\n if channel_id:\n channel = Channel.objects.filter(id=channel_id)[0]\n else:\n channel = self.get_channel()\n\n if not descendants.exists():\n data = {\n \"last_update\": pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT\n ),\n \"created\": self.created.strftime(settings.DATE_TIME_FORMAT),\n \"resource_count\": 0,\n \"resource_size\": 0,\n \"includes\": {\"coach_content\": 0, \"exercises\": 0},\n \"kind_count\": [],\n \"languages\": [],\n \"accessible_languages\": [],\n \"licenses\": [],\n \"tags\": [],\n \"copyright_holders\": [],\n \"authors\": [],\n \"aggregators\": [],\n \"providers\": [],\n \"sample_pathway\": [],\n \"original_channels\": [],\n \"sample_nodes\": [],\n \"levels\": [],\n \"categories\": [],\n }\n\n # Set cache with latest data\n cache.set(\"details_{}\".format(self.node_id), json.dumps(data), None)\n return data\n\n # Get resources\n resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()\n nodes = With(\n File.objects.filter(contentnode_id__in=Subquery(resources.values(\"id\")))\n .values(\"checksum\", \"file_size\")\n .order_by(),\n name=\"nodes\",\n )\n file_query = (\n nodes.queryset().with_cte(nodes).values(\"checksum\", \"file_size\").distinct()\n )\n l_nodes = With(\n File.objects.filter(contentnode_id__in=Subquery(resources.values(\"id\")))\n .values(\"language_id\", \"preset_id\")\n .order_by(),\n name=\"l_nodes\",\n )\n accessible_languages_query = (\n l_nodes.queryset()\n .filter(preset_id=format_presets.VIDEO_SUBTITLE)\n .with_cte(l_nodes)\n .values(\"language__native_name\")\n .distinct()\n )\n\n tags_query = str(\n ContentTag.objects.filter(\n tagged_content__pk__in=descendants.values_list(\"pk\", flat=True)\n )\n .values(\"tag_name\")\n .annotate(count=Count(\"tag_name\"))\n .query\n ).replace(\"topic\", \"'topic'\")\n kind_count_query = str(\n resources.values(\"kind_id\").annotate(count=Count(\"kind_id\")).query\n ).replace(\"topic\", \"'topic'\")\n\n node = node.annotate(\n resource_count=SQCount(resources, field=\"id\"),\n resource_size=SQSum(file_query, field=\"file_size\"),\n copyright_holders=SQArrayAgg(\n resources.distinct(\"copyright_holder\").order_by(\"copyright_holder\"),\n field=\"copyright_holder\",\n ),\n authors=SQArrayAgg(\n resources.distinct(\"author\").order_by(\"author\"), field=\"author\"\n ),\n aggregators=SQArrayAgg(\n resources.distinct(\"aggregator\").order_by(\"aggregator\"),\n field=\"aggregator\",\n ),\n providers=SQArrayAgg(\n resources.distinct(\"provider\").order_by(\"provider\"), field=\"provider\"\n ),\n 
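# Each SQ* helper here wraps a correlated subquery aggregate\n            # (COUNT / SUM / ARRAY_AGG at the SQL level), so all of these\n            # statistics come back in a single query rather than one\n            # round trip per metric.\n            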
languages=SQRelatedArrayAgg(\n descendants.exclude(language=None)\n .distinct(\"language__native_name\")\n .order_by(),\n field=\"language__native_name\",\n fieldname=\"native_name\",\n ),\n accessible_languages=SQRelatedArrayAgg(\n accessible_languages_query,\n field=\"language__native_name\",\n fieldname=\"native_name\",\n ),\n licenses=SQRelatedArrayAgg(\n resources.exclude(license=None)\n .distinct(\"license__license_name\")\n .order_by(\"license__license_name\"),\n field=\"license__license_name\",\n fieldname=\"license_name\",\n ),\n kind_count=RawSQL(\n \"SELECT json_agg(row_to_json (x)) FROM ({}) as x\".format(\n kind_count_query\n ),\n (),\n ),\n tags_list=RawSQL(\n \"SELECT json_agg(row_to_json (x)) FROM ({}) as x\".format(tags_query), ()\n ),\n coach_content=SQCount(\n resources.filter(role_visibility=roles.COACH), field=\"id\"\n ),\n exercises=SQCount(\n resources.filter(kind_id=content_kinds.EXERCISE), field=\"id\"\n ),\n levels=SQJSONBKeyArrayAgg(\n descendants.exclude(grade_levels__isnull=True),\n field=\"grade_levels\",\n ),\n all_categories=SQJSONBKeyArrayAgg(\n descendants.exclude(categories__isnull=True),\n field=\"categories\",\n ),\n )\n\n # Get sample pathway by getting longest path\n # Using resources.aggregate adds a lot of time, use values that have already been fetched\n max_level = max(\n resources.values_list(\"level\", flat=True).order_by().distinct() or [0]\n )\n m_nodes = With(\n resources.values(\"id\", \"level\", \"tree_id\", \"lft\").order_by(),\n name=\"m_nodes\",\n )\n deepest_node_record = (\n m_nodes.queryset()\n .with_cte(m_nodes)\n .filter(level=max_level)\n .values(\"id\")\n .order_by(\"tree_id\", \"lft\")\n .first()\n )\n if deepest_node_record:\n deepest_node = ContentNode.objects.get(pk=deepest_node_record[\"id\"])\n pathway = (\n list(\n deepest_node.get_ancestors()\n .order_by()\n .exclude(parent=None)\n .values(\"title\", \"node_id\", \"kind_id\")\n .order_by()\n )\n if deepest_node_record\n else []\n )\n sample_nodes = (\n [\n {\n \"node_id\": n.node_id,\n \"title\": n.title,\n \"description\": n.description,\n \"thumbnail\": n.get_thumbnail(),\n \"kind\": n.kind_id,\n }\n for n in deepest_node.get_siblings(include_self=True)[0:4]\n ]\n if deepest_node_record\n else []\n )\n\n # Get list of channels nodes were originally imported from (omitting the current channel)\n channel_id = channel and channel.id\n originals = (\n resources.values(\"original_channel_id\")\n .annotate(count=Count(\"original_channel_id\"))\n .order_by(\"original_channel_id\")\n )\n originals = {c[\"original_channel_id\"]: c[\"count\"] for c in originals}\n original_channels = (\n Channel.objects.exclude(pk=channel_id)\n .filter(pk__in=originals.keys(), deleted=False)\n .order_by()\n )\n original_channels = [\n {\n \"id\": c.id,\n \"name\": \"{}{}\".format(\n c.name, _(\" (Original)\") if channel_id == c.id else \"\"\n ),\n \"thumbnail\": c.get_thumbnail(),\n \"count\": originals[c.id],\n }\n for c in original_channels\n ]\n\n node = (\n node.order_by()\n .values(\n \"id\",\n \"resource_count\",\n \"resource_size\",\n \"copyright_holders\",\n \"authors\",\n \"aggregators\",\n \"providers\",\n \"languages\",\n \"accessible_languages\",\n \"coach_content\",\n \"licenses\",\n \"tags_list\",\n \"kind_count\",\n \"exercises\",\n \"levels\",\n \"all_categories\",\n )\n .first()\n )\n for_educators = {\n \"coach_content\": node[\"coach_content\"],\n \"exercises\": node[\"exercises\"],\n }\n # Serialize data\n data = {\n \"last_update\": 
pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT\n ),\n \"created\": self.created.strftime(settings.DATE_TIME_FORMAT),\n \"resource_count\": node.get(\"resource_count\", 0),\n \"resource_size\": node.get(\"resource_size\", 0),\n \"includes\": for_educators,\n \"kind_count\": node.get(\"kind_count\") or [],\n \"languages\": node.get(\"languages\") or [],\n \"accessible_languages\": node.get(\"accessible_languages\") or [],\n \"licenses\": node.get(\"licenses\") or [],\n \"tags\": node.get(\"tags_list\") or [],\n \"original_channels\": original_channels,\n \"sample_pathway\": pathway,\n \"sample_nodes\": sample_nodes,\n # source model fields for the below default to an empty string, but can also be null\n \"authors\": list(filter(bool, node[\"authors\"])),\n \"aggregators\": list(filter(bool, node[\"aggregators\"])),\n \"providers\": list(filter(bool, node[\"providers\"])),\n \"copyright_holders\": list(filter(bool, node[\"copyright_holders\"])),\n \"levels\": node.get(\"levels\") or [],\n \"categories\": node.get(\"all_categories\") or [],\n }\n\n # Set cache with latest data\n cache.set(\"details_{}\".format(self.node_id), json.dumps(data), None)\n return data\n\n def has_changes(self):\n mptt_opts = self._mptt_meta\n # Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods.\n blacklist = set([\n 'changed',\n 'modified',\n 'publishing',\n mptt_opts.tree_id_attr,\n mptt_opts.left_attr,\n mptt_opts.right_attr,\n mptt_opts.level_attr,\n ])\n original_values = self._field_updates.changed()\n return any((True for field in original_values if field not in blacklist))\n\n def recalculate_editors_storage(self):\n from contentcuration.utils.user import calculate_user_storage\n for editor in self.files.values_list('uploaded_by_id', flat=True).distinct():\n calculate_user_storage(editor)\n\n def mark_complete(self): # noqa C901\n errors = []\n # Is complete if title is falsy but only if not a root node.\n if not (bool(self.title) or self.parent_id is None):\n errors.append(\"Empty title\")\n if self.kind_id != content_kinds.TOPIC:\n if not self.license:\n errors.append(\"Missing license\")\n if self.license and self.license.is_custom and not self.license_description:\n errors.append(\"Missing license description for custom license\")\n if self.license and self.license.copyright_holder_required and not self.copyright_holder:\n errors.append(\"Missing required copyright holder\")\n if self.kind_id != content_kinds.EXERCISE and not self.files.filter(preset__supplementary=False).exists():\n errors.append(\"Missing default file\")\n if self.kind_id == content_kinds.EXERCISE:\n # Check to see if the exercise has at least one assessment item that has:\n if not self.assessment_items.filter(\n # Item with non-blank raw data\n ~Q(raw_data=\"\") | (\n # A non-blank question\n ~Q(question='')\n # Non-blank answers\n & ~Q(answers='[]')\n # With either an input question or one answer marked as correct\n & (Q(type=exercises.INPUT_QUESTION) | Q(answers__iregex=r'\"correct\":\\s*true'))\n )\n ).exists():\n errors.append(\"No questions with question text and complete answers\")\n # Check that it has a mastery model set\n # Either check for the previous location for the mastery model, or rely on our completion criteria validation\n # that if it has been set, then it has been set correctly.\n criterion = self.extra_fields.get(\"options\", {}).get(\"completion_criteria\")\n if not (self.extra_fields.get(\"mastery_model\") or 
criterion):\n errors.append(\"Missing mastery criterion\")\n if criterion:\n try:\n completion_criteria.validate(criterion, kind=content_kinds.EXERCISE)\n except completion_criteria.ValidationError:\n errors.append(\"Mastery criterion is defined but is invalid\")\n self.complete = not errors\n return errors\n\n def make_content_id_unique(self):\n \"\"\"\n If self is NOT an original contentnode (in other words, a copied contentnode)\n and a contentnode with same content_id exists then we update self's content_id.\n \"\"\"\n is_node_original = self.original_source_node_id is None or self.original_source_node_id == self.node_id\n node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(content_id=self.content_id)\n if (not is_node_original) and node_same_content_id.exists():\n ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.uuid4().hex)\n\n def on_create(self):\n self.changed = True\n self.recalculate_editors_storage()\n self.set_default_learning_activity()\n\n def on_update(self):\n self.changed = self.changed or self.has_changes()\n\n def move_to(self, target, *args, **kwargs):\n parent_was_trashtree = self.parent.channel_trash.exists()\n super(ContentNode, self).move_to(target, *args, **kwargs)\n self.save()\n\n # Update tree_id cache when node is moved to another tree\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.tree_id, None)\n\n # Recalculate storage if node was moved to or from the trash tree\n if target.channel_trash.exists() or parent_was_trashtree:\n self.recalculate_editors_storage()\n\n def set_default_learning_activity(self):\n if self.learning_activities is None:\n if self.kind in kind_activity_map:\n self.learning_activities = {\n kind_activity_map[self.kind]: True\n }\n\n def save(self, skip_lock=False, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n\n # Logic borrowed from mptt - do a simple check to see if we have changed\n # the parent of the node. 
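\n        # (Illustratively: editing only node.title takes the no-lock branch below,\n        #  while re-parenting locks the mptt fields for the tree_ids of both the\n        #  old and the new parent before any rows are written.)\n        # 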
We use the mptt specific cached fields here\n # because these get updated by the mptt move methods, and so will be up to\n # date, meaning we can avoid locking the DB twice when the fields have already\n # been updated in the database.\n\n # If most moves are being done independently of just changing the parent\n # and then calling a save, locking within the save method itself should rarely\n # be triggered - meaning updates to contentnode metadata should only rarely\n # trigger a write lock on mptt fields.\n\n old_parent_id = self._field_updates.changed().get(\"parent_id\")\n if self._state.adding and (self.parent_id or self.parent):\n same_order = False\n elif old_parent_id is DeferredAttribute:\n same_order = True\n else:\n same_order = old_parent_id == self.parent_id\n\n if not same_order:\n changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id])))\n else:\n changed_ids = []\n\n if not same_order and not skip_lock:\n # Lock the mptt fields for the trees of the old and new parent\n with ContentNode.objects.lock_mptt(*ContentNode.objects\n .filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid])\n .values_list('tree_id', flat=True).distinct()):\n super(ContentNode, self).save(*args, **kwargs)\n # Always write to the database for the parent change updates, as we have\n # no persistent object references for the original and new parent to modify\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed=True)\n else:\n super(ContentNode, self).save(*args, **kwargs)\n # Always write to the database for the parent change updates, as we have\n # no persistent object references for the original and new parent to modify\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed=True)\n\n # Copied from MPTT\n save.alters_data = True\n\n def delete(self, *args, **kwargs):\n parent = self.parent or self._field_updates.changed().get('parent')\n if parent:\n parent.changed = True\n parent.save()\n\n self.recalculate_editors_storage()\n\n # Lock the mptt fields for the tree of this node\n with ContentNode.objects.lock_mptt(self.tree_id):\n return super(ContentNode, self).delete(*args, **kwargs)\n\n # Copied from MPTT\n delete.alters_data = True\n\n def copy_to(\n self,\n target=None,\n position=\"last-child\",\n pk=None,\n mods=None,\n excluded_descendants=None,\n can_edit_source_channel=None,\n batch_size=None,\n progress_tracker=None\n ):\n return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0]\n\n def copy(self):\n return self.copy_to()\n\n def is_publishable(self):\n return self.complete and self.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists()\n\n class Meta:\n verbose_name = \"Topic\"\n verbose_name_plural = \"Topics\"\n # Do not allow two nodes with the same name on the same level\n # unique_together = ('parent', 'title')\n indexes = [\n models.Index(fields=[\"node_id\"], name=NODE_ID_INDEX_NAME),\n models.Index(fields=[\"-modified\"], name=NODE_MODIFIED_DESC_INDEX_NAME),\n ]\n\n\nclass ContentKind(models.Model):\n kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices)\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return 
self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True) # Render on client side\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n\n _, ext = os.path.splitext(filename)\n ext = ext.lstrip(\".\")\n f = FormatPreset.objects.filter(\n allowed_formats__extension=ext,\n display=True\n )\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return \"{code}-{subcode}\".format(code=self.lang_code,\n subcode=self.lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\nASSESSMENT_ID_INDEX_NAME = \"assessment_id_idx\"\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default=\"multiplechoice\")\n question = models.TextField(blank=True)\n hints = models.TextField(default=\"[]\")\n answers = models.TextField(default=\"[]\")\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\"assessment_items\", blank=True, null=True,\n db_index=True, on_delete=models.CASCADE)\n # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n\n objects = CustomManager()\n # Track all updates\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n class Meta:\n indexes = [\n models.Index(fields=[\"assessment_id\"], name=ASSESSMENT_ID_INDEX_NAME),\n ]\n\n unique_together = ['contentnode', 'assessment_id']\n\n _permission_filter = Q(tree_id=OuterRef(\"contentnode__tree_id\"))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n if not user_id:\n return queryset.none()\n\n edit_cte = 
PermissionCTE.editable_channels(user_id)\n\n        queryset = queryset.with_cte(edit_cte).annotate(\n            edit=edit_cte.exists(cls._permission_filter),\n        )\n\n        if user.is_admin:\n            return queryset\n\n        return queryset.filter(edit=True)\n\n    @classmethod\n    def filter_view_queryset(cls, queryset, user):\n        user_id = not user.is_anonymous and user.id\n\n        queryset = queryset.annotate(\n            public=Exists(\n                Channel.objects.filter(\n                    public=True, main_tree__tree_id=OuterRef(\"contentnode__tree_id\")\n                ).values(\"pk\")\n            ),\n        )\n\n        if not user_id:\n            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)\n\n        edit_cte = PermissionCTE.editable_channels(user_id)\n        view_cte = PermissionCTE.view_only_channels(user_id)\n\n        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(\n            edit=edit_cte.exists(cls._permission_filter),\n            view=view_cte.exists(cls._permission_filter),\n        )\n\n        if user.is_admin:\n            return queryset\n\n        return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n    def on_create(self):\n        \"\"\"\n        When an exercise is added to a contentnode, update the contentnode's\n        content_id if it is a copied contentnode.\n        \"\"\"\n        self.contentnode.make_content_id_unique()\n\n    def on_update(self):\n        \"\"\"\n        When an exercise of a contentnode is updated, update the contentnode's\n        content_id if it is a copied contentnode.\n        \"\"\"\n        self.contentnode.make_content_id_unique()\n\n    def delete(self, *args, **kwargs):\n        \"\"\"\n        When an exercise is deleted from a contentnode, update the contentnode's\n        content_id if it is a copied contentnode.\n        \"\"\"\n        self.contentnode.make_content_id_unique()\n        return super(AssessmentItem, self).delete(*args, **kwargs)\n\n\nclass SlideshowSlide(models.Model):\n    contentnode = models.ForeignKey('ContentNode', related_name=\"slideshow_slides\", blank=True, null=True,\n                                    db_index=True, on_delete=models.CASCADE)\n    sort_order = models.FloatField(default=1.0)\n    metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n    \"\"\"\n    Keeps track of files uploaded through Ricecooker, to avoid users going over the disk quota limit\n    \"\"\"\n    checksum = models.CharField(max_length=400, blank=True, db_index=True)\n    file_size = models.IntegerField(blank=True, null=True)\n    uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE)\n\n\nFILE_DISTINCT_INDEX_NAME = \"file_checksum_file_size_idx\"\nFILE_MODIFIED_DESC_INDEX_NAME = \"file_modified_desc_idx\"\nFILE_DURATION_CONSTRAINT = \"file_media_duration_int\"\nMEDIA_PRESETS = [\n    format_presets.AUDIO,\n    format_presets.AUDIO_DEPENDENCY,\n    format_presets.VIDEO_HIGH_RES,\n    format_presets.VIDEO_LOW_RES,\n    format_presets.VIDEO_DEPENDENCY,\n]\n\n\nclass File(models.Model):\n    \"\"\"\n    The bottom layer of the contentDB schema, defines the basic building block for content.\n    Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n    \"\"\"\n    id = UUIDField(primary_key=True, default=uuid.uuid4)\n    checksum = models.CharField(max_length=400, blank=True, db_index=True)\n    file_size = models.IntegerField(blank=True, null=True)\n    file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500,\n                                    blank=True)\n    contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n    assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n    slideshow_slide = models.ForeignKey(SlideshowSlide, 
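\n                                        # (Files are deduplicated by checksum: file_on_disk above lands at the\n                                        #  sharded path from object_storage_name, so two File rows sharing a\n                                        #  checksum point at one stored blob.)\n                                        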
related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)\n\n modified = models.DateTimeField(auto_now=True, verbose_name=\"modified\", null=True)\n duration = models.IntegerField(blank=True, null=True)\n\n objects = CustomManager()\n\n _permission_filter = Q(tree_id=OuterRef(\"contentnode__tree_id\")) | Q(tree_id=OuterRef(\"assessment_item__contentnode__tree_id\"))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n if not user_id:\n return queryset.none()\n\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter))\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(\n Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)\n )\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n queryset = queryset.annotate(\n public=Exists(\n Channel.objects.filter(public=True).filter(\n Q(main_tree__tree_id=OuterRef(\"contentnode__tree_id\"))\n | Q(main_tree__tree_id=OuterRef(\"assessment_item__contentnode__tree_id\"))\n ).values(\"pk\")\n ),\n )\n\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)\n\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(\n edit=edit_cte.exists(cls._permission_filter),\n view=view_cte.exists(cls._permission_filter),\n )\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(\n Q(view=True)\n | Q(edit=True)\n | Q(public=True)\n | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)\n )\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. 
abcd.mp4\n \"\"\"\n # TODO(aron): write tests for this\n\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n # since modified was added later as a nullable field to File, we don't use a default but\n # instead we'll just make sure it's always updated through our serializers\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Overrider the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n\n # check if the file format exists in file_formats.choices\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError(\"Invalid file_format\")\n\n if set_by_file_on_disk and self.file_on_disk: # if file_on_disk is supplied, hash out the file\n if self.checksum is None or self.checksum == \"\":\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError(\"Files of type `{}` are not supported.\".format(ext))\n\n super(File, self).save(*args, **kwargs)\n\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n class Meta:\n indexes = [\n models.Index(fields=['checksum', 'file_size'], name=FILE_DISTINCT_INDEX_NAME),\n models.Index(fields=[\"-modified\"], name=FILE_MODIFIED_DESC_INDEX_NAME),\n ]\n constraints = [\n # enforces that duration is null when not a media preset, but the duration may be null for media presets\n # but if not-null, should be greater than 0\n models.CheckConstraint(\n check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)),\n name=FILE_DURATION_CONSTRAINT\n )\n ]\n\n\n@receiver(models.signals.post_delete, sender=File)\ndef auto_delete_file_on_delete(sender, instance, **kwargs):\n \"\"\"\n Deletes file from filesystem if no other File objects are referencing the same file on disk\n when corresponding `File` object is deleted.\n Be careful! we don't know if this will work when perform bash delete on File obejcts.\n \"\"\"\n # Recalculate storage\n from contentcuration.utils.user import calculate_user_storage\n if instance.uploaded_by_id:\n calculate_user_storage(instance.uploaded_by_id)\n\n\ndef delete_empty_file_reference(checksum, extension):\n filename = checksum + '.' 
+ extension\n if not File.objects.filter(checksum=checksum).exists() and not Channel.objects.filter(thumbnail=filename).exists():\n storage_path = generate_object_storage_name(checksum, filename)\n if default_storage.exists(storage_path):\n default_storage.delete(storage_path)\n\n\nclass PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n # self reference exception\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n # immediate cyclic exception\n if PrerequisiteContentRelationship.objects.using(self._state.db) \\\n .filter(target_node=self.prerequisite, prerequisite=self.target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n # distant cyclic exception\n # elif <this is a nice to have exception, may implement in the future when the priority raises.>\n # raise Exception('Note: Prerequisite relationship is acyclic! %s and %s forms a closed loop!' % (\n # self.target_node, self.prerequisite\n # ))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % (self.pk)\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n # self reference exception\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n # handle immediate cyclic\n if RelatedContentRelationship.objects.using(self._state.db) \\\n .filter(contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1):\n return # silently cancel the save\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = models.CharField(max_length=100, blank=True, 
null=True)\n\n class Meta:\n verbose_name = \"Invitation\"\n verbose_name_plural = \"Invitations\"\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n # channel is a nullable field, so check that it exists.\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(\n Q(email__iexact=user.email)\n | Q(sender=user)\n | Q(channel__editors=user)\n ).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n\n if user.is_admin:\n return queryset\n return queryset.filter(\n Q(email__iexact=user.email)\n | Q(sender=user)\n | Q(channel__editors=user)\n | Q(channel__viewers=user)\n ).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n # We need to store the user who is applying this change\n # so that we can validate they have permissions to do so\n # allow to be null so that we don't lose changes if a user\n # account is hard deleted.\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name=\"changes_by_user\")\n # Almost all changes are related to channels, but some are specific only to users\n # so we allow this to be nullable for these edge cases.\n # Indexed by default because it's a ForeignKey field.\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=models.CASCADE)\n # For those changes related to users, store a user value instead of channel\n # this may be different to created_by, as changes to invitations affect individual users.\n # Indexed by default because it's a ForeignKey field.\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE, related_name=\"changes_about_user\")\n # Use client_rev to keep track of changes coming from the client side\n # but let it be blank or null for changes we generate on the server side\n client_rev = models.IntegerField(null=True, blank=True)\n # client_rev numbers are by session, we add the session key here for bookkeeping\n # to allow a check within the same session to return whether a change has been applied\n # or not, and hence remove it from the frontend\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n # Use the DRF JSONEncoder class as the encoder here\n # so that we can handle anything that has been deserialized by DRF\n # or that will be later be serialized by DRF\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None, user_id=None, session_key=None, applied=False, table=None, rev=None, **data):\n change_type = data.pop(\"type\")\n if table is None or table not in ALL_TABLES:\n raise TypeError(\"table is a required argument for creating changes and must be a valid table name\")\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\"change_type is a required argument for creating changes and must be a valid change type integer\")\n return 
cls(\n session_id=session_key,\n created_by_id=created_by_id,\n channel_id=channel_id,\n user_id=user_id,\n client_rev=rev,\n table=table,\n change_type=change_type,\n kwargs=data,\n applied=applied\n )\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None, applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change))\n\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None, applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, [\"kwargs\"]).copy()\n datum.update({\n \"server_rev\": get_attribute(change, [\"server_rev\"]),\n \"table\": get_attribute(change, [\"table\"]),\n \"type\": get_attribute(change, [\"change_type\"]),\n \"channel_id\": get_attribute(change, [\"channel_id\"]),\n \"user_id\": get_attribute(change, [\"user_id\"]),\n \"created_by_id\": get_attribute(change, [\"created_by_id\"])\n })\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. See `0141_add_task_signature` for an example\n \"\"\"\n # user shouldn't be null, but in order to append the field, this needs to be allowed\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\"tasks\", on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0), MaxValueValidator(100)])\n # a hash of the task name and kwargs for identifying repeat tasks\n signature = models.CharField(null=True, blank=False, max_length=32)\n\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(\n user_id=self.user_id,\n channel_id=self.channel_id,\n progress=self.progress,\n )\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith(\"_\") and field not in ('contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n\n # manually add Meta afterwards\n setattr(model_class._meta, 'indexes', getattr(model_class._meta, 'indexes', []) + cls.Meta.indexes)\n\n class Meta:\n indexes = [\n # add index that matches query usage for signature\n models.Index(\n fields=['signature'],\n name='task_result_signature_idx',\n condition=Q(status__in=celery_states.UNREADY_STATES),\n ),\n ]\n\n\n# trigger class contributions immediately\nTaskResultCustom.contribute_to_class()\n",
"step-ids": [
65,
102,
158,
169,
216
]
}
|
[
65,
102,
158,
169,
216
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lis(n1, n2):
"""
Generate and print last 5 element in list.
param:n1,n2
"""
i = 0
if n1 and n2 <= 20:
for x in range(n1, n2 + 1):
lis1.append(x * x)
lis1.reverse()
for y in lis1:
if i <= 4:
lis2.append(y)
i += 1
print(lis2)
else:
print('Value out of range')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lis(n1, n2):
"""
Generate and print last 5 element in list.
param:n1,n2
"""
i = 0
if n1 and n2 <= 20:
for x in range(n1, n2 + 1):
lis1.append(x * x)
lis1.reverse()
for y in lis1:
if i <= 4:
lis2.append(y)
i += 1
print(lis2)
else:
print('Value out of range')
lis(input_num[0], input_num[1])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
input_num = input('Write number:')
lis1 = []
lis2 = []
def lis(n1, n2):
"""
Generate and print last 5 element in list.
param:n1,n2
"""
i = 0
if n1 and n2 <= 20:
for x in range(n1, n2 + 1):
lis1.append(x * x)
lis1.reverse()
for y in lis1:
if i <= 4:
lis2.append(y)
i += 1
print(lis2)
else:
print('Value out of range')
lis(input_num[0], input_num[1])
<|reserved_special_token_1|>
"""
Question 39:
Define a function which can generate a list where the values are square of numbers between 1 and
20 (both included). Then the function needs to print the last 5 elements in the list.
"""
# To get two integers from console input, e.g. "1 20".
input_num = [int(n) for n in input("Write two numbers: ").split()]
lis1 = []
lis2 = []
def lis(n1, n2):
"""
	Generate the squares of n1..n2 and print the last 5 elements of the list.
	param: n1, n2
"""
i = 0
	if n1 <= 20 and n2 <= 20:
for x in range(n1,n2+1):
lis1.append(x*x)
lis1.reverse()
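		# after reversing, lis1 starts with the largest squares, so its first five entries are the last five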
for y in lis1:
if i <=4:
lis2.append(y)
i +=1
print(lis2)
else:
print("Value out of range")
# Calling function.
lis(input_num[0],input_num[1])
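# Example: for input "1 20" this prints [400, 361, 324, 289, 256].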
|
flexible
|
{
"blob_id": "24c1f5195bad17f995fb97a03218fc9bbe5ce4cd",
"index": 2476,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lis(n1, n2):\n \"\"\"\n\tGenerate and print last 5 element in list.\n\tparam:n1,n2\n\t\"\"\"\n i = 0\n if n1 and n2 <= 20:\n for x in range(n1, n2 + 1):\n lis1.append(x * x)\n lis1.reverse()\n for y in lis1:\n if i <= 4:\n lis2.append(y)\n i += 1\n print(lis2)\n else:\n print('Value out of range')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef lis(n1, n2):\n \"\"\"\n\tGenerate and print last 5 element in list.\n\tparam:n1,n2\n\t\"\"\"\n i = 0\n if n1 and n2 <= 20:\n for x in range(n1, n2 + 1):\n lis1.append(x * x)\n lis1.reverse()\n for y in lis1:\n if i <= 4:\n lis2.append(y)\n i += 1\n print(lis2)\n else:\n print('Value out of range')\n\n\nlis(input_num[0], input_num[1])\n",
"step-4": "<mask token>\ninput_num = input('Write number:')\nlis1 = []\nlis2 = []\n\n\ndef lis(n1, n2):\n \"\"\"\n\tGenerate and print last 5 element in list.\n\tparam:n1,n2\n\t\"\"\"\n i = 0\n if n1 and n2 <= 20:\n for x in range(n1, n2 + 1):\n lis1.append(x * x)\n lis1.reverse()\n for y in lis1:\n if i <= 4:\n lis2.append(y)\n i += 1\n print(lis2)\n else:\n print('Value out of range')\n\n\nlis(input_num[0], input_num[1])\n",
"step-5": "\"\"\"\nQuestion 39:\nDefine a function which can generate a list where the values are square of numbers between 1 and\n20 (both included). Then the function needs to print the last 5 elements in the list.\n\"\"\"\n\n#To get a value from console input.\ninput_num = input(\"Write number:\")\nlis1=[]\nlis2=[]\n\ndef lis(n1,n2):\n\t\"\"\"\n\tGenerate and print last 5 element in list.\n\tparam:n1,n2\n\t\"\"\"\n\ti = 0\n\tif n1 and n2 <= 20:\n\t\tfor x in range(n1,n2+1):\n\t\t\tlis1.append(x*x)\n\t\tlis1.reverse()\n\t\t\n\t\tfor y in lis1:\n\t\t\tif i <=4:\n\t\t\t\tlis2.append(y)\n\t\t\t\ti +=1\n\t\tprint(lis2)\n\telse:\n\t\tprint(\"Value out of range\")\n\n# Calling function.\nlis(input_num[0],input_num[1])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
import pyglet
from pyglet.gl import *
win = pyglet.window.Window()
@win.event
def on_draw():
# Clear buffers
glClear(GL_COLOR_BUFFER_BIT)
# Draw outlines only
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
# Draw some stuff
glBegin(GL_TRIANGLES)
glVertex3i(0, 0, 0)
glVertex3i(300, 0, 0)
glVertex3i(0, 300, 0)
glEnd()
pyglet.app.run()
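# Expected result: a window showing a single triangle rendered in wireframe (outline) mode.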
|
normal
|
{
"blob_id": "86c4193ec0fee8a0c06858913ec8153fcf0df6d9",
"index": 4114,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@win.event\ndef on_draw():\n glClear(GL_COLOR_BUFFER_BIT)\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glBegin(GL_TRIANGLES)\n glVertex3i(0, 0, 0)\n glVertex3i(300, 0, 0)\n glVertex3i(0, 300, 0)\n glEnd()\n\n\npyglet.app.run()\n",
"step-3": "<mask token>\nwin = pyglet.window.Window()\n\n\n@win.event\ndef on_draw():\n glClear(GL_COLOR_BUFFER_BIT)\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glBegin(GL_TRIANGLES)\n glVertex3i(0, 0, 0)\n glVertex3i(300, 0, 0)\n glVertex3i(0, 300, 0)\n glEnd()\n\n\npyglet.app.run()\n",
"step-4": "import pyglet\nfrom pyglet.gl import *\nwin = pyglet.window.Window()\n\n\n@win.event\ndef on_draw():\n glClear(GL_COLOR_BUFFER_BIT)\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glBegin(GL_TRIANGLES)\n glVertex3i(0, 0, 0)\n glVertex3i(300, 0, 0)\n glVertex3i(0, 300, 0)\n glEnd()\n\n\npyglet.app.run()\n",
"step-5": "#!/usr/bin/python\n\nimport pyglet\nfrom pyglet.gl import *\n\nwin = pyglet.window.Window()\n\n@win.event\ndef on_draw():\n\n\t# Clear buffers\n\tglClear(GL_COLOR_BUFFER_BIT)\n\n\t# Draw outlines only\n\tglPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n\n\t# Draw some stuff\n\tglBegin(GL_TRIANGLES)\n\tglVertex3i(0, 0, 0)\n\tglVertex3i(300, 0, 0)\n\tglVertex3i(0, 300, 0)\n\tglEnd()\n\npyglet.app.run()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def is_prime(x):
if x < 2:
return False
for i in range(2, int(sqrt(x)) + 1):
if x % i == 0:
return False
return True
def primes(x):
return islice((p for p in count() if is_prime(p)), x)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_prime(x):
if x < 2:
return False
for i in range(2, int(sqrt(x)) + 1):
if x % i == 0:
return False
return True
def primes(x):
return islice((p for p in count() if is_prime(p)), x)
print(list(primes(1000))[-10:])
print(sum(primes(1000)))
print(any([True, True]))
print(any([True, False]))
print(any([False, False]))
print(all([True, True]))
print(all([True, False]))
print(all([False, False]))
print('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in
range(1328, 1361)))
<|reserved_special_token_0|>
print(monday, tuesday)
for item in zip(monday, tuesday):
print(item, type(item))
for d1, d2 in zip(monday, tuesday):
print(f'Hourly average is {(d1 + d2) / 2}°C')
<|reserved_special_token_0|>
for temps in zip(monday, tuesday, wednesday):
print(
f'min={min(temps):4.1f}\t max={max(temps):4.1f}\t avg={sum(temps) / len(temps):4.1f}'
)
<|reserved_special_token_0|>
print(monday, tuesday, wednesday)
print(list(temperatures))
<|reserved_special_token_0|>
for x in (p for p in lucas() if is_prime(p)):
print(x, 'time:', tc() - start)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_prime(x):
if x < 2:
return False
for i in range(2, int(sqrt(x)) + 1):
if x % i == 0:
return False
return True
def primes(x):
return islice((p for p in count() if is_prime(p)), x)
print(list(primes(1000))[-10:])
print(sum(primes(1000)))
print(any([True, True]))
print(any([True, False]))
print(any([False, False]))
print(all([True, True]))
print(all([True, False]))
print(all([False, False]))
print('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in
range(1328, 1361)))
monday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]
tuesday = [(x * 2 - 10) for x in monday]
print(monday, tuesday)
for item in zip(monday, tuesday):
print(item, type(item))
for d1, d2 in zip(monday, tuesday):
print(f'Hourly average is {(d1 + d2) / 2}°C')
wednesday = [(x * 2 - 20) for x in tuesday]
for temps in zip(monday, tuesday, wednesday):
print(
f'min={min(temps):4.1f}\t max={max(temps):4.1f}\t avg={sum(temps) / len(temps):4.1f}'
)
<|reserved_special_token_0|>
temperatures = chain(monday, tuesday, wednesday)
print(monday, tuesday, wednesday)
print(list(temperatures))
<|reserved_special_token_0|>
start = tc()
for x in (p for p in lucas() if is_prime(p)):
print(x, 'time:', tc() - start)
<|reserved_special_token_1|>
from itertools import count, islice
from math import sqrt
def is_prime(x):
if x < 2:
return False
for i in range(2, int(sqrt(x)) + 1):
if x % i == 0:
return False
return True
def primes(x):
return islice((p for p in count() if is_prime(p)), x)
print(list(primes(1000))[-10:])
print(sum(primes(1000)))
print(any([True, True]))
print(any([True, False]))
print(any([False, False]))
print(all([True, True]))
print(all([True, False]))
print(all([False, False]))
print('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in
range(1328, 1361)))
monday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]
tuesday = [(x * 2 - 10) for x in monday]
print(monday, tuesday)
for item in zip(monday, tuesday):
print(item, type(item))
for d1, d2 in zip(monday, tuesday):
print(f'Hourly average is {(d1 + d2) / 2}°C')
wednesday = [(x * 2 - 20) for x in tuesday]
for temps in zip(monday, tuesday, wednesday):
print(
f'min={min(temps):4.1f}\t max={max(temps):4.1f}\t avg={sum(temps) / len(temps):4.1f}'
)
from itertools import chain
temperatures = chain(monday, tuesday, wednesday)
print(monday, tuesday, wednesday)
print(list(temperatures))
from md_lucas import lucas
from time import perf_counter as tc
start = tc()
for x in (p for p in lucas() if is_prime(p)):
print(x, 'time:', tc() - start)
<|reserved_special_token_1|>
from itertools import count, islice
from math import sqrt
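# Trial division: any composite x has a factor <= sqrt(x), so checking up to sqrt(x) suffices.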
def is_prime(x):
    if x < 2:
        return False
    for i in range(2, int(sqrt(x)) + 1):
        if x % i == 0:
return False
return True
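# Lazily take the first x primes from the infinite generator over count().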
def primes(x):
return islice((p for p in count() if is_prime(p)), x)
print(list(primes(1000))[-10:])
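# the 1000th prime is 7919, so the printed slice ends with 7919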
print(sum(primes(1000)))
print(any([True, True]))
print(any([True, False]))
print(any([False, False])) # is there a TRUE
print(all([True, True])) # are all of them TRUE
print(all([True, False]))
print(all([False, False]))
print("Is there a prime between 1328 and 1361:", any(is_prime(x) for x in range(1328, 1361)))
monday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]
tuesday = [x*2-10 for x in monday]
print(monday, tuesday)
for item in zip(monday, tuesday):
print(item, type(item))
for d1, d2 in zip(monday, tuesday):
print(f"Hourly average is {(d1 + d2)/2}°C")
wednesday = [x*2-20 for x in tuesday]
for temps in zip(monday, tuesday, wednesday):
print(f"min={min(temps):4.1f}\t max={max(temps):4.1f}\t avg={sum(temps)/len(temps):4.1f}")
from itertools import chain
temperatures = chain(monday, tuesday, wednesday)
print(monday, tuesday, wednesday) # the three source lists, printed separately
print(list(temperatures)) # chain() realizes their concatenation lazily
from md_lucas import lucas
from time import perf_counter as tc
start = tc()
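# lucas() (from the local md_lucas module) is assumed to yield Lucas numbers indefinitely;
# trial-division primality checks slow down sharply as the terms grow.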
for x in (p for p in lucas() if is_prime(p)):
print(x, "time:", tc()-start)
|
flexible
|
{
"blob_id": "0f1bad350faaff6aab339944b4d24c4801fa8c64",
"index": 4965,
"step-1": "<mask token>\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x % i == 0:\n return False\n return True\n\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x % i == 0:\n return False\n return True\n\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\n\nprint(list(primes(1000))[-10:])\nprint(sum(primes(1000)))\nprint(any([True, True]))\nprint(any([True, False]))\nprint(any([False, False]))\nprint(all([True, True]))\nprint(all([True, False]))\nprint(all([False, False]))\nprint('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in\n range(1328, 1361)))\n<mask token>\nprint(monday, tuesday)\nfor item in zip(monday, tuesday):\n print(item, type(item))\nfor d1, d2 in zip(monday, tuesday):\n print(f'Hourly average is {(d1 + d2) / 2}°C')\n<mask token>\nfor temps in zip(monday, tuesday, wednesday):\n print(\n f'min={min(temps):4.1f}\\t max={max(temps):4.1f}\\t avg={sum(temps) / len(temps):4.1f}'\n )\n<mask token>\nprint(monday, tuesday, wednesday)\nprint(list(temperatures))\n<mask token>\nfor x in (p for p in lucas() if is_prime(p)):\n print(x, 'time:', tc() - start)\n",
"step-3": "<mask token>\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x % i == 0:\n return False\n return True\n\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\n\nprint(list(primes(1000))[-10:])\nprint(sum(primes(1000)))\nprint(any([True, True]))\nprint(any([True, False]))\nprint(any([False, False]))\nprint(all([True, True]))\nprint(all([True, False]))\nprint(all([False, False]))\nprint('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in\n range(1328, 1361)))\nmonday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]\ntuesday = [(x * 2 - 10) for x in monday]\nprint(monday, tuesday)\nfor item in zip(monday, tuesday):\n print(item, type(item))\nfor d1, d2 in zip(monday, tuesday):\n print(f'Hourly average is {(d1 + d2) / 2}°C')\nwednesday = [(x * 2 - 20) for x in tuesday]\nfor temps in zip(monday, tuesday, wednesday):\n print(\n f'min={min(temps):4.1f}\\t max={max(temps):4.1f}\\t avg={sum(temps) / len(temps):4.1f}'\n )\n<mask token>\ntemperatures = chain(monday, tuesday, wednesday)\nprint(monday, tuesday, wednesday)\nprint(list(temperatures))\n<mask token>\nstart = tc()\nfor x in (p for p in lucas() if is_prime(p)):\n print(x, 'time:', tc() - start)\n",
"step-4": "from itertools import count, islice\nfrom math import sqrt\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x % i == 0:\n return False\n return True\n\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\n\nprint(list(primes(1000))[-10:])\nprint(sum(primes(1000)))\nprint(any([True, True]))\nprint(any([True, False]))\nprint(any([False, False]))\nprint(all([True, True]))\nprint(all([True, False]))\nprint(all([False, False]))\nprint('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in\n range(1328, 1361)))\nmonday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]\ntuesday = [(x * 2 - 10) for x in monday]\nprint(monday, tuesday)\nfor item in zip(monday, tuesday):\n print(item, type(item))\nfor d1, d2 in zip(monday, tuesday):\n print(f'Hourly average is {(d1 + d2) / 2}°C')\nwednesday = [(x * 2 - 20) for x in tuesday]\nfor temps in zip(monday, tuesday, wednesday):\n print(\n f'min={min(temps):4.1f}\\t max={max(temps):4.1f}\\t avg={sum(temps) / len(temps):4.1f}'\n )\nfrom itertools import chain\ntemperatures = chain(monday, tuesday, wednesday)\nprint(monday, tuesday, wednesday)\nprint(list(temperatures))\nfrom md_lucas import lucas\nfrom time import perf_counter as tc\nstart = tc()\nfor x in (p for p in lucas() if is_prime(p)):\n print(x, 'time:', tc() - start)\n",
"step-5": "from itertools import count, islice\nfrom math import sqrt\n\ndef is_prime(x):\n if x<2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x%i == 0:\n return False\n return True\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\nprint(list(primes(1000))[-10:])\n\nprint(sum(primes(1000)))\n\nprint(any([True, True]))\nprint(any([True, False]))\nprint(any([False, False])) # is there a TRUE\nprint(all([True, True])) # are all of them TRUE\nprint(all([True, False]))\nprint(all([False, False]))\n\nprint(\"Is there a prime between 1328 and 1361:\", any(is_prime(x) for x in range(1328, 1361)))\n\nmonday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]\ntuesday = [x*2-10 for x in monday]\nprint(monday, tuesday)\nfor item in zip(monday, tuesday):\n print(item, type(item))\n\nfor d1, d2 in zip(monday, tuesday):\n print(f\"Hourly average is {(d1 + d2)/2}°C\")\n\nwednesday = [x*2-20 for x in tuesday]\n\nfor temps in zip(monday, tuesday, wednesday):\n print(f\"min={min(temps):4.1f}\\t max={max(temps):4.1f}\\t avg={sum(temps)/len(temps):4.1f}\")\n\nfrom itertools import chain\ntemperatures = chain(monday, tuesday, wednesday)\nprint(monday, tuesday, wednesday) # concatenation\nprint(list(temperatures)) # lazy concatenation\n\nfrom md_lucas import lucas\nfrom time import perf_counter as tc\nstart = tc()\nfor x in (p for p in lucas() if is_prime(p)):\n print(x, \"time:\", tc()-start)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os.path
from flask import url_for
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey
from sqlalchemy.orm import relationship
from tuneful import app
from .database import Base, engine, session
class Song(Base):
__tablename__ = 'songs'
id = Column(Integer, primary_key=True)
file_id = Column(Integer, ForeignKey('files.id'), nullable=False)
def as_dictionary(self):
file_data = session.query(File).get(self.file_id)
song_dict = {
"id": self.id,
"file": {
"id": file_data.id,
"filename": file_data.filename
}
}
return song_dict
class File(Base):
__tablename__ = 'files'
id = Column(Integer, primary_key=True)
filename = Column(String, nullable=False)
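    # uselist=False makes File.song a single Song (one-to-one); backref="song" exposes the File on Song as .song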
song = relationship("Song", uselist=False, backref="song")
def as_dictionary(self):
file_dict = {
"id": self.id,
"filename": self.filename
}
return file_dict
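# Hypothetical usage sketch (identifiers assumed, not from the original app):
#   f = File(filename="track.mp3"); session.add(f); session.commit()
#   s = Song(file_id=f.id); session.add(s); session.commit()
#   s.as_dictionary()  # -> {"id": s.id, "file": {"id": f.id, "filename": "track.mp3"}}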
|
normal
|
{
"blob_id": "d5c2b73c202c9944cd64798ef5ddc08ce68a4a9a",
"index": 3446,
"step-1": "<mask token>\n\n\nclass Song(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass File(Base):\n __tablename__ = 'files'\n id = Column(Integer, primary_key=True)\n filename = Column(String, nullable=False)\n song = relationship('Song', uselist=False, backref='song')\n\n def as_dictionary(self):\n file_dict = {'id': self.id, 'filename': self.filename}\n return file_dict\n",
"step-2": "<mask token>\n\n\nclass Song(Base):\n <mask token>\n <mask token>\n <mask token>\n\n def as_dictionary(self):\n file_data = session.query(File).get(self.file_id)\n song_dict = {'id': self.id, 'file': {'id': file_data.id, 'filename':\n file_data.filename}}\n return song_dict\n\n\nclass File(Base):\n __tablename__ = 'files'\n id = Column(Integer, primary_key=True)\n filename = Column(String, nullable=False)\n song = relationship('Song', uselist=False, backref='song')\n\n def as_dictionary(self):\n file_dict = {'id': self.id, 'filename': self.filename}\n return file_dict\n",
"step-3": "<mask token>\n\n\nclass Song(Base):\n __tablename__ = 'songs'\n id = Column(Integer, primary_key=True)\n file_id = Column(Integer, ForeignKey('files.id'), nullable=False)\n\n def as_dictionary(self):\n file_data = session.query(File).get(self.file_id)\n song_dict = {'id': self.id, 'file': {'id': file_data.id, 'filename':\n file_data.filename}}\n return song_dict\n\n\nclass File(Base):\n __tablename__ = 'files'\n id = Column(Integer, primary_key=True)\n filename = Column(String, nullable=False)\n song = relationship('Song', uselist=False, backref='song')\n\n def as_dictionary(self):\n file_dict = {'id': self.id, 'filename': self.filename}\n return file_dict\n",
"step-4": "import os.path\nfrom flask import url_for\nfrom sqlalchemy import Column, Integer, String, Sequence, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom tuneful import app\nfrom .database import Base, engine, session\n\n\nclass Song(Base):\n __tablename__ = 'songs'\n id = Column(Integer, primary_key=True)\n file_id = Column(Integer, ForeignKey('files.id'), nullable=False)\n\n def as_dictionary(self):\n file_data = session.query(File).get(self.file_id)\n song_dict = {'id': self.id, 'file': {'id': file_data.id, 'filename':\n file_data.filename}}\n return song_dict\n\n\nclass File(Base):\n __tablename__ = 'files'\n id = Column(Integer, primary_key=True)\n filename = Column(String, nullable=False)\n song = relationship('Song', uselist=False, backref='song')\n\n def as_dictionary(self):\n file_dict = {'id': self.id, 'filename': self.filename}\n return file_dict\n",
"step-5": "import os.path\n\nfrom flask import url_for\nfrom sqlalchemy import Column, Integer, String, Sequence, ForeignKey\nfrom sqlalchemy.orm import relationship\n\nfrom tuneful import app\nfrom .database import Base, engine, session\n\nclass Song(Base):\n __tablename__ = 'songs'\n id = Column(Integer, primary_key=True)\n file_id = Column(Integer, ForeignKey('files.id'), nullable=False)\n \n def as_dictionary(self):\n file_data = session.query(File).get(self.file_id)\n song_dict = {\n \"id\": self.id,\n \"file\": {\n \"id\": file_data.id,\n \"filename\": file_data.filename\n }\n }\n return song_dict\n\nclass File(Base):\n __tablename__ = 'files'\n id = Column(Integer, primary_key=True)\n filename = Column(String, nullable=False)\n song = relationship(\"Song\", uselist=False, backref=\"song\")\n\n def as_dictionary(self):\n file_dict = {\n \"id\": self.id,\n \"filename\": self.filename\n }\n return file_dict",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#####
# Created on Oct 15 13:13:11 2019
#
# @author: inesverissimo
#
# Do pRF fit on median run, make iterative fit and save outputs
####
import os
# issue with tensorflow, try this suggestion
#NUM_PARALLEL_EXEC_UNITS = 16
#os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)
#os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
##
import json
import sys
import glob
import re
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import scipy as sp
import scipy.stats as stats
import nibabel as nb
from nilearn.image import mean_img
from nilearn import surface
from utils import * # import script to use relevant functions
# requires prfpy to be installed - preferably with python setup.py develop
from prfpy.rf import *
from prfpy.timecourse import *
from prfpy.stimulus import PRFStimulus2D
from prfpy.grid import Iso2DGaussianGridder
from prfpy.fit import Iso2DGaussianFitter
from popeye import utilities
# define participant number and open json parameter file
if len(sys.argv) < 2:
raise NameError('Please add subject number (ex:1) '
'as 1st argument in the command line!')
elif len(sys.argv) < 3:
raise NameError('Please select server being used (ex: aeneas or cartesius) '
'as 2nd argument in the command line!')
else:
# fill subject number with 0 in case user forgets
sj = str(sys.argv[1]).zfill(2)
json_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(
sys.argv[2]) == 'cartesius' else 'analysis_params.json'
with open(json_dir, 'r') as json_file:
analysis_params = json.load(json_file)
# use smoothed data?
with_smooth = analysis_params['with_smooth']
# define paths and list of files
if str(sys.argv[2]) == 'cartesius':
filepath = glob.glob(os.path.join(
analysis_params['post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
print('functional files from %s' % os.path.split(filepath[0])[0])
out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],'shift_crop')
elif str(sys.argv[2]) == 'aeneas':
print(os.path.join(
analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
filepath = glob.glob(os.path.join(
analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
print('functional files from %s' % os.path.split(filepath[0])[0])
out_dir = os.path.join(analysis_params['pRF_outdir'],'shift_crop')
# changes depending on data used
if with_smooth == 'True':
# last part of filename to use
file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params['smooth_fwhm']
# compute median run, per hemifield
median_path = os.path.join(
out_dir, 'sub-{sj}'.format(sj=sj), 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],'iterative_fit')
else:
# last part of filename to use
file_extension = 'cropped_sg_psc.func.gii'
# compute median run, per hemifield
median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj), 'run-median','iterative_fit')
# list of functional files
filename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and run.endswith(
file_extension)]
filename.sort()
if not os.path.exists(median_path):  # check if path to save median run exists
os.makedirs(median_path)
med_gii = []
for field in ['hemi-L', 'hemi-R']:
hemi = [h for h in filename if field in h]
    # set filename for the median run
med_file = os.path.join(median_path, re.sub(
'run-\d{2}_', 'run-median_', os.path.split(hemi[0])[-1]))
# if file doesn't exist
if not os.path.exists(med_file):
med_gii.append(median_gii(hemi, median_path)) # create it
print('computed %s' % (med_gii))
else:
med_gii.append(med_file)
print('median file %s already exists, skipping' % (med_gii))
# create/load design matrix
png_path = '/home/inesv/SB-ref/scripts/imgs/' if str(
sys.argv[2]) == 'cartesius' else analysis_params['imgs_dir']
png_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]
png_filename.sort()
dm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')
#if not os.path.exists(dm_filename): # if not exists
screenshot2DM(png_filename, 0.1,
analysis_params['screenRes'], dm_filename,dm_shape = 'square') # create it
print('computed %s' % (dm_filename))
#else:
# print('loading %s' % dm_filename)
prf_dm = np.load(dm_filename)
prf_dm = prf_dm.T # then it'll be (x, y, t)
# change DM to see if fit is better like that
# do new one which is average of every 2 TRs
prf_dm = shift_DM(prf_dm)
prf_dm = prf_dm[:,:,analysis_params['crop_pRF_TR']:] # crop DM because functional data also cropped now
# define model params
fit_model = analysis_params["fit_model"]
TR = analysis_params["TR"]
hrf = utilities.spm_hrf(0,TR)
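# double-gamma (SPM) hemodynamic response function sampled at the acquisition TR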
# make stimulus object, which takes an input design matrix and sets up its real-world dimensions
prf_stim = PRFStimulus2D(screen_size_cm=analysis_params["screen_width"],
screen_distance_cm=analysis_params["screen_distance"],
design_matrix=prf_dm,
TR=TR)
# sets up stimulus and hrf for this gridder
gg = Iso2DGaussianGridder(stimulus=prf_stim,
hrf=hrf,
filter_predictions=False,
window_length=analysis_params["sg_filt_window_length"],
polyorder=analysis_params["sg_filt_polyorder"],
highpass=False,
add_mean=False)
# set grid parameters
grid_nr = analysis_params["grid_steps"]
sizes = analysis_params["max_size"] * np.linspace(np.sqrt(analysis_params["min_size"]/analysis_params["max_size"]),1,grid_nr)**2
eccs = analysis_params["max_eccen"] * np.linspace(np.sqrt(analysis_params["min_eccen"]/analysis_params["max_eccen"]),1,grid_nr)**2
polars = np.linspace(0, 2*np.pi, grid_nr)
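# squaring a normalized linspace concentrates grid points at small sizes/eccentricities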
for gii_file in med_gii:
print('loading data from %s' % gii_file)
data = np.array(surface.load_surf_data(gii_file))
print('data array with shape %s'%str(data.shape))
gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)
#filename for the numpy array with the estimates of the grid fit
grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')
if not os.path.isfile(grid_estimates_filename): # if estimates file doesn't exist
print('%s not found, fitting grid'%grid_estimates_filename)
# do grid fit and save estimates
gf.grid_fit(ecc_grid=eccs,
polar_grid=polars,
size_grid=sizes)
        np.savez(grid_estimates_filename,
                 x=gf.gridsearch_params[..., 0],
                 y=gf.gridsearch_params[..., 1],
                 size=gf.gridsearch_params[..., 2],
                 betas=gf.gridsearch_params[..., 3],
                 baseline=gf.gridsearch_params[..., 4],
                 ns=gf.gridsearch_params[..., 5],
                 r2=gf.gridsearch_params[..., 6])
loaded_gf_pars = np.load(grid_estimates_filename)
gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x', 'y', 'size', 'betas', 'baseline','ns','r2']])
gf.gridsearch_params = np.transpose(gf.gridsearch_params)
# do iterative fit
iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')
if not os.path.isfile(iterative_out): # if estimates file doesn't exist
print('doing iterative fit')
gf.iterative_fit(rsq_threshold=0.1, verbose=False)
np.savez(iterative_out,
it_output=gf.iterative_search_params)
else:
print('%s already exists'%iterative_out)
## do iterative fit again, now with css, n=1 (isn't that just gaussian?)
#print('doing iterative fit with css ')
#gf.fit_css = True
#gf.iterative_fit(rsq_threshold=0.1, verbose=False)
#iterative_css_out = gii_file.replace('.func.gii', '_iterative_css_output.npz')
#np.savez(iterative_css_out,
# it_output=gf.iterative_search_params)
|
normal
|
{
"blob_id": "d9156e240d49e0a6570a5bc2315f95a7a670fd4f",
"index": 6327,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\n<mask token>\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\n<mask token>\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\n<mask token>\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\n<mask token>\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\n<mask token>\npng_filename.sort()\n<mask token>\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\n<mask token>\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, 
it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-3": "<mask token>\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(sys.argv[2]\n ) == 'cartesius' else 'analysis_params.json'\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\nwith_smooth = analysis_params['with_smooth']\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and\n run.endswith(file_extension)]\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(sys.argv[2]\n ) == 'cartesius' else analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T\nprf_dm = shift_DM(prf_dm)\nprf_dm = prf_dm[:, :, analysis_params['crop_pRF_TR']:]\nfit_model = analysis_params['fit_model']\nTR = analysis_params['TR']\nhrf = utilities.spm_hrf(0, TR)\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params['screen_width'],\n screen_distance_cm=analysis_params['screen_distance'], design_matrix=\n prf_dm, TR=TR)\ngg = Iso2DGaussianGridder(stimulus=prf_stim, hrf=hrf, filter_predictions=\n False, window_length=analysis_params['sg_filt_window_length'],\n polyorder=analysis_params['sg_filt_polyorder'], highpass=False,\n add_mean=False)\ngrid_nr = analysis_params['grid_steps']\nsizes = analysis_params['max_size'] * np.linspace(np.sqrt(analysis_params[\n 'min_size'] / analysis_params['max_size']), 1, grid_nr) ** 2\neccs = 
analysis_params['max_eccen'] * np.linspace(np.sqrt(analysis_params[\n 'min_eccen'] / analysis_params['max_eccen']), 1, grid_nr) ** 2\npolars = np.linspace(0, 2 * np.pi, grid_nr)\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-4": "import os\nimport json\nimport sys\nimport glob\nimport re\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as stats\nimport nibabel as nb\nfrom nilearn.image import mean_img\nfrom nilearn import surface\nfrom utils import *\nfrom prfpy.rf import *\nfrom prfpy.timecourse import *\nfrom prfpy.stimulus import PRFStimulus2D\nfrom prfpy.grid import Iso2DGaussianGridder\nfrom prfpy.fit import Iso2DGaussianFitter\nfrom popeye import utilities\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(sys.argv[2]\n ) == 'cartesius' else 'analysis_params.json'\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\nwith_smooth = analysis_params['with_smooth']\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and\n run.endswith(file_extension)]\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(sys.argv[2]\n ) == 'cartesius' else analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T\nprf_dm = shift_DM(prf_dm)\nprf_dm = prf_dm[:, :, analysis_params['crop_pRF_TR']:]\nfit_model = analysis_params['fit_model']\nTR = analysis_params['TR']\nhrf = utilities.spm_hrf(0, TR)\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params['screen_width'],\n 
screen_distance_cm=analysis_params['screen_distance'], design_matrix=\n prf_dm, TR=TR)\ngg = Iso2DGaussianGridder(stimulus=prf_stim, hrf=hrf, filter_predictions=\n False, window_length=analysis_params['sg_filt_window_length'],\n polyorder=analysis_params['sg_filt_polyorder'], highpass=False,\n add_mean=False)\ngrid_nr = analysis_params['grid_steps']\nsizes = analysis_params['max_size'] * np.linspace(np.sqrt(analysis_params[\n 'min_size'] / analysis_params['max_size']), 1, grid_nr) ** 2\neccs = analysis_params['max_eccen'] * np.linspace(np.sqrt(analysis_params[\n 'min_eccen'] / analysis_params['max_eccen']), 1, grid_nr) ** 2\npolars = np.linspace(0, 2 * np.pi, grid_nr)\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-5": "\n#####\n# Created on Oct 15 13:13:11 2019\n#\n# @author: inesverissimo\n#\n# Do pRF fit on median run, make iterative fit and save outputs\n####\n\nimport os\n\n# issue with tensorflow, try this suggestion\n#NUM_PARALLEL_EXEC_UNITS = 16\n#os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)\n#os.environ[\"KMP_AFFINITY\"] = \"granularity=fine,verbose,compact,1,0\"\n##\n\nimport json\nimport sys\nimport glob\nimport re\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\n\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as stats\nimport nibabel as nb\nfrom nilearn.image import mean_img\n\nfrom nilearn import surface\n\nfrom utils import * # import script to use relevante functions\n\n# requires pfpy be installed - preferably with python setup.py develop\nfrom prfpy.rf import *\nfrom prfpy.timecourse import *\nfrom prfpy.stimulus import PRFStimulus2D\nfrom prfpy.grid import Iso2DGaussianGridder\nfrom prfpy.fit import Iso2DGaussianFitter\n\nfrom popeye import utilities \n\n# define participant number and open json parameter file\nif len(sys.argv) < 2:\n raise NameError('Please add subject number (ex:1) '\n 'as 1st argument in the command line!')\n\nelif len(sys.argv) < 3:\n raise NameError('Please select server being used (ex: aeneas or cartesius) '\n 'as 2nd argument in the command line!')\n\nelse:\n # fill subject number with 0 in case user forgets\n sj = str(sys.argv[1]).zfill(2)\n\n\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(\n sys.argv[2]) == 'cartesius' else 'analysis_params.json'\n\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\n\n# use smoothed data?\nwith_smooth = analysis_params['with_smooth']\n\n\n# define paths and list of files\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(\n analysis_params['post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],'shift_crop')\n\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(\n analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(\n analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'],'shift_crop')\n\n# changes depending on data used\nif with_smooth == 'True':\n # last part of filename to use\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params['smooth_fwhm']\n # compute median run, per hemifield\n median_path = os.path.join(\n out_dir, 'sub-{sj}'.format(sj=sj), 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],'iterative_fit')\nelse:\n # last part of filename to use\n file_extension = 'cropped_sg_psc.func.gii'\n # compute median run, per hemifield\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj), 'run-median','iterative_fit')\n\n# list of functional files\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and run.endswith(\n file_extension)]\nfilename.sort()\nif not os.path.exists(median_path): # check if path to save median run exist\n os.makedirs(median_path)\n\n\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n\n # set name for median run (now numpy array)\n med_file = os.path.join(median_path, re.sub(\n 'run-\\d{2}_', 
'run-median_', os.path.split(hemi[0])[-1]))\n # if file doesn't exist\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path)) # create it\n print('computed %s' % (med_gii))\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % (med_gii))\n\n\n# create/load design matrix\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(\n sys.argv[2]) == 'cartesius' else analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\n\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\n\n#if not os.path.exists(dm_filename): # if not exists\nscreenshot2DM(png_filename, 0.1,\n analysis_params['screenRes'], dm_filename,dm_shape = 'square') # create it\nprint('computed %s' % (dm_filename))\n\n#else:\n# print('loading %s' % dm_filename)\n\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T # then it'll be (x, y, t)\n\n# change DM to see if fit is better like that\n# do new one which is average of every 2 TRs\n\nprf_dm = shift_DM(prf_dm)\n\nprf_dm = prf_dm[:,:,analysis_params['crop_pRF_TR']:] # crop DM because functional data also cropped now\n\n# define model params\nfit_model = analysis_params[\"fit_model\"]\n\nTR = analysis_params[\"TR\"]\n\nhrf = utilities.spm_hrf(0,TR)\n\n# make stimulus object, which takes an input design matrix and sets up its real-world dimensions\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params[\"screen_width\"], \n screen_distance_cm=analysis_params[\"screen_distance\"], \n design_matrix=prf_dm, \n TR=TR)\n\n# sets up stimulus and hrf for this gridder\ngg = Iso2DGaussianGridder(stimulus=prf_stim,\n hrf=hrf,\n filter_predictions=False,\n window_length=analysis_params[\"sg_filt_window_length\"],\n polyorder=analysis_params[\"sg_filt_polyorder\"],\n highpass=False,\n add_mean=False)\n\n# set grid parameters\ngrid_nr = analysis_params[\"grid_steps\"]\nsizes = analysis_params[\"max_size\"] * np.linspace(np.sqrt(analysis_params[\"min_size\"]/analysis_params[\"max_size\"]),1,grid_nr)**2\neccs = analysis_params[\"max_eccen\"] * np.linspace(np.sqrt(analysis_params[\"min_eccen\"]/analysis_params[\"max_eccen\"]),1,grid_nr)**2\npolars = np.linspace(0, 2*np.pi, grid_nr)\n\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s'%str(data.shape))\n\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n\n #filename for the numpy array with the estimates of the grid fit\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n\n if not os.path.isfile(grid_estimates_filename): # if estimates file doesn't exist\n print('%s not found, fitting grid'%grid_estimates_filename)\n # do grid fit and save estimates\n gf.grid_fit(ecc_grid=eccs,\n polar_grid=polars,\n size_grid=sizes)\n\n np.savez(grid_estimates_filename,\n x = gf.gridsearch_params[..., 0],\n y = gf.gridsearch_params[..., 1],\n size = gf.gridsearch_params[..., 2],\n betas = gf.gridsearch_params[...,3],\n baseline = gf.gridsearch_params[..., 4],\n ns = gf.gridsearch_params[..., 5],\n r2 = gf.gridsearch_params[..., 6])\n\n\n loaded_gf_pars = np.load(grid_estimates_filename)\n\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x', 'y', 'size', 'betas', 'baseline','ns','r2']]) \n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n\n # do iterative fit\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n\n if not 
os.path.isfile(iterative_out): # if estimates file doesn't exist\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n\n \n np.savez(iterative_out,\n it_output=gf.iterative_search_params)\n else:\n print('%s already exists'%iterative_out)\n\n ## do iterative fit again, now with css, n=1 (isn't that just gaussian?)\n #print('doing iterative fit with css ')\n #gf.fit_css = True\n #gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n\n #iterative_css_out = gii_file.replace('.func.gii', '_iterative_css_output.npz')\n #np.savez(iterative_css_out,\n # it_output=gf.iterative_search_params)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2

cam = cv2.VideoCapture("./bebop.sdp")

while True:
    ret, frame = cam.read()
    if not ret:
        # stream ended or the frame was dropped; passing None to imshow raises
        break
    cv2.imshow("frame", frame)
    cv2.waitKey(1)
|
normal
|
{
"blob_id": "d13b402b90bb948e5722f45096a8c0a33e4cac67",
"index": 6968,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n ret, frame = cam.read()\n cv2.imshow('frame', frame)\n cv2.waitKey(1)\n",
"step-3": "<mask token>\ncam = cv2.VideoCapture('./bebop.sdp')\nwhile True:\n ret, frame = cam.read()\n cv2.imshow('frame', frame)\n cv2.waitKey(1)\n",
"step-4": "import cv2\ncam = cv2.VideoCapture('./bebop.sdp')\nwhile True:\n ret, frame = cam.read()\n cv2.imshow('frame', frame)\n cv2.waitKey(1)\n",
"step-5": "import cv2\n\ncam = cv2.VideoCapture(\"./bebop.sdp\")\n\nwhile True:\n ret, frame = cam.read()\n cv2.imshow(\"frame\", frame)\n cv2.waitKey(1)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('backend', '0001_initial')]
operations = [migrations.CreateModel(name='Aro', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('nombre', models.CharField(max_length=255,
unique=True))]), migrations.AddField(model_name='bicicleta', name=
'modelo', field=models.CharField(default=1, max_length=255),
preserve_default=False), migrations.AddField(model_name='bicicleta',
name='numero_serie', field=models.CharField(default=1, max_length=
255), preserve_default=False), migrations.AddField(model_name=
'bicicleta', name='aro', field=models.ForeignKey(default=1,
on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),
preserve_default=False)]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('backend', '0001_initial')]
operations = [migrations.CreateModel(name='Aro', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('nombre', models.CharField(max_length=255,
unique=True))]), migrations.AddField(model_name='bicicleta', name=
'modelo', field=models.CharField(default=1, max_length=255),
preserve_default=False), migrations.AddField(model_name='bicicleta',
name='numero_serie', field=models.CharField(default=1, max_length=
255), preserve_default=False), migrations.AddField(model_name=
'bicicleta', name='aro', field=models.ForeignKey(default=1,
on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),
preserve_default=False)]
<|reserved_special_token_1|>
# Generated by Django 2.1.1 on 2018-09-24 04:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backend', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Aro',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=255, unique=True)),
],
),
migrations.AddField(
model_name='bicicleta',
name='modelo',
field=models.CharField(default=1, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='bicicleta',
name='numero_serie',
field=models.CharField(default=1, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='bicicleta',
name='aro',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),
preserve_default=False,
),
]
|
flexible
|
{
"blob_id": "8dff22249abbae9e30ba1ad423457270e0cd9b20",
"index": 7027,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('backend', '0001_initial')]\n operations = [migrations.CreateModel(name='Aro', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('nombre', models.CharField(max_length=255,\n unique=True))]), migrations.AddField(model_name='bicicleta', name=\n 'modelo', field=models.CharField(default=1, max_length=255),\n preserve_default=False), migrations.AddField(model_name='bicicleta',\n name='numero_serie', field=models.CharField(default=1, max_length=\n 255), preserve_default=False), migrations.AddField(model_name=\n 'bicicleta', name='aro', field=models.ForeignKey(default=1,\n on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),\n preserve_default=False)]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('backend', '0001_initial')]\n operations = [migrations.CreateModel(name='Aro', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('nombre', models.CharField(max_length=255,\n unique=True))]), migrations.AddField(model_name='bicicleta', name=\n 'modelo', field=models.CharField(default=1, max_length=255),\n preserve_default=False), migrations.AddField(model_name='bicicleta',\n name='numero_serie', field=models.CharField(default=1, max_length=\n 255), preserve_default=False), migrations.AddField(model_name=\n 'bicicleta', name='aro', field=models.ForeignKey(default=1,\n on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),\n preserve_default=False)]\n",
"step-5": "# Generated by Django 2.1.1 on 2018-09-24 04:59\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backend', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Aro',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nombre', models.CharField(max_length=255, unique=True)),\n ],\n ),\n migrations.AddField(\n model_name='bicicleta',\n name='modelo',\n field=models.CharField(default=1, max_length=255),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='bicicleta',\n name='numero_serie',\n field=models.CharField(default=1, max_length=255),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='bicicleta',\n name='aro',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def ping_calculate_pong(expression, operator_index):
"""The function takes two arguments.
Argument 1: an expression from which we will extract one subexpression.
Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.
The function:
1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;
2. calculates subexpression result using function math_operation();
3. replaces in expression: subexpression to subexpression result - pong.
"""
if len(expression) < 3 or operator_index == len(expression
) - 1 or operator_index == 0:
raise ValueError(
f'{expression} - check this fragment, something wrong.')
sub_expression = expression[operator_index - 1:operator_index + 2]
sub_result = math_operation(sub_expression)
expression[operator_index + 1] = sub_result
del expression[operator_index - 1:operator_index + 1]
def calculator_without_parentheses(expression):
"""The function:
1. prioritizes mathematical operations in expression without any parentheses;
2. transfers expression and indexes of math operators to the function ping_calculate_pong();
3. returns result of calculations.
"""
j = 1
while len(expression) > j:
if '**' in expression:
ping_calculate_pong(expression, expression.index('**'))
elif '*' in expression or '/' in expression:
if '*' in expression and '/' in expression:
if expression.index('*') < expression.index('/'):
ping_calculate_pong(expression, expression.index('*'))
else:
ping_calculate_pong(expression, expression.index('/'))
elif '/' not in expression:
ping_calculate_pong(expression, expression.index('*'))
elif '*' not in expression:
ping_calculate_pong(expression, expression.index('/'))
elif '+' in expression or '-' in expression:
if '+' in expression and '-' in expression:
if expression.index('+') < expression.index('-'):
ping_calculate_pong(expression, expression.index('+'))
else:
ping_calculate_pong(expression, expression.index('-'))
elif '-' not in expression:
ping_calculate_pong(expression, expression.index('+'))
elif '+' not in expression:
ping_calculate_pong(expression, expression.index('-'))
else:
j += 1
return expression
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def math_operation(expression):
"""Simple calculator for two numbers in expression like 3 + 3."""
if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():
if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit(
) or not str(expression[2]).replace('.', '1').replace('-', '1'
).isdigit():
raise ValueError(
f'{expression} - check this fragment, something wrong.')
if expression[2] == 0 and expression[1] == '/':
raise ValueError(f'{expression} - division by zero.')
operator = expression[1]
if operator == '**':
return expression[0] ** expression[2]
elif operator == '*':
return expression[0] * expression[2]
elif operator == '/':
return expression[0] / expression[2]
elif operator == '+':
return expression[0] + expression[2]
elif operator == '-':
return expression[0] - expression[2]
def ping_calculate_pong(expression, operator_index):
"""The function takes two arguments.
Argument 1: an expression from which we will extract one subexpression.
Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.
The function:
1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;
2. calculates subexpression result using function math_operation();
3. replaces in expression: subexpression to subexpression result - pong.
"""
if len(expression) < 3 or operator_index == len(expression
) - 1 or operator_index == 0:
raise ValueError(
f'{expression} - check this fragment, something wrong.')
sub_expression = expression[operator_index - 1:operator_index + 2]
sub_result = math_operation(sub_expression)
expression[operator_index + 1] = sub_result
del expression[operator_index - 1:operator_index + 1]
def calculator_without_parentheses(expression):
"""The function:
1. prioritizes mathematical operations in expression without any parentheses;
2. transfers expression and indexes of math operators to the function ping_calculate_pong();
3. returns result of calculations.
"""
j = 1
while len(expression) > j:
if '**' in expression:
ping_calculate_pong(expression, expression.index('**'))
elif '*' in expression or '/' in expression:
if '*' in expression and '/' in expression:
if expression.index('*') < expression.index('/'):
ping_calculate_pong(expression, expression.index('*'))
else:
ping_calculate_pong(expression, expression.index('/'))
elif '/' not in expression:
ping_calculate_pong(expression, expression.index('*'))
elif '*' not in expression:
ping_calculate_pong(expression, expression.index('/'))
elif '+' in expression or '-' in expression:
if '+' in expression and '-' in expression:
if expression.index('+') < expression.index('-'):
ping_calculate_pong(expression, expression.index('+'))
else:
ping_calculate_pong(expression, expression.index('-'))
elif '-' not in expression:
ping_calculate_pong(expression, expression.index('+'))
elif '+' not in expression:
ping_calculate_pong(expression, expression.index('-'))
else:
j += 1
return expression
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def math_operation(expression):
"""Simple calculator for two numbers in expression like 3 + 3."""
if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():
if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit(
) or not str(expression[2]).replace('.', '1').replace('-', '1'
).isdigit():
raise ValueError(
f'{expression} - check this fragment, something wrong.')
if expression[2] == 0 and expression[1] == '/':
raise ValueError(f'{expression} - division by zero.')
operator = expression[1]
if operator == '**':
return expression[0] ** expression[2]
elif operator == '*':
return expression[0] * expression[2]
elif operator == '/':
return expression[0] / expression[2]
elif operator == '+':
return expression[0] + expression[2]
elif operator == '-':
return expression[0] - expression[2]
def ping_calculate_pong(expression, operator_index):
"""The function takes two arguments.
Argument 1: an expression from which we will extract one subexpression.
Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.
The function:
1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;
2. calculates subexpression result using function math_operation();
3. replaces in expression: subexpression to subexpression result - pong.
"""
if len(expression) < 3 or operator_index == len(expression
) - 1 or operator_index == 0:
raise ValueError(
f'{expression} - check this fragment, something wrong.')
sub_expression = expression[operator_index - 1:operator_index + 2]
sub_result = math_operation(sub_expression)
expression[operator_index + 1] = sub_result
del expression[operator_index - 1:operator_index + 1]
def calculator_without_parentheses(expression):
"""The function:
1. prioritizes mathematical operations in expression without any parentheses;
2. transfers expression and indexes of math operators to the function ping_calculate_pong();
3. returns result of calculations.
"""
j = 1
while len(expression) > j:
if '**' in expression:
ping_calculate_pong(expression, expression.index('**'))
elif '*' in expression or '/' in expression:
if '*' in expression and '/' in expression:
if expression.index('*') < expression.index('/'):
ping_calculate_pong(expression, expression.index('*'))
else:
ping_calculate_pong(expression, expression.index('/'))
elif '/' not in expression:
ping_calculate_pong(expression, expression.index('*'))
elif '*' not in expression:
ping_calculate_pong(expression, expression.index('/'))
elif '+' in expression or '-' in expression:
if '+' in expression and '-' in expression:
if expression.index('+') < expression.index('-'):
ping_calculate_pong(expression, expression.index('+'))
else:
ping_calculate_pong(expression, expression.index('-'))
elif '-' not in expression:
ping_calculate_pong(expression, expression.index('+'))
elif '+' not in expression:
ping_calculate_pong(expression, expression.index('-'))
else:
j += 1
return expression
def clear_and_convert(string_math_expression):
"""This function takes string expression and converts it to list with int, float, and 'math signs'."""
cleared_expression = list(filter(lambda x: x != ' ',
string_math_expression))
check_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+',
'-', '*', '/', '(', ')', '.']
for element in cleared_expression:
if element not in check_list:
raise ValueError(
f'Houston, we have a problem. Element "{element}" in expression is not correct.'
)
num_exp = []
number = ''
len_cleared_expression = len(cleared_expression)
for i, element in enumerate(cleared_expression):
if element.isdigit():
number += element
if i == len_cleared_expression - 1 or not cleared_expression[i + 1
].isdigit():
num_exp.append(int(number))
number = ''
else:
num_exp.append(element)
while '.' in num_exp:
i = num_exp.index('.')
if i != 0 and i != len(num_exp) - 1 and isinstance(num_exp[i - 1], int
) and isinstance(num_exp[i + 1], int):
float_number = float(str(num_exp[i - 1]) + num_exp[i] + str(
num_exp[i + 1]))
num_exp[i + 1] = float_number
del num_exp[i - 1:i + 1]
else:
raise ValueError('Something wrong with ".".')
neg_exp = []
excluded_index = None
neg_check_list = ['+', '-', '*', '/', '(']
len_num_exp = len(num_exp)
for i, element in enumerate(num_exp):
if element == '-':
if i == len_num_exp - 1:
raise ValueError('Something wrong with "-".')
elif isinstance(num_exp[i + 1], int) and (i == 0 or num_exp[i -
1] in neg_check_list):
n_number = int('-' + str(num_exp[i + 1]))
neg_exp.append(n_number)
excluded_index = i + 1
elif isinstance(num_exp[i + 1], float) and (i == 0 or num_exp[i -
1] in neg_check_list):
n_number = float('-' + str(num_exp[i + 1]))
neg_exp.append(n_number)
excluded_index = i + 1
else:
neg_exp.append(element)
elif i != excluded_index:
neg_exp.append(element)
converted_expression = []
i = 0
len_neg_exp = len(neg_exp)
while i < len_neg_exp:
if (i == 0 or i == len_neg_exp - 1) and neg_exp[i] == '*':
raise ValueError('Something wrong with "*".')
elif neg_exp[i] == '*' and neg_exp[i + 1] == '*':
converted_expression.append('**')
i += 2
else:
converted_expression.append(neg_exp[i])
i += 1
return converted_expression
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def math_operation(expression):
"""Simple calculator for two numbers in expression like 3 + 3."""
if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():
if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit(
) or not str(expression[2]).replace('.', '1').replace('-', '1'
).isdigit():
raise ValueError(
f'{expression} - check this fragment, something wrong.')
if expression[2] == 0 and expression[1] == '/':
raise ValueError(f'{expression} - division by zero.')
operator = expression[1]
if operator == '**':
return expression[0] ** expression[2]
elif operator == '*':
return expression[0] * expression[2]
elif operator == '/':
return expression[0] / expression[2]
elif operator == '+':
return expression[0] + expression[2]
elif operator == '-':
return expression[0] - expression[2]
def ping_calculate_pong(expression, operator_index):
"""The function takes two arguments.
Argument 1: an expression from which we will extract one subexpression.
Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.
The function:
1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;
2. calculates subexpression result using function math_operation();
3. replaces in expression: subexpression to subexpression result - pong.
"""
if len(expression) < 3 or operator_index == len(expression
) - 1 or operator_index == 0:
raise ValueError(
f'{expression} - check this fragment, something wrong.')
sub_expression = expression[operator_index - 1:operator_index + 2]
sub_result = math_operation(sub_expression)
expression[operator_index + 1] = sub_result
del expression[operator_index - 1:operator_index + 1]
def calculator_without_parentheses(expression):
"""The function:
1. prioritizes mathematical operations in expression without any parentheses;
2. transfers expression and indexes of math operators to the function ping_calculate_pong();
3. returns result of calculations.
"""
j = 1
while len(expression) > j:
if '**' in expression:
ping_calculate_pong(expression, expression.index('**'))
elif '*' in expression or '/' in expression:
if '*' in expression and '/' in expression:
if expression.index('*') < expression.index('/'):
ping_calculate_pong(expression, expression.index('*'))
else:
ping_calculate_pong(expression, expression.index('/'))
elif '/' not in expression:
ping_calculate_pong(expression, expression.index('*'))
elif '*' not in expression:
ping_calculate_pong(expression, expression.index('/'))
elif '+' in expression or '-' in expression:
if '+' in expression and '-' in expression:
if expression.index('+') < expression.index('-'):
ping_calculate_pong(expression, expression.index('+'))
else:
ping_calculate_pong(expression, expression.index('-'))
elif '-' not in expression:
ping_calculate_pong(expression, expression.index('+'))
elif '+' not in expression:
ping_calculate_pong(expression, expression.index('-'))
else:
j += 1
return expression
def clear_and_convert(string_math_expression):
"""This function takes string expression and converts it to list with int, float, and 'math signs'."""
cleared_expression = list(filter(lambda x: x != ' ',
string_math_expression))
check_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+',
'-', '*', '/', '(', ')', '.']
for element in cleared_expression:
if element not in check_list:
raise ValueError(
f'Houston, we have a problem. Element "{element}" in expression is not correct.'
)
num_exp = []
number = ''
len_cleared_expression = len(cleared_expression)
for i, element in enumerate(cleared_expression):
if element.isdigit():
number += element
if i == len_cleared_expression - 1 or not cleared_expression[i + 1
].isdigit():
num_exp.append(int(number))
number = ''
else:
num_exp.append(element)
while '.' in num_exp:
i = num_exp.index('.')
if i != 0 and i != len(num_exp) - 1 and isinstance(num_exp[i - 1], int
) and isinstance(num_exp[i + 1], int):
float_number = float(str(num_exp[i - 1]) + num_exp[i] + str(
num_exp[i + 1]))
num_exp[i + 1] = float_number
del num_exp[i - 1:i + 1]
else:
raise ValueError('Something wrong with ".".')
neg_exp = []
excluded_index = None
neg_check_list = ['+', '-', '*', '/', '(']
len_num_exp = len(num_exp)
for i, element in enumerate(num_exp):
if element == '-':
if i == len_num_exp - 1:
raise ValueError('Something wrong with "-".')
elif isinstance(num_exp[i + 1], int) and (i == 0 or num_exp[i -
1] in neg_check_list):
n_number = int('-' + str(num_exp[i + 1]))
neg_exp.append(n_number)
excluded_index = i + 1
elif isinstance(num_exp[i + 1], float) and (i == 0 or num_exp[i -
1] in neg_check_list):
n_number = float('-' + str(num_exp[i + 1]))
neg_exp.append(n_number)
excluded_index = i + 1
else:
neg_exp.append(element)
elif i != excluded_index:
neg_exp.append(element)
converted_expression = []
i = 0
len_neg_exp = len(neg_exp)
while i < len_neg_exp:
if (i == 0 or i == len_neg_exp - 1) and neg_exp[i] == '*':
raise ValueError('Something wrong with "*".')
elif neg_exp[i] == '*' and neg_exp[i + 1] == '*':
converted_expression.append('**')
i += 2
else:
converted_expression.append(neg_exp[i])
i += 1
return converted_expression
def calculate_expression(str_math_expression):
"""This function:
1. uses clear_and_convert() to prepare the string math expression for further calculations;
2. finds all subexpressions inside parentheses (if there are such);
3. transfers subexpression to calculator_without_parentheses() for further calculations;
4. replaces subexpression with the result;
5. returns final result of all calculations.
"""
expression = clear_and_convert(str_math_expression)
for element in expression.copy():
if ')' in expression:
if '(' in expression:
if expression.index(')') > expression.index('('):
z = expression.index(')')
a = z
while expression[a] != '(':
a -= 1
fragment = expression[a + 1:z]
fr_result = calculator_without_parentheses(fragment)
if len(fr_result) != 1:
raise ValueError(
f'{fr_result} - check this fragment, something wrong.'
)
expression[z] = fr_result[0]
del expression[a:z]
else:
raise ValueError('Something wrong with parentheses.')
else:
raise ValueError('Something wrong with parentheses.')
else:
expression = calculator_without_parentheses(expression)
if len(expression) != 1:
raise ValueError('Something wrong in your expression.')
if len(expression) == 1:
return str(round(expression[0], 5))
<|reserved_special_token_1|>
"""Calculator is built using "ping pong" algorithm, without eval() etc.
Main final function: calculate_expression().
calculate_expression() uses two functions in utils.py: clear_and_convert() and calculator_without_parentheses().
calculator_without_parentheses() uses two remaining functions:
math_operation() -> ping_calculate_pong() -> calculator_without_parentheses().
Allowed operations: +, -, *, /, **, use of parentheses. Spaces don't matter.
Negative numbers should be written as: (-34), float numbers: 3.4
Expression example: ((-2.3) + 3 ** (2 - 2)) * 2.2 + (6/(3 + 3)* (-2)) ** 2
"""
def math_operation(expression):
"""Simple calculator for two numbers in expression like 3 + 3."""
if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():
# eliminates the error call for float and negative numbers
if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit() or \
not str(expression[2]).replace('.', '1').replace('-', '1').isdigit():
raise ValueError(f'{expression} - check this fragment, something wrong.')
if expression[2] == 0 and expression[1] == '/':
raise ValueError(f'{expression} - division by zero.')
operator = expression[1]
if operator == '**':
return expression[0]**expression[2]
elif operator == '*':
return expression[0]*expression[2]
elif operator == '/':
return expression[0]/expression[2]
elif operator == '+':
return expression[0]+expression[2]
elif operator == '-':
return expression[0]-expression[2]
def ping_calculate_pong(expression, operator_index):
"""The function takes two arguments.
Argument 1: an expression from which we will extract one subexpression.
Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.
The function:
1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;
2. calculates subexpression result using function math_operation();
3. replaces in expression: subexpression to subexpression result - pong.
"""
if len(expression) < 3 or operator_index == len(expression)-1 or operator_index == 0:
raise ValueError(f'{expression} - check this fragment, something wrong.')
sub_expression = expression[operator_index - 1:operator_index + 2]
sub_result = math_operation(sub_expression)
expression[operator_index+1] = sub_result
del expression[operator_index-1:operator_index+1]
def calculator_without_parentheses(expression):
"""The function:
1. prioritizes mathematical operations in expression without any parentheses;
2. transfers expression and indexes of math operators to the function ping_calculate_pong();
3. returns result of calculations.
"""
j = 1
while len(expression) > j:
if "**" in expression:
ping_calculate_pong(expression, expression.index('**'))
elif '*' in expression or '/' in expression:
if '*' in expression and '/' in expression:
if expression.index('*') < expression.index('/'):
ping_calculate_pong(expression, expression.index('*'))
else:
ping_calculate_pong(expression, expression.index('/'))
elif '/' not in expression:
ping_calculate_pong(expression, expression.index('*'))
elif '*' not in expression:
ping_calculate_pong(expression, expression.index('/'))
elif '+' in expression or '-' in expression:
if '+' in expression and '-' in expression:
if expression.index('+') < expression.index('-'):
ping_calculate_pong(expression, expression.index('+'))
else:
ping_calculate_pong(expression, expression.index('-'))
elif '-' not in expression:
ping_calculate_pong(expression, expression.index('+'))
elif '+' not in expression:
ping_calculate_pong(expression, expression.index('-'))
else:
j += 1 # protection against a possible eternal loop when an incorrect expression is entered
return expression
def clear_and_convert(string_math_expression):
"""This function takes string expression and converts it to list with int, float, and 'math signs'."""
# clear the expression of spaces and convert it to the list
cleared_expression = list(filter(lambda x: x != ' ', string_math_expression))
# check characters in the expression for correctness
check_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '-', '*', '/', '(', ')', '.']
for element in cleared_expression:
if element not in check_list:
raise ValueError(f'Houston, we have a problem. Element "{element}" in expression is not correct.')
# find multi-digit numbers and create new list num_exp with int
num_exp = []
number = ''
len_cleared_expression = len(cleared_expression)
for i, element in enumerate(cleared_expression):
if element.isdigit():
number += element
if i == len_cleared_expression - 1 or not cleared_expression[i+1].isdigit():
num_exp.append(int(number))
number = ''
else:
num_exp.append(element)
# find float numbers and update list num_exp
while '.' in num_exp:
i = num_exp.index('.')
if (i != 0 and i != len(num_exp) - 1
and isinstance(num_exp[i-1], int)
and isinstance(num_exp[i+1], int)):
float_number = float(str(num_exp[i-1]) + num_exp[i] + str(num_exp[i+1]))
num_exp[i+1] = float_number
del num_exp[i-1:i+1]
else:
raise ValueError('Something wrong with ".".')
# find negative numbers and create new list with negative numbers
neg_exp = []
excluded_index = None
neg_check_list = ['+', '-', '*', '/', '(']
len_num_exp = len(num_exp)
for i, element in enumerate(num_exp):
if element == '-':
if i == len_num_exp - 1:
raise ValueError('Something wrong with "-".')
elif isinstance(num_exp[i+1], int) and (i == 0 or num_exp[i-1] in neg_check_list):
n_number = int('-' + str(num_exp[i+1]))
neg_exp.append(n_number)
excluded_index = i + 1
elif isinstance(num_exp[i+1], float) and (i == 0 or num_exp[i-1] in neg_check_list):
n_number = float('-' + str(num_exp[i+1]))
neg_exp.append(n_number)
excluded_index = i + 1
else:
neg_exp.append(element)
elif i != excluded_index:
neg_exp.append(element)
# find exponent operator and create new list with final converted expression
converted_expression = []
i = 0
len_neg_exp = len(neg_exp)
while i < len_neg_exp:
if (i == 0 or i == len_neg_exp - 1) and neg_exp[i] == '*':
raise ValueError('Something wrong with "*".')
elif neg_exp[i] == '*' and neg_exp[i+1] == '*':
converted_expression.append('**')
i += 2
else:
converted_expression.append(neg_exp[i])
i += 1
return converted_expression
def calculate_expression(str_math_expression):
"""This function:
1. uses clear_and_convert() to prepare the string math expression for further calculations;
2. finds all subexpressions inside parentheses (if there are such);
3. transfers subexpression to calculator_without_parentheses() for further calculations;
4. replaces subexpression with the result;
5. returns final result of all calculations.
"""
expression = clear_and_convert(str_math_expression)
for element in expression.copy():
if ')' in expression:
if '(' in expression:
if expression.index(')') > expression.index('('):
z = expression.index(')')
a = z
while expression[a] != '(':
a -= 1
fragment = expression[a+1:z]
fr_result = calculator_without_parentheses(fragment)
if len(fr_result) != 1: # checking for an input error in a fragment of the expression like ((()))
raise ValueError(f'{fr_result} - check this fragment, something wrong.')
expression[z] = fr_result[0]
del expression[a:z]
else:
raise ValueError('Something wrong with parentheses.')
else:
raise ValueError('Something wrong with parentheses.')
else:
expression = calculator_without_parentheses(expression)
if len(expression) != 1:
raise ValueError('Something wrong in your expression.')
if len(expression) == 1:
return str(round(expression[0], 5))
|
flexible
|
{
"blob_id": "c336bb6cdadfb836ab68ebd5bbb210f63af3d084",
"index": 2287,
"step-1": "<mask token>\n\n\ndef ping_calculate_pong(expression, operator_index):\n \"\"\"The function takes two arguments.\n Argument 1: an expression from which we will extract one subexpression.\n Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.\n The function:\n 1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;\n 2. calculates subexpression result using function math_operation();\n 3. replaces in expression: subexpression to subexpression result - pong.\n \"\"\"\n if len(expression) < 3 or operator_index == len(expression\n ) - 1 or operator_index == 0:\n raise ValueError(\n f'{expression} - check this fragment, something wrong.')\n sub_expression = expression[operator_index - 1:operator_index + 2]\n sub_result = math_operation(sub_expression)\n expression[operator_index + 1] = sub_result\n del expression[operator_index - 1:operator_index + 1]\n\n\ndef calculator_without_parentheses(expression):\n \"\"\"The function:\n 1. prioritizes mathematical operations in expression without any parentheses;\n 2. transfers expression and indexes of math operators to the function ping_calculate_pong();\n 3. returns result of calculations.\n \"\"\"\n j = 1\n while len(expression) > j:\n if '**' in expression:\n ping_calculate_pong(expression, expression.index('**'))\n elif '*' in expression or '/' in expression:\n if '*' in expression and '/' in expression:\n if expression.index('*') < expression.index('/'):\n ping_calculate_pong(expression, expression.index('*'))\n else:\n ping_calculate_pong(expression, expression.index('/'))\n elif '/' not in expression:\n ping_calculate_pong(expression, expression.index('*'))\n elif '*' not in expression:\n ping_calculate_pong(expression, expression.index('/'))\n elif '+' in expression or '-' in expression:\n if '+' in expression and '-' in expression:\n if expression.index('+') < expression.index('-'):\n ping_calculate_pong(expression, expression.index('+'))\n else:\n ping_calculate_pong(expression, expression.index('-'))\n elif '-' not in expression:\n ping_calculate_pong(expression, expression.index('+'))\n elif '+' not in expression:\n ping_calculate_pong(expression, expression.index('-'))\n else:\n j += 1\n return expression\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef math_operation(expression):\n \"\"\"Simple calculator for two numbers in expression like 3 + 3.\"\"\"\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit(\n ) or not str(expression[2]).replace('.', '1').replace('-', '1'\n ).isdigit():\n raise ValueError(\n f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0] ** expression[2]\n elif operator == '*':\n return expression[0] * expression[2]\n elif operator == '/':\n return expression[0] / expression[2]\n elif operator == '+':\n return expression[0] + expression[2]\n elif operator == '-':\n return expression[0] - expression[2]\n\n\ndef ping_calculate_pong(expression, operator_index):\n \"\"\"The function takes two arguments.\n Argument 1: an expression from which we will extract one subexpression.\n Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.\n The function:\n 1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;\n 2. calculates subexpression result using function math_operation();\n 3. replaces in expression: subexpression to subexpression result - pong.\n \"\"\"\n if len(expression) < 3 or operator_index == len(expression\n ) - 1 or operator_index == 0:\n raise ValueError(\n f'{expression} - check this fragment, something wrong.')\n sub_expression = expression[operator_index - 1:operator_index + 2]\n sub_result = math_operation(sub_expression)\n expression[operator_index + 1] = sub_result\n del expression[operator_index - 1:operator_index + 1]\n\n\ndef calculator_without_parentheses(expression):\n \"\"\"The function:\n 1. prioritizes mathematical operations in expression without any parentheses;\n 2. transfers expression and indexes of math operators to the function ping_calculate_pong();\n 3. returns result of calculations.\n \"\"\"\n j = 1\n while len(expression) > j:\n if '**' in expression:\n ping_calculate_pong(expression, expression.index('**'))\n elif '*' in expression or '/' in expression:\n if '*' in expression and '/' in expression:\n if expression.index('*') < expression.index('/'):\n ping_calculate_pong(expression, expression.index('*'))\n else:\n ping_calculate_pong(expression, expression.index('/'))\n elif '/' not in expression:\n ping_calculate_pong(expression, expression.index('*'))\n elif '*' not in expression:\n ping_calculate_pong(expression, expression.index('/'))\n elif '+' in expression or '-' in expression:\n if '+' in expression and '-' in expression:\n if expression.index('+') < expression.index('-'):\n ping_calculate_pong(expression, expression.index('+'))\n else:\n ping_calculate_pong(expression, expression.index('-'))\n elif '-' not in expression:\n ping_calculate_pong(expression, expression.index('+'))\n elif '+' not in expression:\n ping_calculate_pong(expression, expression.index('-'))\n else:\n j += 1\n return expression\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef math_operation(expression):\n \"\"\"Simple calculator for two numbers in expression like 3 + 3.\"\"\"\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit(\n ) or not str(expression[2]).replace('.', '1').replace('-', '1'\n ).isdigit():\n raise ValueError(\n f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0] ** expression[2]\n elif operator == '*':\n return expression[0] * expression[2]\n elif operator == '/':\n return expression[0] / expression[2]\n elif operator == '+':\n return expression[0] + expression[2]\n elif operator == '-':\n return expression[0] - expression[2]\n\n\ndef ping_calculate_pong(expression, operator_index):\n \"\"\"The function takes two arguments.\n Argument 1: an expression from which we will extract one subexpression.\n Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.\n The function:\n 1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;\n 2. calculates subexpression result using function math_operation();\n 3. replaces in expression: subexpression to subexpression result - pong.\n \"\"\"\n if len(expression) < 3 or operator_index == len(expression\n ) - 1 or operator_index == 0:\n raise ValueError(\n f'{expression} - check this fragment, something wrong.')\n sub_expression = expression[operator_index - 1:operator_index + 2]\n sub_result = math_operation(sub_expression)\n expression[operator_index + 1] = sub_result\n del expression[operator_index - 1:operator_index + 1]\n\n\ndef calculator_without_parentheses(expression):\n \"\"\"The function:\n 1. prioritizes mathematical operations in expression without any parentheses;\n 2. transfers expression and indexes of math operators to the function ping_calculate_pong();\n 3. 
returns result of calculations.\n \"\"\"\n j = 1\n while len(expression) > j:\n if '**' in expression:\n ping_calculate_pong(expression, expression.index('**'))\n elif '*' in expression or '/' in expression:\n if '*' in expression and '/' in expression:\n if expression.index('*') < expression.index('/'):\n ping_calculate_pong(expression, expression.index('*'))\n else:\n ping_calculate_pong(expression, expression.index('/'))\n elif '/' not in expression:\n ping_calculate_pong(expression, expression.index('*'))\n elif '*' not in expression:\n ping_calculate_pong(expression, expression.index('/'))\n elif '+' in expression or '-' in expression:\n if '+' in expression and '-' in expression:\n if expression.index('+') < expression.index('-'):\n ping_calculate_pong(expression, expression.index('+'))\n else:\n ping_calculate_pong(expression, expression.index('-'))\n elif '-' not in expression:\n ping_calculate_pong(expression, expression.index('+'))\n elif '+' not in expression:\n ping_calculate_pong(expression, expression.index('-'))\n else:\n j += 1\n return expression\n\n\ndef clear_and_convert(string_math_expression):\n \"\"\"This function takes string expression and converts it to list with int, float, and 'math signs'.\"\"\"\n cleared_expression = list(filter(lambda x: x != ' ',\n string_math_expression))\n check_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+',\n '-', '*', '/', '(', ')', '.']\n for element in cleared_expression:\n if element not in check_list:\n raise ValueError(\n f'Houston, we have a problem. Element \"{element}\" in expression is not correct.'\n )\n num_exp = []\n number = ''\n len_cleared_expression = len(cleared_expression)\n for i, element in enumerate(cleared_expression):\n if element.isdigit():\n number += element\n if i == len_cleared_expression - 1 or not cleared_expression[i + 1\n ].isdigit():\n num_exp.append(int(number))\n number = ''\n else:\n num_exp.append(element)\n while '.' in num_exp:\n i = num_exp.index('.')\n if i != 0 and i != len(num_exp) - 1 and isinstance(num_exp[i - 1], int\n ) and isinstance(num_exp[i + 1], int):\n float_number = float(str(num_exp[i - 1]) + num_exp[i] + str(\n num_exp[i + 1]))\n num_exp[i + 1] = float_number\n del num_exp[i - 1:i + 1]\n else:\n raise ValueError('Something wrong with \".\".')\n neg_exp = []\n excluded_index = None\n neg_check_list = ['+', '-', '*', '/', '(']\n len_num_exp = len(num_exp)\n for i, element in enumerate(num_exp):\n if element == '-':\n if i == len_num_exp - 1:\n raise ValueError('Something wrong with \"-\".')\n elif isinstance(num_exp[i + 1], int) and (i == 0 or num_exp[i -\n 1] in neg_check_list):\n n_number = int('-' + str(num_exp[i + 1]))\n neg_exp.append(n_number)\n excluded_index = i + 1\n elif isinstance(num_exp[i + 1], float) and (i == 0 or num_exp[i -\n 1] in neg_check_list):\n n_number = float('-' + str(num_exp[i + 1]))\n neg_exp.append(n_number)\n excluded_index = i + 1\n else:\n neg_exp.append(element)\n elif i != excluded_index:\n neg_exp.append(element)\n converted_expression = []\n i = 0\n len_neg_exp = len(neg_exp)\n while i < len_neg_exp:\n if (i == 0 or i == len_neg_exp - 1) and neg_exp[i] == '*':\n raise ValueError('Something wrong with \"*\".')\n elif neg_exp[i] == '*' and neg_exp[i + 1] == '*':\n converted_expression.append('**')\n i += 2\n else:\n converted_expression.append(neg_exp[i])\n i += 1\n return converted_expression\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef math_operation(expression):\n \"\"\"Simple calculator for two numbers in expression like 3 + 3.\"\"\"\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit(\n ) or not str(expression[2]).replace('.', '1').replace('-', '1'\n ).isdigit():\n raise ValueError(\n f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0] ** expression[2]\n elif operator == '*':\n return expression[0] * expression[2]\n elif operator == '/':\n return expression[0] / expression[2]\n elif operator == '+':\n return expression[0] + expression[2]\n elif operator == '-':\n return expression[0] - expression[2]\n\n\ndef ping_calculate_pong(expression, operator_index):\n \"\"\"The function takes two arguments.\n Argument 1: an expression from which we will extract one subexpression.\n Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.\n The function:\n 1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;\n 2. calculates subexpression result using function math_operation();\n 3. replaces in expression: subexpression to subexpression result - pong.\n \"\"\"\n if len(expression) < 3 or operator_index == len(expression\n ) - 1 or operator_index == 0:\n raise ValueError(\n f'{expression} - check this fragment, something wrong.')\n sub_expression = expression[operator_index - 1:operator_index + 2]\n sub_result = math_operation(sub_expression)\n expression[operator_index + 1] = sub_result\n del expression[operator_index - 1:operator_index + 1]\n\n\ndef calculator_without_parentheses(expression):\n \"\"\"The function:\n 1. prioritizes mathematical operations in expression without any parentheses;\n 2. transfers expression and indexes of math operators to the function ping_calculate_pong();\n 3. 
returns result of calculations.\n \"\"\"\n j = 1\n while len(expression) > j:\n if '**' in expression:\n ping_calculate_pong(expression, expression.index('**'))\n elif '*' in expression or '/' in expression:\n if '*' in expression and '/' in expression:\n if expression.index('*') < expression.index('/'):\n ping_calculate_pong(expression, expression.index('*'))\n else:\n ping_calculate_pong(expression, expression.index('/'))\n elif '/' not in expression:\n ping_calculate_pong(expression, expression.index('*'))\n elif '*' not in expression:\n ping_calculate_pong(expression, expression.index('/'))\n elif '+' in expression or '-' in expression:\n if '+' in expression and '-' in expression:\n if expression.index('+') < expression.index('-'):\n ping_calculate_pong(expression, expression.index('+'))\n else:\n ping_calculate_pong(expression, expression.index('-'))\n elif '-' not in expression:\n ping_calculate_pong(expression, expression.index('+'))\n elif '+' not in expression:\n ping_calculate_pong(expression, expression.index('-'))\n else:\n j += 1\n return expression\n\n\ndef clear_and_convert(string_math_expression):\n \"\"\"This function takes string expression and converts it to list with int, float, and 'math signs'.\"\"\"\n cleared_expression = list(filter(lambda x: x != ' ',\n string_math_expression))\n check_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+',\n '-', '*', '/', '(', ')', '.']\n for element in cleared_expression:\n if element not in check_list:\n raise ValueError(\n f'Houston, we have a problem. Element \"{element}\" in expression is not correct.'\n )\n num_exp = []\n number = ''\n len_cleared_expression = len(cleared_expression)\n for i, element in enumerate(cleared_expression):\n if element.isdigit():\n number += element\n if i == len_cleared_expression - 1 or not cleared_expression[i + 1\n ].isdigit():\n num_exp.append(int(number))\n number = ''\n else:\n num_exp.append(element)\n while '.' in num_exp:\n i = num_exp.index('.')\n if i != 0 and i != len(num_exp) - 1 and isinstance(num_exp[i - 1], int\n ) and isinstance(num_exp[i + 1], int):\n float_number = float(str(num_exp[i - 1]) + num_exp[i] + str(\n num_exp[i + 1]))\n num_exp[i + 1] = float_number\n del num_exp[i - 1:i + 1]\n else:\n raise ValueError('Something wrong with \".\".')\n neg_exp = []\n excluded_index = None\n neg_check_list = ['+', '-', '*', '/', '(']\n len_num_exp = len(num_exp)\n for i, element in enumerate(num_exp):\n if element == '-':\n if i == len_num_exp - 1:\n raise ValueError('Something wrong with \"-\".')\n elif isinstance(num_exp[i + 1], int) and (i == 0 or num_exp[i -\n 1] in neg_check_list):\n n_number = int('-' + str(num_exp[i + 1]))\n neg_exp.append(n_number)\n excluded_index = i + 1\n elif isinstance(num_exp[i + 1], float) and (i == 0 or num_exp[i -\n 1] in neg_check_list):\n n_number = float('-' + str(num_exp[i + 1]))\n neg_exp.append(n_number)\n excluded_index = i + 1\n else:\n neg_exp.append(element)\n elif i != excluded_index:\n neg_exp.append(element)\n converted_expression = []\n i = 0\n len_neg_exp = len(neg_exp)\n while i < len_neg_exp:\n if (i == 0 or i == len_neg_exp - 1) and neg_exp[i] == '*':\n raise ValueError('Something wrong with \"*\".')\n elif neg_exp[i] == '*' and neg_exp[i + 1] == '*':\n converted_expression.append('**')\n i += 2\n else:\n converted_expression.append(neg_exp[i])\n i += 1\n return converted_expression\n\n\ndef calculate_expression(str_math_expression):\n \"\"\"This function:\n 1. 
uses clear_and_convert() to prepare the string math expression for further calculations;\n 2. finds all subexpressions inside parentheses (if there are such);\n 3. transfers subexpression to calculator_without_parentheses() for further calculations;\n 4. replaces subexpression with the result;\n 5. returns final result of all calculations.\n \"\"\"\n expression = clear_and_convert(str_math_expression)\n for element in expression.copy():\n if ')' in expression:\n if '(' in expression:\n if expression.index(')') > expression.index('('):\n z = expression.index(')')\n a = z\n while expression[a] != '(':\n a -= 1\n fragment = expression[a + 1:z]\n fr_result = calculator_without_parentheses(fragment)\n if len(fr_result) != 1:\n raise ValueError(\n f'{fr_result} - check this fragment, something wrong.'\n )\n expression[z] = fr_result[0]\n del expression[a:z]\n else:\n raise ValueError('Something wrong with parentheses.')\n else:\n raise ValueError('Something wrong with parentheses.')\n else:\n expression = calculator_without_parentheses(expression)\n if len(expression) != 1:\n raise ValueError('Something wrong in your expression.')\n if len(expression) == 1:\n return str(round(expression[0], 5))\n",
"step-5": "\"\"\"Calculator is built using \"ping pong\" algorithm, without eval() etc.\nMain final function: calculate_expression().\ncalculate_expression() uses two functions in utils.py: clear_and_convert() and calculator_without_parentheses().\ncalculator_without_parentheses() uses two remaining functions:\nmath_operation() -> ping_calculate_pong() -> calculator_without_parentheses().\n\nAllowed operations: +, -, *, /, **, use of parentheses. Spaces don't matter.\nNegative numbers should be written as: (-34), float numbers: 3.4\nExpression example: ((-2.3) + 3 ** (2 - 2)) * 2.2 + (6/(3 + 3)* (-2)) ** 2\n\"\"\"\n\n\ndef math_operation(expression):\n \"\"\"Simple calculator for two numbers in expression like 3 + 3.\"\"\"\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n # eliminates the error call for float and negative numbers\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit() or \\\n not str(expression[2]).replace('.', '1').replace('-', '1').isdigit():\n raise ValueError(f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0]**expression[2]\n elif operator == '*':\n return expression[0]*expression[2]\n elif operator == '/':\n return expression[0]/expression[2]\n elif operator == '+':\n return expression[0]+expression[2]\n elif operator == '-':\n return expression[0]-expression[2]\n\n\ndef ping_calculate_pong(expression, operator_index):\n \"\"\"The function takes two arguments.\n Argument 1: an expression from which we will extract one subexpression.\n Argument 2: the index of the mathematical operator around which function takes the subexpression to extract.\n The function:\n 1. takes the expression and extract one subexpression around math operator (like 2 + 2) - ping;\n 2. calculates subexpression result using function math_operation();\n 3. replaces in expression: subexpression to subexpression result - pong.\n \"\"\"\n if len(expression) < 3 or operator_index == len(expression)-1 or operator_index == 0:\n raise ValueError(f'{expression} - check this fragment, something wrong.')\n sub_expression = expression[operator_index - 1:operator_index + 2]\n sub_result = math_operation(sub_expression)\n expression[operator_index+1] = sub_result\n del expression[operator_index-1:operator_index+1]\n\n\ndef calculator_without_parentheses(expression):\n \"\"\"The function:\n 1. prioritizes mathematical operations in expression without any parentheses;\n 2. transfers expression and indexes of math operators to the function ping_calculate_pong();\n 3. 
returns result of calculations.\n \"\"\"\n j = 1\n while len(expression) > j:\n if \"**\" in expression:\n ping_calculate_pong(expression, expression.index('**'))\n elif '*' in expression or '/' in expression:\n if '*' in expression and '/' in expression:\n if expression.index('*') < expression.index('/'):\n ping_calculate_pong(expression, expression.index('*'))\n else:\n ping_calculate_pong(expression, expression.index('/'))\n elif '/' not in expression:\n ping_calculate_pong(expression, expression.index('*'))\n elif '*' not in expression:\n ping_calculate_pong(expression, expression.index('/'))\n elif '+' in expression or '-' in expression:\n if '+' in expression and '-' in expression:\n if expression.index('+') < expression.index('-'):\n ping_calculate_pong(expression, expression.index('+'))\n else:\n ping_calculate_pong(expression, expression.index('-'))\n elif '-' not in expression:\n ping_calculate_pong(expression, expression.index('+'))\n elif '+' not in expression:\n ping_calculate_pong(expression, expression.index('-'))\n else:\n j += 1 # protection against a possible eternal loop when an incorrect expression is entered\n return expression\n\n\ndef clear_and_convert(string_math_expression):\n \"\"\"This function takes string expression and converts it to list with int, float, and 'math signs'.\"\"\"\n # clear the expression of spaces and convert it to the list\n cleared_expression = list(filter(lambda x: x != ' ', string_math_expression))\n # check characters in the expression for correctness\n check_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '-', '*', '/', '(', ')', '.']\n for element in cleared_expression:\n if element not in check_list:\n raise ValueError(f'Houston, we have a problem. Element \"{element}\" in expression is not correct.')\n # find multi-digit numbers and create new list num_exp with int\n num_exp = []\n number = ''\n len_cleared_expression = len(cleared_expression)\n for i, element in enumerate(cleared_expression):\n if element.isdigit():\n number += element\n if i == len_cleared_expression - 1 or not cleared_expression[i+1].isdigit():\n num_exp.append(int(number))\n number = ''\n else:\n num_exp.append(element)\n # find float numbers and update list num_exp\n while '.' 
in num_exp:\n i = num_exp.index('.')\n if (i != 0 and i != len(num_exp) - 1\n and isinstance(num_exp[i-1], int)\n and isinstance(num_exp[i+1], int)):\n float_number = float(str(num_exp[i-1]) + num_exp[i] + str(num_exp[i+1]))\n num_exp[i+1] = float_number\n del num_exp[i-1:i+1]\n else:\n raise ValueError('Something wrong with \".\".')\n # find negative numbers and create new list with negative numbers\n neg_exp = []\n excluded_index = None\n neg_check_list = ['+', '-', '*', '/', '(']\n len_num_exp = len(num_exp)\n for i, element in enumerate(num_exp):\n if element == '-':\n if i == len_num_exp - 1:\n raise ValueError('Something wrong with \"-\".')\n elif isinstance(num_exp[i+1], int) and (i == 0 or num_exp[i-1] in neg_check_list):\n n_number = int('-' + str(num_exp[i+1]))\n neg_exp.append(n_number)\n excluded_index = i + 1\n elif isinstance(num_exp[i+1], float) and (i == 0 or num_exp[i-1] in neg_check_list):\n n_number = float('-' + str(num_exp[i+1]))\n neg_exp.append(n_number)\n excluded_index = i + 1\n else:\n neg_exp.append(element)\n elif i != excluded_index:\n neg_exp.append(element)\n # find exponent operator and create new list with final converted expression\n converted_expression = []\n i = 0\n len_neg_exp = len(neg_exp)\n while i < len_neg_exp:\n if (i == 0 or i == len_neg_exp - 1) and neg_exp[i] == '*':\n raise ValueError('Something wrong with \"*\".')\n elif neg_exp[i] == '*' and neg_exp[i+1] == '*':\n converted_expression.append('**')\n i += 2\n else:\n converted_expression.append(neg_exp[i])\n i += 1\n return converted_expression\n\n\ndef calculate_expression(str_math_expression):\n \"\"\"This function:\n 1. uses clear_and_convert() to prepare the string math expression for further calculations;\n 2. finds all subexpressions inside parentheses (if there are such);\n 3. transfers subexpression to calculator_without_parentheses() for further calculations;\n 4. replaces subexpression with the result;\n 5. returns final result of all calculations.\n \"\"\"\n expression = clear_and_convert(str_math_expression)\n for element in expression.copy():\n if ')' in expression:\n if '(' in expression:\n if expression.index(')') > expression.index('('):\n z = expression.index(')')\n a = z\n while expression[a] != '(':\n a -= 1\n fragment = expression[a+1:z]\n fr_result = calculator_without_parentheses(fragment)\n if len(fr_result) != 1: # checking for an input error in a fragment of the expression like ((()))\n raise ValueError(f'{fr_result} - check this fragment, something wrong.')\n expression[z] = fr_result[0]\n del expression[a:z]\n else:\n raise ValueError('Something wrong with parentheses.')\n else:\n raise ValueError('Something wrong with parentheses.')\n else:\n expression = calculator_without_parentheses(expression)\n if len(expression) != 1:\n raise ValueError('Something wrong in your expression.')\n if len(expression) == 1:\n return str(round(expression[0], 5))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
from numpy import random
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed; model_selection replaces it
from numpy.random import shuffle
import matplotlib.pyplot as plt
import numpy.linalg as la
import sklearn.preprocessing as proc
import csv
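# Trains a linear SVM with stochastic (sub)gradient descent on six numeric
# columns of what appears to be the UCI Adult census data ("train.txt"),
# sweeps the regularization strength lambda, then labels "test.txt" and
# writes the predictions to output.csv.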
def get_accuracy(a, b, X_test, y_test):
size = len(y_test)
count = 0
for i in range(size):
x = X_test[i]
real = y_test[i]
x = np.array(x)
x = x.reshape(1, 6)
prediction = x.dot(a.T) + b
if prediction > 0 and real == 1:
count += 1
elif prediction < 0 and real == -1:
count += 1
return count / size
data = []
with open("train.txt") as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
a = random.dirichlet(np.ones(6)*1000, size = 1)
b = 0
#scale X
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = X - np.mean(X)
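# Note: StandardScaler already centers every column, so subtracting the
# global mean here is effectively a no-op.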
#10% test data and 90% train data
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
lambdas = [0.001, 0.01, 0.1, 1]
dict_accuracy = {}
for lamb in lambdas:
dict_accuracy[lamb] = []
dict_a = {}
for lamb in lambdas:
dict_a[lamb] = []
dict_b = {}
for lamb in lambdas:
dict_b[lamb] = []
a = 0
b = 0
for lamb in lambdas:
#a = random.dirichlet(np.ones(6)*1000, size = 1)
a = np.zeros(6)
b = 0
for epoch in range(50):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
if epoch == 49:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
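        # decaying learning-rate schedule: 1 / (0.01 * epoch + 50)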
for step in range(500):
if step % 30 == 0:
accuracy = get_accuracy(a, b, validation_train, validation_test)
dict_accuracy[lamb].append(accuracy)
dict_a[lamb].append(a)
dict_b[lamb].append(b)
# current index randomly chosen
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
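            # hinge-loss subgradient step: with margin >= 1 the example costs
            # nothing, so only the L2 penalty shrinks a; otherwise also move
            # along the misclassified example and adjust the bias b.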
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr], train_test[curr]))
b = b - (step_size * (-train_test[curr]))
'''
x_val = [i for i in range(1, 851)]
# dict_accuracy
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
fig.tight_layout()
y1 = dict_accuracy[0.001]
y2 = dict_accuracy[0.01]
y3 = dict_accuracy[0.1]
y4 = dict_accuracy[1]
ax1.plot(x_val, y1, color='m')
ax2.plot(x_val, y2, color='g')
ax3.plot(x_val, y3, color='r')
ax4.plot(x_val, y4, color='b')
ax1.set_xlabel('lambda = 0.001')
ax2.set_xlabel('lambda = 0.01')
ax3.set_xlabel('lambda = 0.1')
ax4.set_xlabel('lambda = 1')
plt.show()
#########################################
a_norm = {}
for lamb in lambdas:
a_norm[lamb] = []
for lamb in dict_a:
curr_list = dict_a[lamb]
for curr in curr_list:
norm = la.norm(curr, 2)
a_norm[lamb].append(norm)
plt.plot(x_val, a_norm[0.001], label = 'lambda is 0.001', color = 'b')
plt.plot(x_val, a_norm[0.01], label = 'lambda is 0.01', color = 'r')
plt.plot(x_val, a_norm[0.1], label = 'lambda is 0.01', color = 'g')
plt.plot(x_val, a_norm[1], label = 'lambda is 1', color = 'm')
plt.legend()
plt.show()
'''
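# Final model: retrain with a single lambda (presumably the value picked from
# the sweep above) on fresh splits, then label the held-out test file.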
lamb = 0.001
a = random.dirichlet(np.ones(6)*1000, size = 1)
b = 0
data = []
with open("train.txt") as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
if line[14] == '<=50K':
y.append(-1)
else:
y.append(1)
#scale X
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = X - np.mean(X)
for epoch in range(30):
if epoch == 29:
result = get_accuracy(a, b, X_test, y_test)
print(str(lamb) + ' : ' + str(result))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
shuffle(X_train)
validation_train = X_train[0:50]
validation_test = y_train[0:50]
train_data = X_train[51:]
train_test = y_train[51:]
m = 1
n = 50
step_size = m / (0.01 * epoch + n)
for step in range(300):
# current index randomly chosen
curr = random.randint(0, len(train_data))
curr_train = np.array(train_data[curr])
curr_train = curr_train.reshape(1, 6)
curr_val = (curr_train.dot(a.T) + b) * train_test[curr]
if curr_val >= 1:
a = a - np.dot(a, lamb) * step_size
else:
a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr], train_test[curr]))
b = b - (step_size * (-train_test[curr]))
data = []
with open("test.txt") as file:
data = [line.split() for line in file]
X = []
y = []
for line in data:
numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \
int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]
X.append(numerical)
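# Caveat: unlike the training data, these test features are never passed
# through the fitted StandardScaler, so the model scores unscaled inputs.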
prediction = []
for k in X:
numerical = np.array(k)
estimate = numerical.dot(a.T) + b
#print(estimate)
if estimate < 0:
prediction.append('<=50K')
else:
prediction.append('>50K')
index_final = []
for i in range(len(prediction)):
index_final.append(["'" + str(i) + "'", prediction[i]])
with open('output.csv', 'w') as csvfile:
writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
writer.writerow(['Example', 'Label'])
writer.writerows(index_final)
|
normal
|
{
"blob_id": "f5c4057babc873099ae2a4d8c1aca960ab9fa30a",
"index": 9692,
"step-1": "<mask token>\n\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n x = np.array(x)\n x = x.reshape(1, 6)\n prediction = x.dot(a.T) + b\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n x = np.array(x)\n x = x.reshape(1, 6)\n prediction = x.dot(a.T) + b\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\n\n<mask token>\nwith open('train.txt') as file:\n data = [line.split() for line in file]\n<mask token>\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\n<mask token>\nfor lamb in lambdas:\n dict_accuracy[lamb] = []\n<mask token>\nfor lamb in lambdas:\n dict_a[lamb] = []\n<mask token>\nfor lamb in lambdas:\n dict_b[lamb] = []\n<mask token>\nfor lamb in lambdas:\n a = np.zeros(6)\n b = 0\n for epoch in range(50):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1\n )\n if epoch == 49:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(500):\n if step % 30 == 0:\n accuracy = get_accuracy(a, b, validation_train, validation_test\n )\n dict_accuracy[lamb].append(accuracy)\n dict_a[lamb].append(a)\n dict_b[lamb].append(b)\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[\n curr], train_test[curr]))\n b = b - step_size * -train_test[curr]\n<mask token>\nwith open('train.txt') as file:\n data = [line.split() for line in file]\n<mask token>\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\n<mask token>\nfor epoch in range(30):\n if epoch == 29:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(300):\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],\n train_test[curr]))\n b = b - step_size * -train_test[curr]\n<mask token>\nwith open('test.txt') as file:\n data = [line.split() for line in file]\n<mask token>\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n<mask token>\nfor k in X:\n numerical = np.array(k)\n estimate = numerical.dot(a.T) + b\n if estimate < 0:\n prediction.append('<=50K')\n else:\n prediction.append('>50K')\n<mask token>\nfor i 
in range(len(prediction)):\n index_final.append([\"'\" + str(i) + \"'\", prediction[i]])\nwith open('output.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n writer.writerow(['Example', 'Label'])\n writer.writerows(index_final)\n",
"step-3": "<mask token>\n\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n x = np.array(x)\n x = x.reshape(1, 6)\n prediction = x.dot(a.T) + b\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\n\ndata = []\nwith open('train.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\na = random.dirichlet(np.ones(6) * 1000, size=1)\nb = 0\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX = X - np.mean(X)\nlambdas = [0.001, 0.01, 0.1, 1]\ndict_accuracy = {}\nfor lamb in lambdas:\n dict_accuracy[lamb] = []\ndict_a = {}\nfor lamb in lambdas:\n dict_a[lamb] = []\ndict_b = {}\nfor lamb in lambdas:\n dict_b[lamb] = []\na = 0\nb = 0\nfor lamb in lambdas:\n a = np.zeros(6)\n b = 0\n for epoch in range(50):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1\n )\n if epoch == 49:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(500):\n if step % 30 == 0:\n accuracy = get_accuracy(a, b, validation_train, validation_test\n )\n dict_accuracy[lamb].append(accuracy)\n dict_a[lamb].append(a)\n dict_b[lamb].append(b)\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[\n curr], train_test[curr]))\n b = b - step_size * -train_test[curr]\n<mask token>\nlamb = 0.001\na = random.dirichlet(np.ones(6) * 1000, size=1)\nb = 0\ndata = []\nwith open('train.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX = X - np.mean(X)\nfor epoch in range(30):\n if epoch == 29:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(300):\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],\n train_test[curr]))\n b = b - step_size * -train_test[curr]\ndata = []\nwith open('test.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), 
int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\nprediction = []\nfor k in X:\n numerical = np.array(k)\n estimate = numerical.dot(a.T) + b\n if estimate < 0:\n prediction.append('<=50K')\n else:\n prediction.append('>50K')\nindex_final = []\nfor i in range(len(prediction)):\n index_final.append([\"'\" + str(i) + \"'\", prediction[i]])\nwith open('output.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n writer.writerow(['Example', 'Label'])\n writer.writerows(index_final)\n",
"step-4": "import numpy as np\nfrom numpy import random\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split\nfrom numpy.random import shuffle\nimport matplotlib.pyplot as plt\nimport numpy.linalg as la\nimport sklearn.preprocessing as proc\nimport csv\n\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n x = np.array(x)\n x = x.reshape(1, 6)\n prediction = x.dot(a.T) + b\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\n\ndata = []\nwith open('train.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\na = random.dirichlet(np.ones(6) * 1000, size=1)\nb = 0\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX = X - np.mean(X)\nlambdas = [0.001, 0.01, 0.1, 1]\ndict_accuracy = {}\nfor lamb in lambdas:\n dict_accuracy[lamb] = []\ndict_a = {}\nfor lamb in lambdas:\n dict_a[lamb] = []\ndict_b = {}\nfor lamb in lambdas:\n dict_b[lamb] = []\na = 0\nb = 0\nfor lamb in lambdas:\n a = np.zeros(6)\n b = 0\n for epoch in range(50):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1\n )\n if epoch == 49:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(500):\n if step % 30 == 0:\n accuracy = get_accuracy(a, b, validation_train, validation_test\n )\n dict_accuracy[lamb].append(accuracy)\n dict_a[lamb].append(a)\n dict_b[lamb].append(b)\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[\n curr], train_test[curr]))\n b = b - step_size * -train_test[curr]\n<mask token>\nlamb = 0.001\na = random.dirichlet(np.ones(6) * 1000, size=1)\nb = 0\ndata = []\nwith open('train.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX = X - np.mean(X)\nfor epoch in range(30):\n if epoch == 29:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n shuffle(X_train)\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n train_data = X_train[51:]\n train_test = y_train[51:]\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n for step in range(300):\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a 
= a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr],\n train_test[curr]))\n b = b - step_size * -train_test[curr]\ndata = []\nwith open('test.txt') as file:\n data = [line.split() for line in file]\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]),\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\nprediction = []\nfor k in X:\n numerical = np.array(k)\n estimate = numerical.dot(a.T) + b\n if estimate < 0:\n prediction.append('<=50K')\n else:\n prediction.append('>50K')\nindex_final = []\nfor i in range(len(prediction)):\n index_final.append([\"'\" + str(i) + \"'\", prediction[i]])\nwith open('output.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n writer.writerow(['Example', 'Label'])\n writer.writerows(index_final)\n",
"step-5": "import numpy as np\nfrom numpy import random\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split\nfrom numpy.random import shuffle\nimport matplotlib.pyplot as plt\nimport numpy.linalg as la\nimport sklearn.preprocessing as proc\nimport csv\n\ndef get_accuracy(a, b, X_test, y_test):\n size = len(y_test)\n count = 0\n for i in range(size):\n x = X_test[i]\n real = y_test[i]\n\n x = np.array(x)\n x = x.reshape(1, 6)\n\n prediction = x.dot(a.T) + b\n\n if prediction > 0 and real == 1:\n count += 1\n elif prediction < 0 and real == -1:\n count += 1\n return count / size\n\ndata = []\nwith open(\"train.txt\") as file:\n data = [line.split() for line in file]\n\n\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \\\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\n\n\na = random.dirichlet(np.ones(6)*1000, size = 1)\nb = 0\n\n\n#scale X\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\n\nX = X - np.mean(X)\n\n#10% test data and 90% train data\n#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)\n\n\nlambdas = [0.001, 0.01, 0.1, 1]\n\ndict_accuracy = {}\nfor lamb in lambdas:\n dict_accuracy[lamb] = []\n\n\ndict_a = {}\nfor lamb in lambdas:\n dict_a[lamb] = []\n\ndict_b = {}\nfor lamb in lambdas:\n dict_b[lamb] = []\n\na = 0\nb = 0\n\nfor lamb in lambdas:\n\n #a = random.dirichlet(np.ones(6)*1000, size = 1)\n a = np.zeros(6)\n b = 0\n\n for epoch in range(50):\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)\n\n if epoch == 49:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n\n shuffle(X_train)\n\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n\n train_data = X_train[51:]\n train_test = y_train[51:]\n\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n\n for step in range(500):\n\n if step % 30 == 0:\n accuracy = get_accuracy(a, b, validation_train, validation_test)\n\n dict_accuracy[lamb].append(accuracy)\n dict_a[lamb].append(a)\n dict_b[lamb].append(b)\n\n # current index randomly chosen\n curr = random.randint(0, len(train_data))\n\n curr_train = np.array(train_data[curr])\n\n curr_train = curr_train.reshape(1, 6)\n\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr], train_test[curr]))\n b = b - (step_size * (-train_test[curr]))\n\n\n'''\n\nx_val = [i for i in range(1, 851)]\n\n# dict_accuracy\nfig = plt.figure()\nax1 = fig.add_subplot(411)\nax2 = fig.add_subplot(412)\nax3 = fig.add_subplot(413)\nax4 = fig.add_subplot(414)\n\nfig.tight_layout()\n\ny1 = dict_accuracy[0.001]\ny2 = dict_accuracy[0.01]\ny3 = dict_accuracy[0.1]\ny4 = dict_accuracy[1]\n\nax1.plot(x_val, y1, color='m')\nax2.plot(x_val, y2, color='g')\nax3.plot(x_val, y3, color='r')\nax4.plot(x_val, y4, color='b')\n\nax1.set_xlabel('lambda = 0.001')\nax2.set_xlabel('lambda = 0.01')\nax3.set_xlabel('lambda = 0.1')\nax4.set_xlabel('lambda = 1')\n\nplt.show()\n\n#########################################\n\na_norm = {}\nfor lamb in lambdas:\n a_norm[lamb] = []\n\nfor lamb in dict_a:\n curr_list = dict_a[lamb]\n for curr in curr_list:\n norm = la.norm(curr, 2)\n a_norm[lamb].append(norm)\n\nplt.plot(x_val, a_norm[0.001], label = 'lambda is 0.001', color = 
'b')\nplt.plot(x_val, a_norm[0.01], label = 'lambda is 0.01', color = 'r')\nplt.plot(x_val, a_norm[0.1], label = 'lambda is 0.01', color = 'g')\nplt.plot(x_val, a_norm[1], label = 'lambda is 1', color = 'm')\nplt.legend()\nplt.show()\n\n\n'''\n\n\nlamb = 0.001\n\na = random.dirichlet(np.ones(6)*1000, size = 1)\n\n\nb = 0\n\ndata = []\nwith open(\"train.txt\") as file:\n data = [line.split() for line in file]\n\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \\\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n\n X.append(numerical)\n if line[14] == '<=50K':\n y.append(-1)\n else:\n y.append(1)\n\n\n#scale X\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\n\nX = X - np.mean(X)\n\nfor epoch in range(30):\n\n if epoch == 29:\n result = get_accuracy(a, b, X_test, y_test)\n print(str(lamb) + ' : ' + str(result))\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)\n\n shuffle(X_train)\n\n validation_train = X_train[0:50]\n validation_test = y_train[0:50]\n\n train_data = X_train[51:]\n train_test = y_train[51:]\n\n m = 1\n n = 50\n step_size = m / (0.01 * epoch + n)\n\n for step in range(300):\n # current index randomly chosen\n curr = random.randint(0, len(train_data))\n curr_train = np.array(train_data[curr])\n curr_train = curr_train.reshape(1, 6)\n curr_val = (curr_train.dot(a.T) + b) * train_test[curr]\n\n if curr_val >= 1:\n a = a - np.dot(a, lamb) * step_size\n else:\n a = a - step_size * (np.dot(a, lamb) - np.dot(train_data[curr], train_test[curr]))\n b = b - (step_size * (-train_test[curr]))\n\n\ndata = []\nwith open(\"test.txt\") as file:\n data = [line.split() for line in file]\n\nX = []\ny = []\nfor line in data:\n numerical = [int(line[0][:-1]), int(line[2][:-1]), int(line[4][:-1]), \\\n int(line[10][:-1]), int(line[11][:-1]), int(line[12][:-1])]\n X.append(numerical)\n\n\n\n\nprediction = []\nfor k in X:\n numerical = np.array(k)\n estimate = numerical.dot(a.T) + b\n #print(estimate)\n if estimate < 0:\n prediction.append('<=50K')\n else:\n prediction.append('>50K')\n\n\nindex_final = []\nfor i in range(len(prediction)):\n index_final.append([\"'\" + str(i) + \"'\", prediction[i]])\n\nwith open('output.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n writer.writerow(['Example', 'Label'])\n writer.writerows(index_final)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def get_first_timestamp(log_file):
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
return line_json['timestamp']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
send_socket.connect(connect_to)
def get_first_timestamp(log_file):
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
return line_json['timestamp']
<|reserved_special_token_0|>
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
importer_age = time.time() - start_time_importer
line_age = line_json['timestamp'] - start_time_file
sleep_time = line_age - importer_age
if sleep_time > 0:
time.sleep(sleep_time)
print(line_json)
send_socket.send_json(line_json)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log_file = './mavlink-log.txt'
zmq_context = zmq.Context()
connect_to = sys.argv[1]
send_socket = zmq_context.socket(zmq.PUSH)
send_socket.connect(connect_to)
def get_first_timestamp(log_file):
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
return line_json['timestamp']
start_time_file = get_first_timestamp(log_file)
start_time_importer = time.time()
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
importer_age = time.time() - start_time_importer
line_age = line_json['timestamp'] - start_time_file
sleep_time = line_age - importer_age
if sleep_time > 0:
time.sleep(sleep_time)
print(line_json)
send_socket.send_json(line_json)
<|reserved_special_token_1|>
import json
import sys
import time
import zmq
log_file = './mavlink-log.txt'
zmq_context = zmq.Context()
connect_to = sys.argv[1]
send_socket = zmq_context.socket(zmq.PUSH)
send_socket.connect(connect_to)
def get_first_timestamp(log_file):
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
return line_json['timestamp']
start_time_file = get_first_timestamp(log_file)
start_time_importer = time.time()
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
importer_age = time.time() - start_time_importer
line_age = line_json['timestamp'] - start_time_file
sleep_time = line_age - importer_age
if sleep_time > 0:
time.sleep(sleep_time)
print(line_json)
send_socket.send_json(line_json)
<|reserved_special_token_1|>
#!/usr/bin/env python3
import json
import sys
import time
import zmq
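# Replays a MAVLink JSON log in real time: every log line is a JSON object
# carrying a "timestamp" field, and the loop below sleeps so that messages
# are pushed over the ZeroMQ PUSH socket with their original relative timing.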
log_file = "./mavlink-log.txt"
zmq_context = zmq.Context()
connect_to = sys.argv[1]
send_socket = zmq_context.socket(zmq.PUSH)
send_socket.connect(connect_to)
def get_first_timestamp(log_file):
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
return line_json["timestamp"]
start_time_file = get_first_timestamp(log_file)
start_time_importer = time.time()
with open(log_file) as f:
for line in f:
line_json = json.loads(line)
importer_age = time.time() - start_time_importer
line_age = line_json["timestamp"] - start_time_file
sleep_time = line_age - importer_age
if sleep_time > 0:
#print(str(line_age)+" - "+str(importer_age))
#print(sleep_time)
time.sleep(sleep_time)
print(line_json)
send_socket.send_json(line_json)
|
flexible
|
{
"blob_id": "49679782ac696b3dc4f5038565f88304a44098e1",
"index": 6188,
"step-1": "<mask token>\n\n\ndef get_first_timestamp(log_file):\n with open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n return line_json['timestamp']\n\n\n<mask token>\n",
"step-2": "<mask token>\nsend_socket.connect(connect_to)\n\n\ndef get_first_timestamp(log_file):\n with open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n return line_json['timestamp']\n\n\n<mask token>\nwith open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n importer_age = time.time() - start_time_importer\n line_age = line_json['timestamp'] - start_time_file\n sleep_time = line_age - importer_age\n if sleep_time > 0:\n time.sleep(sleep_time)\n print(line_json)\n send_socket.send_json(line_json)\n",
"step-3": "<mask token>\nlog_file = './mavlink-log.txt'\nzmq_context = zmq.Context()\nconnect_to = sys.argv[1]\nsend_socket = zmq_context.socket(zmq.PUSH)\nsend_socket.connect(connect_to)\n\n\ndef get_first_timestamp(log_file):\n with open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n return line_json['timestamp']\n\n\nstart_time_file = get_first_timestamp(log_file)\nstart_time_importer = time.time()\nwith open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n importer_age = time.time() - start_time_importer\n line_age = line_json['timestamp'] - start_time_file\n sleep_time = line_age - importer_age\n if sleep_time > 0:\n time.sleep(sleep_time)\n print(line_json)\n send_socket.send_json(line_json)\n",
"step-4": "import json\nimport sys\nimport time\nimport zmq\nlog_file = './mavlink-log.txt'\nzmq_context = zmq.Context()\nconnect_to = sys.argv[1]\nsend_socket = zmq_context.socket(zmq.PUSH)\nsend_socket.connect(connect_to)\n\n\ndef get_first_timestamp(log_file):\n with open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n return line_json['timestamp']\n\n\nstart_time_file = get_first_timestamp(log_file)\nstart_time_importer = time.time()\nwith open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n importer_age = time.time() - start_time_importer\n line_age = line_json['timestamp'] - start_time_file\n sleep_time = line_age - importer_age\n if sleep_time > 0:\n time.sleep(sleep_time)\n print(line_json)\n send_socket.send_json(line_json)\n",
"step-5": "#!/usr/bin/env python3\n\nimport json\nimport sys\nimport time\nimport zmq\n\nlog_file = \"./mavlink-log.txt\"\n\nzmq_context = zmq.Context()\n\nconnect_to = sys.argv[1]\nsend_socket = zmq_context.socket(zmq.PUSH)\nsend_socket.connect(connect_to)\n\ndef get_first_timestamp(log_file):\n\twith open(log_file) as f:\n\t\tfor line in f:\n\t\t\tline_json = json.loads(line)\n\t\t\treturn line_json[\"timestamp\"]\n\n\n\nstart_time_file = get_first_timestamp(log_file)\nstart_time_importer = time.time()\n\nwith open(log_file) as f:\n\tfor line in f:\n\t\tline_json = json.loads(line)\n\n\t\timporter_age = time.time() - start_time_importer\n\t\tline_age = line_json[\"timestamp\"] - start_time_file\n\n\t\tsleep_time = line_age - importer_age\n\n\t\tif sleep_time > 0:\n\t\t\t#print(str(line_age)+\" - \"+str(importer_age))\n\t\t\t#print(sleep_time)\n\t\t\ttime.sleep(sleep_time)\n\n\n\t\tprint(line_json)\n\t\tsend_socket.send_json(line_json)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
Challenges:
1: Write a Python script that reads a person's name and shows a welcome message based on the value entered.

Script:
"""Challenge 01:
1: Write a Python script that reads a person's name
and shows a welcome message based on the value entered."""
nome=input('Qual é o seu nome?')
print('Olá ',nome,'! Prazer em te conhecer!')
Output:
Python 3.6.9 (default, Nov 7 2019, 10:44:02)
[GCC 8.3.0] on linux
Type "help", "copyright", "credits" or "license()" for more information.
>>>
=== RESTART: /home/anderson/Área de Trabalho/scripts_python/desafio_01.py ===
Qual é o seu nome?Anderson
Olá Anderson ! Prazer em te conhecer!
>>>
|
normal
|
{
"blob_id": "80454a3935f0d42b5535440fc316af1b5598d8a1",
"index": 7090,
"step-1": "Desafios:\n1: Crie um script python que leia o nome de uma pessoa e mostre uma mensagem de boas-vindas de acordo com o valor digitado.\n\nScript:\nDesafio 01:\n1: Crie um script python que leia o nome de uma pessoa\ne mostre uma mensagem de boas-vindas de acordo com o valor digitado.\"\"\"\nnome=input('Qual é o seu nome?')\nprint('Olá ',nome,'! Prazer em te conhecer!')\n\nRetorno:\nPython 3.6.9 (default, Nov 7 2019, 10:44:02) \n[GCC 8.3.0] on linux\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> \n=== RESTART: /home/anderson/Área de Trabalho/scripts_python/desafio_01.py ===\nQual é o seu nome?Anderson\nOlá Anderson ! Prazer em te conhecer!\n>>> ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if DEBUG:
p = process('binary_100')
else:
p = remote('bamboofox.cs.nctu.edu.tw', 22001)
<|reserved_special_token_0|>
p.send(payload)
p.interactive()
p.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DEBUG = False
if DEBUG:
p = process('binary_100')
else:
p = remote('bamboofox.cs.nctu.edu.tw', 22001)
padding = 52 - 12
payload = padding * 'A' + p32(2882343476)
p.send(payload)
p.interactive()
p.close()
<|reserved_special_token_1|>
from pwn import *
DEBUG = False
if DEBUG:
p = process('binary_100')
else:
p = remote('bamboofox.cs.nctu.edu.tw', 22001)
padding = 52 - 12
payload = padding * 'A' + p32(2882343476)
p.send(payload)
p.interactive()
p.close()
<|reserved_special_token_1|>
from pwn import *
DEBUG = False
if DEBUG:
p = process("binary_100")
else:
p = remote("bamboofox.cs.nctu.edu.tw", 22001)
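# padding = 0x34 - 0xc = 40 bytes from the input buffer up to the variable
# the overflow overwrites with 0xabcd1234 (offsets presumably read from the
# binary). Note: under Python 3 pwntools, padding * "A" is str while p32()
# returns bytes, so this concatenation needs b"A"; as written the script
# targets Python 2.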
padding = 0x34 - 0xc
payload = padding * "A" + p32(0xabcd1234)
p.send(payload)
p.interactive()
p.close()
|
flexible
|
{
"blob_id": "fab75c5b55d85cef245fa6d7e04f4bf3a35e492c",
"index": 7068,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif DEBUG:\n p = process('binary_100')\nelse:\n p = remote('bamboofox.cs.nctu.edu.tw', 22001)\n<mask token>\np.send(payload)\np.interactive()\np.close()\n",
"step-3": "<mask token>\nDEBUG = False\nif DEBUG:\n p = process('binary_100')\nelse:\n p = remote('bamboofox.cs.nctu.edu.tw', 22001)\npadding = 52 - 12\npayload = padding * 'A' + p32(2882343476)\np.send(payload)\np.interactive()\np.close()\n",
"step-4": "from pwn import *\nDEBUG = False\nif DEBUG:\n p = process('binary_100')\nelse:\n p = remote('bamboofox.cs.nctu.edu.tw', 22001)\npadding = 52 - 12\npayload = padding * 'A' + p32(2882343476)\np.send(payload)\np.interactive()\np.close()\n",
"step-5": "from pwn import *\n\nDEBUG = False\n\nif DEBUG:\n p = process(\"binary_100\")\nelse:\n p = remote(\"bamboofox.cs.nctu.edu.tw\", 22001)\n\npadding = 0x34 - 0xc\n\npayload = padding * \"A\" + p32(0xabcd1234)\n\np.send(payload)\n\np.interactive()\np.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestFrame:
<|reserved_special_token_0|>
def teardown(self):
self.driver.quit()
def test_frame(self):
self.driver.switch_to.frame('iframeResult')
action = ActionChains(self.driver)
drag = self.driver.find_element_by_id('draggable')
drop = self.driver.find_element_by_id('droppable')
action.drag_and_drop(drag, drop).perform()
sleep(2)
self.driver.switch_to.alert.accept()
self.driver.switch_to.default_content()
self.driver.find_element_by_id('submitBTN').click()
sleep(3)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestFrame:
def setup(self):
self.driver = webdriver.Chrome()
self.driver.get(
'https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
)
self.driver.implicitly_wait(5)
self.driver.maximize_window()
def teardown(self):
self.driver.quit()
def test_frame(self):
self.driver.switch_to.frame('iframeResult')
action = ActionChains(self.driver)
drag = self.driver.find_element_by_id('draggable')
drop = self.driver.find_element_by_id('droppable')
action.drag_and_drop(drag, drop).perform()
sleep(2)
self.driver.switch_to.alert.accept()
self.driver.switch_to.default_content()
self.driver.find_element_by_id('submitBTN').click()
sleep(3)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestFrame:
def setup(self):
self.driver = webdriver.Chrome()
self.driver.get(
'https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
)
self.driver.implicitly_wait(5)
self.driver.maximize_window()
def teardown(self):
self.driver.quit()
def test_frame(self):
self.driver.switch_to.frame('iframeResult')
action = ActionChains(self.driver)
drag = self.driver.find_element_by_id('draggable')
drop = self.driver.find_element_by_id('droppable')
action.drag_and_drop(drag, drop).perform()
sleep(2)
self.driver.switch_to.alert.accept()
self.driver.switch_to.default_content()
self.driver.find_element_by_id('submitBTN').click()
sleep(3)
if __name__ == '__main__':
pytest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pytest
from selenium import webdriver
from time import sleep
from selenium.webdriver import ActionChains
class TestFrame:
def setup(self):
self.driver = webdriver.Chrome()
self.driver.get(
'https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
)
self.driver.implicitly_wait(5)
self.driver.maximize_window()
def teardown(self):
self.driver.quit()
def test_frame(self):
self.driver.switch_to.frame('iframeResult')
action = ActionChains(self.driver)
drag = self.driver.find_element_by_id('draggable')
drop = self.driver.find_element_by_id('droppable')
action.drag_and_drop(drag, drop).perform()
sleep(2)
self.driver.switch_to.alert.accept()
self.driver.switch_to.default_content()
self.driver.find_element_by_id('submitBTN').click()
sleep(3)
if __name__ == '__main__':
pytest.main()
<|reserved_special_token_1|>
"""
Hands-on exercise:
1. Open the page
https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable
2. In the result pane on the right, drag element 1 onto element 2
3. An alert dialog pops up; click "OK" in the dialog
4. Then click the "Run" button
5. Close the page
"""
import pytest
from selenium import webdriver
from time import sleep
from selenium.webdriver import ActionChains
class TestFrame:
def setup(self):
self.driver = webdriver.Chrome()
self.driver.get("https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable")
self.driver.implicitly_wait(5)
self.driver.maximize_window()
def teardown(self):
self.driver.quit()
def test_frame(self):
        # The draggable elements live inside an iframe, so we must first
        # switch into it with switch_to.frame("<frame id>")
        self.driver.switch_to.frame("iframeResult")
        # Drag-and-drop requires the ActionChains API
        action = ActionChains(self.driver)
        drag = self.driver.find_element_by_id("draggable")
        drop = self.driver.find_element_by_id("droppable")
        action.drag_and_drop(drag, drop).perform()
        sleep(2)
        # The drop triggers an alert, so switch to it and confirm with .accept()
        self.driver.switch_to.alert.accept()
        # After confirming, focus is still on the iframe; switch back before clicking "Run"
        self.driver.switch_to.default_content()  # switch to the default frame (option 1)
        # self.driver.switch_to.parent_frame()  # switch to the parent frame (option 2); both work
        self.driver.find_element_by_id("submitBTN").click()
        sleep(3)
if __name__ == '__main__':
pytest.main()
|
flexible
|
{
"blob_id": "74843dea00a88513c3a9237eb024e1e14e8b1ff8",
"index": 3088,
"step-1": "<mask token>\n\n\nclass TestFrame:\n <mask token>\n\n def teardown(self):\n self.driver.quit()\n\n def test_frame(self):\n self.driver.switch_to.frame('iframeResult')\n action = ActionChains(self.driver)\n drag = self.driver.find_element_by_id('draggable')\n drop = self.driver.find_element_by_id('droppable')\n action.drag_and_drop(drag, drop).perform()\n sleep(2)\n self.driver.switch_to.alert.accept()\n self.driver.switch_to.default_content()\n self.driver.find_element_by_id('submitBTN').click()\n sleep(3)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestFrame:\n\n def setup(self):\n self.driver = webdriver.Chrome()\n self.driver.get(\n 'https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'\n )\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n def test_frame(self):\n self.driver.switch_to.frame('iframeResult')\n action = ActionChains(self.driver)\n drag = self.driver.find_element_by_id('draggable')\n drop = self.driver.find_element_by_id('droppable')\n action.drag_and_drop(drag, drop).perform()\n sleep(2)\n self.driver.switch_to.alert.accept()\n self.driver.switch_to.default_content()\n self.driver.find_element_by_id('submitBTN').click()\n sleep(3)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestFrame:\n\n def setup(self):\n self.driver = webdriver.Chrome()\n self.driver.get(\n 'https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'\n )\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n def test_frame(self):\n self.driver.switch_to.frame('iframeResult')\n action = ActionChains(self.driver)\n drag = self.driver.find_element_by_id('draggable')\n drop = self.driver.find_element_by_id('droppable')\n action.drag_and_drop(drag, drop).perform()\n sleep(2)\n self.driver.switch_to.alert.accept()\n self.driver.switch_to.default_content()\n self.driver.find_element_by_id('submitBTN').click()\n sleep(3)\n\n\nif __name__ == '__main__':\n pytest.main()\n",
"step-4": "<mask token>\nimport pytest\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver import ActionChains\n\n\nclass TestFrame:\n\n def setup(self):\n self.driver = webdriver.Chrome()\n self.driver.get(\n 'https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'\n )\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n def test_frame(self):\n self.driver.switch_to.frame('iframeResult')\n action = ActionChains(self.driver)\n drag = self.driver.find_element_by_id('draggable')\n drop = self.driver.find_element_by_id('droppable')\n action.drag_and_drop(drag, drop).perform()\n sleep(2)\n self.driver.switch_to.alert.accept()\n self.driver.switch_to.default_content()\n self.driver.find_element_by_id('submitBTN').click()\n sleep(3)\n\n\nif __name__ == '__main__':\n pytest.main()\n",
"step-5": "\"\"\"\n实战练习:\n1.打开网页\nhttps://www.runoob.com/try/try.php?filename=jqueryui-api-droppable\n2.操作窗口右侧页面,将元素1拖拽到元素2\n3.这时候会有一个alert弹框,点击弹框中的‘确定’\n3.然后再按’点击运行’\n4.关闭网页\n\"\"\"\nimport pytest\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver import ActionChains\nclass TestFrame:\n def setup(self):\n self.driver = webdriver.Chrome()\n self.driver.get(\"https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable\")\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n def teardown(self):\n self.driver.quit()\n def test_frame(self):\n # 检查要打印的元素,可以发现他们属于iframe元素,也就是需要先使用switch_to.frame(\"新frame的id\")切换到对应的frame页\n self.driver.switch_to.frame(\"iframeResult\")\n # 拖拽需要调用ActionChains方法\n action=ActionChains(self.driver)\n drag=self.driver.find_element_by_id(\"draggable\")\n drop=self.driver.find_element_by_id(\"droppable\")\n action.drag_and_drop(drag,drop).perform()\n sleep(2)\n # 拖拽完成后会弹出一个alert弹框,所以需要切换到alert,并调用.accept()进行确认操作\n self.driver.switch_to.alert.accept()\n # 点击确认后,alert弹框消失,默认还是在拖拽的iframe页面,接下来要点击运行,所以要再次进行切换\n self.driver.switch_to.default_content() # 切换到默认frame,第一种方式\n #self.driver.switch_to.parent_frame() # 切换到父frame第二种方式,两种方式都可以\n self.driver.find_element_by_id(\"submitBTN\").click()\n sleep(3)\nif __name__ == '__main__':\n pytest.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from .models import RecommendedArtifact
from .serializers import RecommendedArtifactSerialize
from rest_framework.decorators import api_view
from rest_framework.response import Response
from datetime import datetime
import requests, bs4
# constant value
service_key = "{jo's museum key}"
@api_view(['GET'])
def artifact_save_recommend(request,pageNo):
    # 1. Pick a results page and fetch every artifact listed on it
artifact_url = f"http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}"
#http://www.emuseum.go.kr/openapi/relic/list?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&numOfRows=100&pageNo=1
response = requests.get(artifact_url)
response_dict = bs4.BeautifulSoup(response.content, 'html.parser')
search_list = []
for data in response_dict.findAll('data'):
for item in data.findAll('item'):
if item['key'] == 'id':
id_num = item['value']
search_list.append(id_num)
    # 2-1. Set up working variables
detail_list = []
dataDict = {
'id_num': '',
'name': '',
'desc': '',
'museum_name': '',
'nationality_name': '',
'image_uri': '',
}
    # 2-2. Check whether each artifact has a description (desc)
for i in range(len(search_list)):
artifact_num = search_list[i]
artifact_url = f"http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}"
# http://www.emuseum.go.kr/openapi/relic/detail?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&id=PS0100100100100021500000
response = requests.get(artifact_url)
response_dict = bs4.BeautifulSoup(response.content, 'html.parser')
for data in response_dict.findAll('list'):
for item in data.findAll('item'):
if item['key'] == 'id':
dataDict['id_num'] = item['value']
elif item['key'] == 'desc':
dataDict['desc'] = item['value']
elif item['key'] == 'nameKr':
dataDict['name'] = item['value']
elif item['key'] == 'nationalityName2':
dataDict['nationality_name'] = item['value']
elif item['key'] == 'museumName2':
dataDict['museum_name'] = item['value']
elif item['key'] == 'imgThumUriM':
dataDict['image_uri'] = item['value']
        # 2-3. Save to the DB
if dataDict['desc'] != '':
serializer = RecommendedArtifactSerialize(data=dataDict)
if serializer.is_valid(raise_exception=True):
serializer.save()
dataDict = {
'id_num': '',
'name': '',
'desc': '',
'museum_name': '',
'nationality_name': '',
'image_uri': '',
}
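    # Caveat: if no artifact on this page had a desc, `serializer` is never
    # assigned and the return below raises NameError.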
return Response(serializer.data)
@api_view(['GET'])
def artifact_recommend(request):
    ## What day of the year is it today?
now = datetime.now()
nowYear = now.year
nowMonth = now.month
nowDay = now.day
daySum = 0
if nowYear%4==0 and nowYear%100!=0 or nowYear%400==0:
month = [31,29,31,30,31,30,31,31,30,31,30,31]
else:
month = [31,28,31,30,31,30,31,31,30,31,30,31]
for i in range(nowMonth-1):
daySum += month[i]
daySum += nowDay
Recommended_list = RecommendedArtifact.objects.all()
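    # daySum is the 1-based day of the year, so the queryset must hold at
    # least ~366 rows or the lookup below raises IndexError.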
Recommended_artifact = Recommended_list[daySum]
dataDict = {
'id_num': Recommended_artifact.id_num,
'name': Recommended_artifact.name,
'desc': Recommended_artifact.desc,
'museum_name': Recommended_artifact.museum_name,
'nationality_name': Recommended_artifact.nationality_name,
'image_uri': Recommended_artifact.image_uri,
}
# print(Recommended_artifact.name)
return Response(dataDict)
|
normal
|
{
"blob_id": "707e3e60d6d9a3db5b9bc733e912b34e2cec5974",
"index": 8585,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@api_view(['GET'])\ndef artifact_save_recommend(request, pageNo):\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n search_list = []\n for data in response_dict.findAll('data'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n id_num = item['value']\n search_list.append(id_num)\n detail_list = []\n dataDict = {'id_num': '', 'name': '', 'desc': '', 'museum_name': '',\n 'nationality_name': '', 'image_uri': ''}\n for i in range(len(search_list)):\n artifact_num = search_list[i]\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n for data in response_dict.findAll('list'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n dataDict['id_num'] = item['value']\n elif item['key'] == 'desc':\n dataDict['desc'] = item['value']\n elif item['key'] == 'nameKr':\n dataDict['name'] = item['value']\n elif item['key'] == 'nationalityName2':\n dataDict['nationality_name'] = item['value']\n elif item['key'] == 'museumName2':\n dataDict['museum_name'] = item['value']\n elif item['key'] == 'imgThumUriM':\n dataDict['image_uri'] = item['value']\n if dataDict['desc'] != '':\n serializer = RecommendedArtifactSerialize(data=dataDict)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n dataDict = {'id_num': '', 'name': '', 'desc': '',\n 'museum_name': '', 'nationality_name': '', 'image_uri': ''}\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef artifact_recommend(request):\n now = datetime.now()\n nowYear = now.year\n nowMonth = now.month\n nowDay = now.day\n daySum = 0\n if nowYear % 4 == 0 and nowYear % 100 != 0 or nowYear % 400 == 0:\n month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n else:\n month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n for i in range(nowMonth - 1):\n daySum += month[i]\n daySum += nowDay\n Recommended_list = RecommendedArtifact.objects.all()\n Recommended_artifact = Recommended_list[daySum]\n dataDict = {'id_num': Recommended_artifact.id_num, 'name':\n Recommended_artifact.name, 'desc': Recommended_artifact.desc,\n 'museum_name': Recommended_artifact.museum_name, 'nationality_name':\n Recommended_artifact.nationality_name, 'image_uri':\n Recommended_artifact.image_uri}\n return Response(dataDict)\n",
"step-3": "<mask token>\nservice_key = \"{jo's museum key}\"\n\n\n@api_view(['GET'])\ndef artifact_save_recommend(request, pageNo):\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n search_list = []\n for data in response_dict.findAll('data'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n id_num = item['value']\n search_list.append(id_num)\n detail_list = []\n dataDict = {'id_num': '', 'name': '', 'desc': '', 'museum_name': '',\n 'nationality_name': '', 'image_uri': ''}\n for i in range(len(search_list)):\n artifact_num = search_list[i]\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n for data in response_dict.findAll('list'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n dataDict['id_num'] = item['value']\n elif item['key'] == 'desc':\n dataDict['desc'] = item['value']\n elif item['key'] == 'nameKr':\n dataDict['name'] = item['value']\n elif item['key'] == 'nationalityName2':\n dataDict['nationality_name'] = item['value']\n elif item['key'] == 'museumName2':\n dataDict['museum_name'] = item['value']\n elif item['key'] == 'imgThumUriM':\n dataDict['image_uri'] = item['value']\n if dataDict['desc'] != '':\n serializer = RecommendedArtifactSerialize(data=dataDict)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n dataDict = {'id_num': '', 'name': '', 'desc': '',\n 'museum_name': '', 'nationality_name': '', 'image_uri': ''}\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef artifact_recommend(request):\n now = datetime.now()\n nowYear = now.year\n nowMonth = now.month\n nowDay = now.day\n daySum = 0\n if nowYear % 4 == 0 and nowYear % 100 != 0 or nowYear % 400 == 0:\n month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n else:\n month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n for i in range(nowMonth - 1):\n daySum += month[i]\n daySum += nowDay\n Recommended_list = RecommendedArtifact.objects.all()\n Recommended_artifact = Recommended_list[daySum]\n dataDict = {'id_num': Recommended_artifact.id_num, 'name':\n Recommended_artifact.name, 'desc': Recommended_artifact.desc,\n 'museum_name': Recommended_artifact.museum_name, 'nationality_name':\n Recommended_artifact.nationality_name, 'image_uri':\n Recommended_artifact.image_uri}\n return Response(dataDict)\n",
"step-4": "from .models import RecommendedArtifact\nfrom .serializers import RecommendedArtifactSerialize\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom datetime import datetime\nimport requests, bs4\nservice_key = \"{jo's museum key}\"\n\n\n@api_view(['GET'])\ndef artifact_save_recommend(request, pageNo):\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n search_list = []\n for data in response_dict.findAll('data'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n id_num = item['value']\n search_list.append(id_num)\n detail_list = []\n dataDict = {'id_num': '', 'name': '', 'desc': '', 'museum_name': '',\n 'nationality_name': '', 'image_uri': ''}\n for i in range(len(search_list)):\n artifact_num = search_list[i]\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n for data in response_dict.findAll('list'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n dataDict['id_num'] = item['value']\n elif item['key'] == 'desc':\n dataDict['desc'] = item['value']\n elif item['key'] == 'nameKr':\n dataDict['name'] = item['value']\n elif item['key'] == 'nationalityName2':\n dataDict['nationality_name'] = item['value']\n elif item['key'] == 'museumName2':\n dataDict['museum_name'] = item['value']\n elif item['key'] == 'imgThumUriM':\n dataDict['image_uri'] = item['value']\n if dataDict['desc'] != '':\n serializer = RecommendedArtifactSerialize(data=dataDict)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n dataDict = {'id_num': '', 'name': '', 'desc': '',\n 'museum_name': '', 'nationality_name': '', 'image_uri': ''}\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef artifact_recommend(request):\n now = datetime.now()\n nowYear = now.year\n nowMonth = now.month\n nowDay = now.day\n daySum = 0\n if nowYear % 4 == 0 and nowYear % 100 != 0 or nowYear % 400 == 0:\n month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n else:\n month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n for i in range(nowMonth - 1):\n daySum += month[i]\n daySum += nowDay\n Recommended_list = RecommendedArtifact.objects.all()\n Recommended_artifact = Recommended_list[daySum]\n dataDict = {'id_num': Recommended_artifact.id_num, 'name':\n Recommended_artifact.name, 'desc': Recommended_artifact.desc,\n 'museum_name': Recommended_artifact.museum_name, 'nationality_name':\n Recommended_artifact.nationality_name, 'image_uri':\n Recommended_artifact.image_uri}\n return Response(dataDict)\n",
"step-5": "from .models import RecommendedArtifact\nfrom .serializers import RecommendedArtifactSerialize\nfrom rest_framework.decorators import api_view \nfrom rest_framework.response import Response\nfrom datetime import datetime\nimport requests, bs4\n\n# constant value\nservice_key = \"{jo's museum key}\"\n\n@api_view(['GET'])\ndef artifact_save_recommend(request,pageNo):\n \n # 1. 페이지 선정 및 페이지 내 모든 유물 정보 가져오기\n artifact_url = f\"http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}\"\n #http://www.emuseum.go.kr/openapi/relic/list?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&numOfRows=100&pageNo=1\n\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n search_list = []\n\n for data in response_dict.findAll('data'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n id_num = item['value']\n search_list.append(id_num)\n\n # 2-1. 변수설정\n detail_list = []\n dataDict = {\n 'id_num': '',\n 'name': '',\n 'desc': '',\n 'museum_name': '',\n 'nationality_name': '',\n 'image_uri': '',\n }\n\n # 2-2. 모든 유물에서 desc있나 파악하기\n for i in range(len(search_list)):\n artifact_num = search_list[i]\n artifact_url = f\"http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}\"\n # http://www.emuseum.go.kr/openapi/relic/detail?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&id=PS0100100100100021500000\n \n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n\n for data in response_dict.findAll('list'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n dataDict['id_num'] = item['value']\n\n elif item['key'] == 'desc':\n dataDict['desc'] = item['value']\n\n elif item['key'] == 'nameKr':\n dataDict['name'] = item['value']\n\n elif item['key'] == 'nationalityName2':\n dataDict['nationality_name'] = item['value']\n\n elif item['key'] == 'museumName2':\n dataDict['museum_name'] = item['value']\n\n elif item['key'] == 'imgThumUriM':\n dataDict['image_uri'] = item['value']\n\n # 2-3 db에 저장하기\n if dataDict['desc'] != '':\n serializer = RecommendedArtifactSerialize(data=dataDict)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n dataDict = {\n 'id_num': '',\n 'name': '',\n 'desc': '',\n 'museum_name': '',\n 'nationality_name': '',\n 'image_uri': '',\n } \n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef artifact_recommend(request):\n ## 오늘은 며칠째인가요??\n now = datetime.now()\n nowYear = now.year\n nowMonth = now.month\n nowDay = now.day\n daySum = 0\n\n if nowYear%4==0 and nowYear%100!=0 or nowYear%400==0:\n month = [31,29,31,30,31,30,31,31,30,31,30,31]\n else:\n month = [31,28,31,30,31,30,31,31,30,31,30,31]\n \n for i in range(nowMonth-1):\n daySum += month[i]\n\n daySum += nowDay\n\n Recommended_list = RecommendedArtifact.objects.all()\n Recommended_artifact = Recommended_list[daySum]\n dataDict = {\n 'id_num': Recommended_artifact.id_num,\n 'name': Recommended_artifact.name,\n 'desc': Recommended_artifact.desc,\n 'museum_name': Recommended_artifact.museum_name,\n 'nationality_name': Recommended_artifact.nationality_name,\n 'image_uri': Recommended_artifact.image_uri,\n } \n # print(Recommended_artifact.name)\n\n return Response(dataDict)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import tensorflow as tf
from util.helper import focal_loss
from util.helper import conv_elu_bn
from util.helper import deconv_elu_bn
from util.helper import residual_block_elu
from util.helper import conv_elu
from util.helper import conv
from util.helper import reg_l1_loss
from util.helper import conv_bn
from util.helper import deconv
from util.helper import max_pool2d
from util.helper import upsample_layer
from util.helper import hourglass_module
from util.helper import conv_block
from util.helper import bottlenect_block_v1
from util.helper import pyramid_pooling_block
# class labels: 0 = cat, 1 = dog
class model_objectdetection_ppm_centernet_v1:
def __init__(self, sess, class_count):
self.sess = sess
self.class_count = class_count
self.up_sample_rate = 1
self.feature_channels = 32
#self.hourglass_channel = 32
with tf.variable_scope('CenterNet'):
self._build_net()
def _build_net(self):
self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32, shape=[], name='learning_rate')
print(self.learning_rate_tensor)
self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3], name='X')
print(self.X)
self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')
print(self.keep_layer)
self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self.class_count], 'Y')
        self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2], 'SIZE')
print(self.Y)
## Batch , Height , Width, Class
#X_input = tf.compat.v1.reshape(self.X, [-1, 512, 512, 3])
#Y_input = tf.compat.v1.reshape(self.Y, [-1, 128, 128, self.class_count])
# 512 512 -> 256x 256
with tf.variable_scope('downsamples'):
stage_1_1 = conv_block(self.X, conv_type='conv', filters=16, kernel_size=3, strides=2, training=self.keep_layer)
stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32, kernel_size=3, strides=2, training=self.keep_layer)
stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64, kernel_size=3, strides=2, training=self.keep_layer)
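        # Bottleneck residual stages deepen the features produced by the downsampling stem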
with tf.variable_scope('feature_extraction'):
feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual1')
feature2 = bottlenect_block_v1(inputs=feature1, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual2')
feature3 = bottlenect_block_v1(inputs=feature2, filters=32, kernel_size=3, upsample_rate=2, strides=1, repeat=2, training=self.keep_layer, name='residual3')
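        # Pyramid pooling aggregates global context at bin sizes 2, 4, 6 and 8 (PSPNet-style)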
with tf.variable_scope('pyramid_pooling'):
pyramid = pyramid_pooling_block(feature3, kernel_size=32, input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])
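        # Fuse the low-level skip branch (stage_1_3) with the upsampled pyramid features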
with tf.variable_scope('featurefuse'):
feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv', filters=160, kernel_size=1, strides=1, training=self.keep_layer)
print('test',feature_fuse_layer1)
feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])
depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2', [3, 3, 32 * 5, 1], initializer=tf.compat.v1.variance_scaling_initializer())
feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1, 1, 1], padding='SAME')
            print('feature_depthwise conv=', feature_fuse_layer2)
feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(feature_fuse_layer2, scale=True, center=True, momentum=0.9, training=self.keep_layer)
feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)
feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=feature_fuse_layer2, filters=1, kernel_size=1, strides=1, padding='same', kernel_initializer=tf.compat.v1.variance_scaling_initializer())
final_feature = feature_fuse_layer2 + feature_fuse_layer1
final_feature = tf.compat.v1.layers.batch_normalization(final_feature, scale=True, center=True, momentum=0.9, training=self.keep_layer)
final_feature = tf.compat.v1.nn.relu(final_feature)
with tf.variable_scope('classifier'):
classifiter = conv_block(final_feature, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)
#classifiter = conv_block(classifiter, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)
print("=== network structure ===")
with tf.variable_scope("detector"):
#self.cls = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu1')
self.cls = conv(classifiter, filters=self.class_count, kernel_size=1, strides=1, name='detector_conv1')
self.cls = tf.compat.v1.nn.sigmoid(self.cls, name="heatmap")
#self.size = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu2')
self.size = conv(classifiter, filters=2, kernel_size=1, strides=1, name='detector_conv2')
self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')
print("heatmap sigmoid=", self.cls)
        self.output = self.cls
print("=== network structure ===")
self.heatmap_loss = focal_loss(self.output, self.Y)
self.size_loss = reg_l1_loss(self.size, self.SIZE)
self.cost = self.heatmap_loss + 0.1 * self.size_loss
# define cost/loss & optimizer
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.compat.v1.control_dependencies(update_ops):
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate_tensor).minimize(self.cost, name='AdamMinimize')
print("==============Node Name List==============")
print("learning rate tensor : ", self.learning_rate_tensor)
print("Input Node Name : ", self.X)
print("Output 4 Train Node Name : ", self.Y)
print("Phase Node Name", self.keep_layer)
print("Output Node Name (heatmap) : ", self.output)
print("Output Node Name (sizemap) : ", self.size)
print("Cost Function Node Name : ", self.cost)
print("Run this operation for a train step :", self.optimizer.name)
print("==============Node Name List==============")
def predict(self, x_test, keep_prop=False):
return self.sess.run([self.output, self.size], feed_dict={self.X: x_test, self.keep_layer: keep_prop})
def get_cost(self, x_test, y_test, y_size, keep_prop=False):
# print(self.sess.run(self.output, feed_dict={self.X: x_test, self.keep_layer: keep_prop}))
return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y: y_test, self.SIZE:y_size, self.keep_layer: keep_prop})
def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):
return self.sess.run(self.optimizer, feed_dict={self.X: x_data, self.Y: y_data, self.SIZE:y_size, self.keep_layer: keep_prop, self.learning_rate_tensor: learn_rate})
|
normal
|
{
"blob_id": "e24a62f2a3ff0122922f472a7b37f1773dfe9c11",
"index": 7605,
"step-1": "<mask token>\n\n\nclass model_objectdetection_ppm_centernet_v1:\n <mask token>\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32,\n shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3],\n name='X')\n print(self.X)\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self\n .class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2\n ], 'Y')\n print(self.Y)\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64,\n kernel_size=3, strides=2, training=self.keep_layer)\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32,\n kernel_size=3, upsample_rate=2, strides=1, repeat=2,\n training=self.keep_layer, name='residual3')\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32,\n input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv',\n filters=160, kernel_size=1, strides=1, training=self.keep_layer\n )\n print('test', feature_fuse_layer1)\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2',\n [3, 3, 32 * 5, 1], initializer=tf.compat.v1.\n variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=\n feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1,\n 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(\n feature_fuse_layer2, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=\n feature_fuse_layer2, filters=1, kernel_size=1, strides=1,\n padding='same', kernel_initializer=tf.compat.v1.\n variance_scaling_initializer())\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(\n final_feature, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters\n =64, kernel_size=3, strides=1, training=self.keep_layer)\n print('=== network structure ===')\n with tf.variable_scope('detector'):\n self.cls = conv(classifiter, filters=self.class_count,\n kernel_size=1, strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name='heatmap')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides\n =1, name='detector_conv2')\n 
self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n print('heatmap sigmoid=', self.cls)\n self.output = self.cls\n print('=== network structure ===')\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.\n UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate\n =self.learning_rate_tensor).minimize(self.cost, name=\n 'AdamMinimize')\n print('==============Node Name List==============')\n print('learning rate tensor : ', self.learning_rate_tensor)\n print('Input Node Name : ', self.X)\n print('Output 4 Train Node Name : ', self.Y)\n print('Phase Node Name', self.keep_layer)\n print('Output Node Name (heatmap) : ', self.output)\n print('Output Node Name (sizemap) : ', self.size)\n print('Cost Function Node Name : ', self.cost)\n print('Run this operation for a train step :', self.\n optimizer.name)\n print('==============Node Name List==============')\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X:\n x_test, self.keep_layer: keep_prop})\n <mask token>\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data,\n self.Y: y_data, self.SIZE: y_size, self.keep_layer: keep_prop,\n self.learning_rate_tensor: learn_rate})\n",
"step-2": "<mask token>\n\n\nclass model_objectdetection_ppm_centernet_v1:\n\n def __init__(self, sess, class_count):\n self.sess = sess\n self.class_count = class_count\n self.up_sample_rate = 1\n self.feature_channels = 32\n with tf.variable_scope('CenterNet'):\n self._build_net()\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32,\n shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3],\n name='X')\n print(self.X)\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self\n .class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2\n ], 'Y')\n print(self.Y)\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64,\n kernel_size=3, strides=2, training=self.keep_layer)\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32,\n kernel_size=3, upsample_rate=2, strides=1, repeat=2,\n training=self.keep_layer, name='residual3')\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32,\n input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv',\n filters=160, kernel_size=1, strides=1, training=self.keep_layer\n )\n print('test', feature_fuse_layer1)\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2',\n [3, 3, 32 * 5, 1], initializer=tf.compat.v1.\n variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=\n feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1,\n 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(\n feature_fuse_layer2, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=\n feature_fuse_layer2, filters=1, kernel_size=1, strides=1,\n padding='same', kernel_initializer=tf.compat.v1.\n variance_scaling_initializer())\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(\n final_feature, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters\n =64, kernel_size=3, strides=1, training=self.keep_layer)\n print('=== network structure ===')\n with tf.variable_scope('detector'):\n self.cls = conv(classifiter, filters=self.class_count,\n kernel_size=1, 
strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name='heatmap')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides\n =1, name='detector_conv2')\n self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n print('heatmap sigmoid=', self.cls)\n self.output = self.cls\n print('=== network structure ===')\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.\n UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate\n =self.learning_rate_tensor).minimize(self.cost, name=\n 'AdamMinimize')\n print('==============Node Name List==============')\n print('learning rate tensor : ', self.learning_rate_tensor)\n print('Input Node Name : ', self.X)\n print('Output 4 Train Node Name : ', self.Y)\n print('Phase Node Name', self.keep_layer)\n print('Output Node Name (heatmap) : ', self.output)\n print('Output Node Name (sizemap) : ', self.size)\n print('Cost Function Node Name : ', self.cost)\n print('Run this operation for a train step :', self.\n optimizer.name)\n print('==============Node Name List==============')\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X:\n x_test, self.keep_layer: keep_prop})\n <mask token>\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data,\n self.Y: y_data, self.SIZE: y_size, self.keep_layer: keep_prop,\n self.learning_rate_tensor: learn_rate})\n",
"step-3": "<mask token>\n\n\nclass model_objectdetection_ppm_centernet_v1:\n\n def __init__(self, sess, class_count):\n self.sess = sess\n self.class_count = class_count\n self.up_sample_rate = 1\n self.feature_channels = 32\n with tf.variable_scope('CenterNet'):\n self._build_net()\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32,\n shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3],\n name='X')\n print(self.X)\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self\n .class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2\n ], 'Y')\n print(self.Y)\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64,\n kernel_size=3, strides=2, training=self.keep_layer)\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32,\n kernel_size=3, upsample_rate=2, strides=1, repeat=2,\n training=self.keep_layer, name='residual3')\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32,\n input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv',\n filters=160, kernel_size=1, strides=1, training=self.keep_layer\n )\n print('test', feature_fuse_layer1)\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2',\n [3, 3, 32 * 5, 1], initializer=tf.compat.v1.\n variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=\n feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1,\n 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(\n feature_fuse_layer2, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=\n feature_fuse_layer2, filters=1, kernel_size=1, strides=1,\n padding='same', kernel_initializer=tf.compat.v1.\n variance_scaling_initializer())\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(\n final_feature, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters\n =64, kernel_size=3, strides=1, training=self.keep_layer)\n print('=== network structure ===')\n with tf.variable_scope('detector'):\n self.cls = conv(classifiter, filters=self.class_count,\n kernel_size=1, 
strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name='heatmap')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides\n =1, name='detector_conv2')\n self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n print('heatmap sigmoid=', self.cls)\n self.output = self.cls\n print('=== network structure ===')\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.\n UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate\n =self.learning_rate_tensor).minimize(self.cost, name=\n 'AdamMinimize')\n print('==============Node Name List==============')\n print('learning rate tensor : ', self.learning_rate_tensor)\n print('Input Node Name : ', self.X)\n print('Output 4 Train Node Name : ', self.Y)\n print('Phase Node Name', self.keep_layer)\n print('Output Node Name (heatmap) : ', self.output)\n print('Output Node Name (sizemap) : ', self.size)\n print('Cost Function Node Name : ', self.cost)\n print('Run this operation for a train step :', self.\n optimizer.name)\n print('==============Node Name List==============')\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X:\n x_test, self.keep_layer: keep_prop})\n\n def get_cost(self, x_test, y_test, y_size, keep_prop=False):\n return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y:\n y_test, self.SIZE: y_size, self.keep_layer: keep_prop})\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data,\n self.Y: y_data, self.SIZE: y_size, self.keep_layer: keep_prop,\n self.learning_rate_tensor: learn_rate})\n",
"step-4": "import tensorflow as tf\nfrom util.helper import focal_loss\nfrom util.helper import conv_elu_bn\nfrom util.helper import deconv_elu_bn\nfrom util.helper import residual_block_elu\nfrom util.helper import conv_elu\nfrom util.helper import conv\nfrom util.helper import reg_l1_loss\nfrom util.helper import conv_bn\nfrom util.helper import deconv\nfrom util.helper import max_pool2d\nfrom util.helper import upsample_layer\nfrom util.helper import hourglass_module\nfrom util.helper import conv_block\nfrom util.helper import bottlenect_block_v1\nfrom util.helper import pyramid_pooling_block\n\n\nclass model_objectdetection_ppm_centernet_v1:\n\n def __init__(self, sess, class_count):\n self.sess = sess\n self.class_count = class_count\n self.up_sample_rate = 1\n self.feature_channels = 32\n with tf.variable_scope('CenterNet'):\n self._build_net()\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32,\n shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3],\n name='X')\n print(self.X)\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self\n .class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2\n ], 'Y')\n print(self.Y)\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64,\n kernel_size=3, strides=2, training=self.keep_layer)\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32,\n kernel_size=3, upsample_rate=2, strides=1, repeat=2,\n training=self.keep_layer, name='residual3')\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32,\n input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv',\n filters=160, kernel_size=1, strides=1, training=self.keep_layer\n )\n print('test', feature_fuse_layer1)\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2',\n [3, 3, 32 * 5, 1], initializer=tf.compat.v1.\n variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=\n feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1,\n 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(\n feature_fuse_layer2, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=\n feature_fuse_layer2, filters=1, kernel_size=1, strides=1,\n padding='same', kernel_initializer=tf.compat.v1.\n 
variance_scaling_initializer())\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(\n final_feature, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters\n =64, kernel_size=3, strides=1, training=self.keep_layer)\n print('=== network structure ===')\n with tf.variable_scope('detector'):\n self.cls = conv(classifiter, filters=self.class_count,\n kernel_size=1, strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name='heatmap')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides\n =1, name='detector_conv2')\n self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n print('heatmap sigmoid=', self.cls)\n self.output = self.cls\n print('=== network structure ===')\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.\n UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate\n =self.learning_rate_tensor).minimize(self.cost, name=\n 'AdamMinimize')\n print('==============Node Name List==============')\n print('learning rate tensor : ', self.learning_rate_tensor)\n print('Input Node Name : ', self.X)\n print('Output 4 Train Node Name : ', self.Y)\n print('Phase Node Name', self.keep_layer)\n print('Output Node Name (heatmap) : ', self.output)\n print('Output Node Name (sizemap) : ', self.size)\n print('Cost Function Node Name : ', self.cost)\n print('Run this operation for a train step :', self.\n optimizer.name)\n print('==============Node Name List==============')\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X:\n x_test, self.keep_layer: keep_prop})\n\n def get_cost(self, x_test, y_test, y_size, keep_prop=False):\n return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y:\n y_test, self.SIZE: y_size, self.keep_layer: keep_prop})\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data,\n self.Y: y_data, self.SIZE: y_size, self.keep_layer: keep_prop,\n self.learning_rate_tensor: learn_rate})\n",
"step-5": "import tensorflow as tf\n\nfrom util.helper import focal_loss\nfrom util.helper import conv_elu_bn\nfrom util.helper import deconv_elu_bn\nfrom util.helper import residual_block_elu\nfrom util.helper import conv_elu\nfrom util.helper import conv\nfrom util.helper import reg_l1_loss\nfrom util.helper import conv_bn\nfrom util.helper import deconv\nfrom util.helper import max_pool2d\nfrom util.helper import upsample_layer\nfrom util.helper import hourglass_module\n\n\nfrom util.helper import conv_block\nfrom util.helper import bottlenect_block_v1\nfrom util.helper import pyramid_pooling_block\n\n# 0 cat , 1 dog,\n\nclass model_objectdetection_ppm_centernet_v1:\n\n def __init__(self, sess, class_count):\n self.sess = sess\n self.class_count = class_count\n self.up_sample_rate = 1\n self.feature_channels = 32\n #self.hourglass_channel = 32\n\n with tf.variable_scope('CenterNet'):\n self._build_net()\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32, shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3], name='X')\n print(self.X)\n\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self.class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2], 'Y')\n print(self.Y)\n\n ## Batch , Height , Width, Class\n #X_input = tf.compat.v1.reshape(self.X, [-1, 512, 512, 3])\n #Y_input = tf.compat.v1.reshape(self.Y, [-1, 128, 128, self.class_count])\n\n\n # 512 512 -> 256x 256\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16, kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32, kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64, kernel_size=3, strides=2, training=self.keep_layer)\n\n\n\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32, kernel_size=3, upsample_rate=2, strides=1, repeat=2, training=self.keep_layer, name='residual3')\n\n\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32, input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n\n\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv', filters=160, kernel_size=1, strides=1, training=self.keep_layer)\n print('test',feature_fuse_layer1)\n\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2', [3, 3, 32 * 5, 1], initializer=tf.compat.v1.variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1, 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(feature_fuse_layer2, scale=True, center=True, momentum=0.9, training=self.keep_layer)\n feature_fuse_layer2 = 
tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=feature_fuse_layer2, filters=1, kernel_size=1, strides=1, padding='same', kernel_initializer=tf.compat.v1.variance_scaling_initializer())\n\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(final_feature, scale=True, center=True, momentum=0.9, training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n\n\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)\n #classifiter = conv_block(classifiter, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)\n\n\n print(\"=== network structure ===\")\n\n with tf.variable_scope(\"detector\"):\n #self.cls = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu1')\n self.cls = conv(classifiter, filters=self.class_count, kernel_size=1, strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name=\"heatmap\")\n\n #self.size = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu2')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides=1, name='detector_conv2')\n self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n\n\n print(\"heatmap sigmoid=\", self.cls)\n\n self.output = self.cls;\n print(\"=== network structure ===\")\n\n\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n # define cost/loss & optimizer\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate_tensor).minimize(self.cost, name='AdamMinimize')\n\n print(\"==============Node Name List==============\")\n print(\"learning rate tensor : \", self.learning_rate_tensor)\n print(\"Input Node Name : \", self.X)\n print(\"Output 4 Train Node Name : \", self.Y)\n print(\"Phase Node Name\", self.keep_layer)\n print(\"Output Node Name (heatmap) : \", self.output)\n print(\"Output Node Name (sizemap) : \", self.size)\n print(\"Cost Function Node Name : \", self.cost)\n print(\"Run this operation for a train step :\", self.optimizer.name)\n print(\"==============Node Name List==============\")\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X: x_test, self.keep_layer: keep_prop})\n\n def get_cost(self, x_test, y_test, y_size, keep_prop=False):\n # print(self.sess.run(self.output, feed_dict={self.X: x_test, self.keep_layer: keep_prop}))\n return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y: y_test, self.SIZE:y_size, self.keep_layer: keep_prop})\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data, self.Y: y_data, self.SIZE:y_size, self.keep_layer: keep_prop, self.learning_rate_tensor: learn_rate})",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class TestMaxInteger(unittest.TestCase):
<|reserved_special_token_0|>
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
#!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
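# __import__ is used because '6-max_integer' starts with a digit and cannot be loaded with a plain import statement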
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "f799fdfde537bbe8f6c49a5e1a15cf6f910a0d45",
"index": 889,
"step-1": "<mask token>\n\n\nclass TestMaxInteger(unittest.TestCase):\n <mask token>\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\n<mask token>\n",
"step-3": "<mask token>\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/python3\n\"\"\"Unittest for max_integer([..])\n\"\"\"\nimport unittest\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
<|reserved_special_token_0|>
class Vts:
def __init__(self):
self.last_comment_id = 0
self.vk = None
def update_vk(self):
if self.vk is not None:
return
vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)
try:
vk_session.authorization()
except vk_api.AuthorizationError as error_msg:
logging.error(error_msg)
return
except vk_api.Captcha as captcha:
logging.error(captcha)
return
self.vk = vk_session.get_api()
def update_last_comment_id(self):
self.update_vk()
if self.vk is None:
return
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=
TOPIC_ID, sort='desc', count=1)
if response['count'] == 0:
time.sleep(5)
return
self.last_comment_id = response['items'][0]['id']
print('Set initial id to ' + str(self.last_comment_id))
def get_comments(self):
self.update_vk()
if self.vk is None:
return [], []
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=
TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)
return response['items'], response['profiles']
def get_topic(self):
self.update_vk()
response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[
TOPIC_ID])
if response['count'] == 0:
return None
return response['items'][0]
def run(self):
while True:
if self.last_comment_id == 0:
self.update_last_comment_id()
topic = self.get_topic()
if topic is None:
logging.warning('Topic not found')
time.sleep(60)
continue
comments, profiles = self.get_comments()
if len(comments) == 0:
time.sleep(5)
continue
users = dict()
for profile in profiles:
users[profile['id']] = profile
for comment in comments:
id = comment['id']
if id > self.last_comment_id:
self.last_comment_id = id
text = comment['text']
title = topic['title']
user_id = abs(comment['from_id'])
try:
user = users[user_id]
username = ' '.join([user['first_name'], user[
'last_name']])
except KeyError:
username = ''
date = comment['date']
message_date = '<!date^' + str(date
) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'
text = '\n'.join(map(lambda s: '>' + s, text.split('\n')))
message = ('>*' + title + '*\n>_' + username + '_ (' +
message_date + ')\n' + text)
slack.chat.post_message(channel='#random', text=message,
username='vts', icon_url=ICON_URL)
logging.info('Posted comment_id=%s\n%s', id, message)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Vts:
def __init__(self):
self.last_comment_id = 0
self.vk = None
def update_vk(self):
if self.vk is not None:
return
vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)
try:
vk_session.authorization()
except vk_api.AuthorizationError as error_msg:
logging.error(error_msg)
return
except vk_api.Captcha as captcha:
logging.error(captcha)
return
self.vk = vk_session.get_api()
def update_last_comment_id(self):
self.update_vk()
if self.vk is None:
return
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=
TOPIC_ID, sort='desc', count=1)
if response['count'] == 0:
time.sleep(5)
return
self.last_comment_id = response['items'][0]['id']
print('Set initial id to ' + str(self.last_comment_id))
def get_comments(self):
self.update_vk()
if self.vk is None:
return [], []
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=
TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)
return response['items'], response['profiles']
def get_topic(self):
self.update_vk()
response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[
TOPIC_ID])
if response['count'] == 0:
return None
return response['items'][0]
def run(self):
while True:
if self.last_comment_id == 0:
self.update_last_comment_id()
topic = self.get_topic()
if topic is None:
logging.warning('Topic not found')
time.sleep(60)
continue
comments, profiles = self.get_comments()
if len(comments) == 0:
time.sleep(5)
continue
users = dict()
for profile in profiles:
users[profile['id']] = profile
for comment in comments:
id = comment['id']
if id > self.last_comment_id:
self.last_comment_id = id
text = comment['text']
title = topic['title']
user_id = abs(comment['from_id'])
try:
user = users[user_id]
username = ' '.join([user['first_name'], user[
'last_name']])
except KeyError:
username = ''
date = comment['date']
message_date = '<!date^' + str(date
) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'
text = '\n'.join(map(lambda s: '>' + s, text.split('\n')))
message = ('>*' + title + '*\n>_' + username + '_ (' +
message_date + ')\n' + text)
slack.chat.post_message(channel='#random', text=message,
username='vts', icon_url=ICON_URL)
logging.info('Posted comment_id=%s\n%s', id, message)
if __name__ == '__main__':
vts = Vts()
try:
while True:
try:
vts.run()
except vk_api.requests.exceptions.ConnectionError:
time.sleep(10)
except KeyboardInterrupt:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
slack = Slacker(SLACK_TOKEN)
class Vts:
def __init__(self):
self.last_comment_id = 0
self.vk = None
def update_vk(self):
if self.vk is not None:
return
vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)
try:
vk_session.authorization()
except vk_api.AuthorizationError as error_msg:
logging.error(error_msg)
return
except vk_api.Captcha as captcha:
logging.error(captcha)
return
self.vk = vk_session.get_api()
def update_last_comment_id(self):
self.update_vk()
if self.vk is None:
return
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=
TOPIC_ID, sort='desc', count=1)
if response['count'] == 0:
time.sleep(5)
return
self.last_comment_id = response['items'][0]['id']
print('Set initial id to ' + str(self.last_comment_id))
def get_comments(self):
self.update_vk()
if self.vk is None:
return [], []
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=
TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)
return response['items'], response['profiles']
def get_topic(self):
self.update_vk()
response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[
TOPIC_ID])
if response['count'] == 0:
return None
return response['items'][0]
def run(self):
while True:
if self.last_comment_id == 0:
self.update_last_comment_id()
topic = self.get_topic()
if topic is None:
logging.warning('Topic not found')
time.sleep(60)
continue
comments, profiles = self.get_comments()
if len(comments) == 0:
time.sleep(5)
continue
users = dict()
for profile in profiles:
users[profile['id']] = profile
for comment in comments:
id = comment['id']
if id > self.last_comment_id:
self.last_comment_id = id
text = comment['text']
title = topic['title']
user_id = abs(comment['from_id'])
try:
user = users[user_id]
username = ' '.join([user['first_name'], user[
'last_name']])
except KeyError:
username = ''
date = comment['date']
message_date = '<!date^' + str(date
) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'
text = '\n'.join(map(lambda s: '>' + s, text.split('\n')))
message = ('>*' + title + '*\n>_' + username + '_ (' +
message_date + ')\n' + text)
slack.chat.post_message(channel='#random', text=message,
username='vts', icon_url=ICON_URL)
logging.info('Posted comment_id=%s\n%s', id, message)
if __name__ == '__main__':
vts = Vts()
try:
while True:
try:
vts.run()
except vk_api.requests.exceptions.ConnectionError:
time.sleep(10)
except KeyboardInterrupt:
pass
<|reserved_special_token_1|>
from slacker import Slacker
import vk_api
import time
import logging
from settings import SLACK_TOKEN, VK_LOGIN, VK_PASSWORD, GROUP_ID, TOPIC_ID, ICON_URL
slack = Slacker(SLACK_TOKEN)
class Vts:
def __init__(self):
self.last_comment_id = 0
self.vk = None
def update_vk(self):
if self.vk is not None:
return
vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)
try:
vk_session.authorization()
except vk_api.AuthorizationError as error_msg:
logging.error(error_msg)
return
except vk_api.Captcha as captcha:
logging.error(captcha)
return
self.vk = vk_session.get_api()
def update_last_comment_id(self):
self.update_vk()
if self.vk is None:
return
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=
TOPIC_ID, sort='desc', count=1)
if response['count'] == 0:
time.sleep(5)
return
self.last_comment_id = response['items'][0]['id']
print('Set initial id to ' + str(self.last_comment_id))
def get_comments(self):
self.update_vk()
if self.vk is None:
return [], []
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=
TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)
return response['items'], response['profiles']
def get_topic(self):
self.update_vk()
response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[
TOPIC_ID])
if response['count'] == 0:
return None
return response['items'][0]
def run(self):
while True:
if self.last_comment_id == 0:
self.update_last_comment_id()
topic = self.get_topic()
if topic is None:
logging.warning('Topic not found')
time.sleep(60)
continue
comments, profiles = self.get_comments()
if len(comments) == 0:
time.sleep(5)
continue
users = dict()
for profile in profiles:
users[profile['id']] = profile
for comment in comments:
id = comment['id']
if id > self.last_comment_id:
self.last_comment_id = id
text = comment['text']
title = topic['title']
user_id = abs(comment['from_id'])
try:
user = users[user_id]
username = ' '.join([user['first_name'], user[
'last_name']])
except KeyError:
username = ''
date = comment['date']
message_date = '<!date^' + str(date
) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'
text = '\n'.join(map(lambda s: '>' + s, text.split('\n')))
message = ('>*' + title + '*\n>_' + username + '_ (' +
message_date + ')\n' + text)
slack.chat.post_message(channel='#random', text=message,
username='vts', icon_url=ICON_URL)
logging.info('Posted comment_id=%s\n%s', id, message)
if __name__ == '__main__':
vts = Vts()
try:
while True:
try:
vts.run()
except vk_api.requests.exceptions.ConnectionError:
time.sleep(10)
except KeyboardInterrupt:
pass
<|reserved_special_token_1|>
from slacker import Slacker
import vk_api
import time
import logging
from settings import SLACK_TOKEN, VK_LOGIN, VK_PASSWORD, GROUP_ID, TOPIC_ID, ICON_URL
slack = Slacker(SLACK_TOKEN)
class Vts:
def __init__(self):
self.last_comment_id = 0
self.vk = None
def update_vk(self):
if self.vk is not None:
return
vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)
try:
vk_session.authorization()
except vk_api.AuthorizationError as error_msg:
logging.error(error_msg)
return
except vk_api.Captcha as captcha:
logging.error(captcha)
return
self.vk = vk_session.get_api()
def update_last_comment_id(self):
self.update_vk()
if self.vk is None:
return
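        # Fetch only the single newest comment to seed the high-water mark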
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID, sort='desc', count=1)
if response['count'] == 0:
time.sleep(5)
return
self.last_comment_id = response['items'][0]['id']
print('Set initial id to ' + str(self.last_comment_id))
def get_comments(self):
self.update_vk()
if self.vk is None:
return [], []
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID,
start_comment_id=self.last_comment_id, extended=1)
return response['items'], response['profiles']
def get_topic(self):
self.update_vk()
response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[TOPIC_ID])
if response['count'] == 0:
return None
return response['items'][0]
def run(self):
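        """Poll the board topic and mirror new comments into Slack."""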
while True:
if self.last_comment_id == 0:
self.update_last_comment_id()
topic = self.get_topic()
if topic is None:
logging.warning('Topic not found')
time.sleep(60)
continue
comments, profiles = self.get_comments()
if len(comments) == 0:
time.sleep(5)
continue
users = dict()
for profile in profiles:
users[profile['id']] = profile
for comment in comments:
id = comment['id']
if id > self.last_comment_id:
self.last_comment_id = id
text = comment['text']
title = topic['title']
user_id = abs(comment['from_id'])
try:
user = users[user_id]
username = ' '.join([user['first_name'], user['last_name']])
except KeyError:
username = ''
date = comment['date']
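                # Slack's <!date^ts^format|fallback> token renders the timestamp in the reader's local time zone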
message_date = '<!date^' + str(date) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'
text = "\n".join(map(lambda s: ">" + s, text.split("\n")))
message = '>*' + title + '*\n>_' + username + '_ (' + message_date + ')\n' + text
slack.chat.post_message(channel='#random', text=message, username='vts', icon_url=ICON_URL)
logging.info('Posted comment_id=%s\n%s', id, message)
if __name__ == '__main__':
vts = Vts()
try:
while True:
try:
vts.run()
except vk_api.requests.exceptions.ConnectionError:
time.sleep(10)
except KeyboardInterrupt:
pass
|
flexible
|
{
"blob_id": "885e02cbf78412d77bd17eba64a8a1a52aaed0df",
"index": 5837,
"step-1": "<mask token>\n\n\nclass Vts:\n\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n if self.vk is None:\n return\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n if self.vk is None:\n return [], []\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[\n TOPIC_ID])\n if response['count'] == 0:\n return None\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n comments, profiles = self.get_comments()\n if len(comments) == 0:\n time.sleep(5)\n continue\n users = dict()\n for profile in profiles:\n users[profile['id']] = profile\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user[\n 'last_name']])\n except KeyError:\n username = ''\n date = comment['date']\n message_date = '<!date^' + str(date\n ) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = '\\n'.join(map(lambda s: '>' + s, text.split('\\n')))\n message = ('>*' + title + '*\\n>_' + username + '_ (' +\n message_date + ')\\n' + text)\n slack.chat.post_message(channel='#random', text=message,\n username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vts:\n\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n if self.vk is None:\n return\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n if self.vk is None:\n return [], []\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[\n TOPIC_ID])\n if response['count'] == 0:\n return None\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n comments, profiles = self.get_comments()\n if len(comments) == 0:\n time.sleep(5)\n continue\n users = dict()\n for profile in profiles:\n users[profile['id']] = profile\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user[\n 'last_name']])\n except KeyError:\n username = ''\n date = comment['date']\n message_date = '<!date^' + str(date\n ) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = '\\n'.join(map(lambda s: '>' + s, text.split('\\n')))\n message = ('>*' + title + '*\\n>_' + username + '_ (' +\n message_date + ')\\n' + text)\n slack.chat.post_message(channel='#random', text=message,\n username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\n\nif __name__ == '__main__':\n vts = Vts()\n try:\n while True:\n try:\n vts.run()\n except vk_api.requests.exceptions.ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n",
"step-3": "<mask token>\nslack = Slacker(SLACK_TOKEN)\n\n\nclass Vts:\n\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n if self.vk is None:\n return\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n if self.vk is None:\n return [], []\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[\n TOPIC_ID])\n if response['count'] == 0:\n return None\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n comments, profiles = self.get_comments()\n if len(comments) == 0:\n time.sleep(5)\n continue\n users = dict()\n for profile in profiles:\n users[profile['id']] = profile\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user[\n 'last_name']])\n except KeyError:\n username = ''\n date = comment['date']\n message_date = '<!date^' + str(date\n ) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = '\\n'.join(map(lambda s: '>' + s, text.split('\\n')))\n message = ('>*' + title + '*\\n>_' + username + '_ (' +\n message_date + ')\\n' + text)\n slack.chat.post_message(channel='#random', text=message,\n username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\n\nif __name__ == '__main__':\n vts = Vts()\n try:\n while True:\n try:\n vts.run()\n except vk_api.requests.exceptions.ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n",
"step-4": "from slacker import Slacker\nimport vk_api\nimport time\nimport logging\nfrom settings import SLACK_TOKEN, VK_LOGIN, VK_PASSWORD, GROUP_ID, TOPIC_ID, ICON_URL\nslack = Slacker(SLACK_TOKEN)\n\n\nclass Vts:\n\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n if self.vk is None:\n return\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n if self.vk is None:\n return [], []\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[\n TOPIC_ID])\n if response['count'] == 0:\n return None\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n comments, profiles = self.get_comments()\n if len(comments) == 0:\n time.sleep(5)\n continue\n users = dict()\n for profile in profiles:\n users[profile['id']] = profile\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user[\n 'last_name']])\n except KeyError:\n username = ''\n date = comment['date']\n message_date = '<!date^' + str(date\n ) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = '\\n'.join(map(lambda s: '>' + s, text.split('\\n')))\n message = ('>*' + title + '*\\n>_' + username + '_ (' +\n message_date + ')\\n' + text)\n slack.chat.post_message(channel='#random', text=message,\n username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\n\nif __name__ == '__main__':\n vts = Vts()\n try:\n while True:\n try:\n vts.run()\n except vk_api.requests.exceptions.ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n",
"step-5": "from slacker import Slacker\n\nimport vk_api\n\nimport time\n\nimport logging\n\nfrom settings import SLACK_TOKEN, VK_LOGIN, VK_PASSWORD, GROUP_ID, TOPIC_ID, ICON_URL\n\nslack = Slacker(SLACK_TOKEN)\n\n\nclass Vts:\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n\n if self.vk is None:\n return\n\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n\n if self.vk is None:\n return [], []\n\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID,\n start_comment_id=self.last_comment_id, extended=1)\n\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[TOPIC_ID])\n if response['count'] == 0:\n return None\n\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n\n comments, profiles = self.get_comments()\n\n if len(comments) == 0:\n time.sleep(5)\n continue\n\n users = dict()\n\n for profile in profiles:\n users[profile['id']] = profile\n\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user['last_name']])\n except KeyError:\n username = ''\n\n date = comment['date']\n message_date = '<!date^' + str(date) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = \"\\n\".join(map(lambda s: \">\" + s, text.split(\"\\n\")))\n message = '>*' + title + '*\\n>_' + username + '_ (' + message_date + ')\\n' + text\n slack.chat.post_message(channel='#random', text=message, username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\nif __name__ == '__main__':\n vts = Vts()\n try:\n while True:\n try:\n vts.run()\n except vk_api.requests.exceptions.ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
from django.shortcuts import render
from django.http import HttpResponse
# from appTwo.models import User
from appTwo.forms import NewUserForm
# Create your views here.
# def index(request):
# return HttpResponse("<em>My Second Project</em>")
def welcome(request):
# welcomedict={'welcome_insert':'Go to /users to see the list of user information!'}
return render(request,'welcome.html')
def users(request):
# users_list=User.objects.all()
# user_dict={'users':users_list}
form = NewUserForm()
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
form.save(commit=True)
return welcome(request)
else:
print('ERROR FORM INVALID')
return render(request,"users.html",{'form':form})
|
normal
|
{
"blob_id": "d5f66d92371838c703abbf80e2b78717cdd4a4fb",
"index": 7140,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef welcome(request):\n return render(request, 'welcome.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef welcome(request):\n return render(request, 'welcome.html')\n\n\ndef users(request):\n form = NewUserForm()\n if request.method == 'POST':\n form = NewUserForm(request.POST)\n if form.is_valid():\n form.save(commit=True)\n return welcome(request)\n else:\n print('ERROR FORM INVALID')\n return render(request, 'users.html', {'form': form})\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom appTwo.forms import NewUserForm\n\n\ndef welcome(request):\n return render(request, 'welcome.html')\n\n\ndef users(request):\n form = NewUserForm()\n if request.method == 'POST':\n form = NewUserForm(request.POST)\n if form.is_valid():\n form.save(commit=True)\n return welcome(request)\n else:\n print('ERROR FORM INVALID')\n return render(request, 'users.html', {'form': form})\n",
"step-5": "from django.shortcuts import render\nfrom django.http import HttpResponse\n# from appTwo.models import User\nfrom appTwo.forms import NewUserForm\n# Create your views here.\n\n\n# def index(request):\n# return HttpResponse(\"<em>My Second Project</em>\")\n\ndef welcome(request):\n # welcomedict={'welcome_insert':'Go to /users to see the list of user information!'}\n return render(request,'welcome.html')\n\ndef users(request):\n # users_list=User.objects.all()\n # user_dict={'users':users_list}\n form = NewUserForm()\n\n if request.method == \"POST\":\n form = NewUserForm(request.POST)\n\n if form.is_valid():\n form.save(commit=True)\n return welcome(request)\n else:\n print('ERROR FORM INVALID')\n\n return render(request,\"users.html\",{'form':form})\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from newspaper import Article
import random
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import nltk
import numpy as np
import warnings
import speech_recognition as sr
warnings.filterwarnings('ignore')
nltk.download('punkt',quiet=True)
nltk.download('wordnet',quiet=True)
article=Article('https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521')
article.download()
article.parse()
article.nlp()
corpus=article.text
#print(corpus)
text=corpus
sent_tokens=nltk.sent_tokenize(text)#convert the text into a list of sentences
#print(sent_tokens)
#create a dictionary (key:value) pair to remove punctuation
remove_punct_dict=dict( (ord(punct),None) for punct in string.punctuation)
#print(string.punctuation)
#print(remove_punct_dict)
#create a function to return a list of lowercase word tokens with punctuation removed, i.e. each sentence is turned into a list of words
def LemNormalize(text):
return nltk.word_tokenize(text.lower().translate(remove_punct_dict))
#prints the tokenized text with the punctuation removed
#print(LemNormalize(text))
#keyword matching
#GREETINGS INPUT
GREETING_INPUTS=["hi","hello","hola","greetings","wassup","hey"]
#greeting response back
GREETING_RESPONSE=["howdy","hi","hey","what's good","hello"]
#function to return a random greeting response
def greeting(sentence):
    #return a randomly chosen response
for word in sentence.split():
if word.lower() in GREETING_INPUTS:
return random.choice(GREETING_RESPONSE)
#generate the response to the given question
def responce(user_responce):
#the user's query is taken
#user_responce='what is chronic kidney disease'
#the user may give his input as capitals so we should convert them into lower()
user_responce=user_responce.lower()
#set the chat bot response to an empty string i.e. declare robo_responce as a string
robo_responce=''
#convert the user_responce into a list
sent_tokens.append(user_responce)
#create a TfidfVectorizer object; it is used to weight how many times a word has occurred
TfidVec=TfidfVectorizer(tokenizer=LemNormalize,stop_words='english')
#convert the text into a matrix of TF-IDF features
tfidf=TfidVec.fit_transform(sent_tokens)
#print(tfidf)
#get the measure of similarity (similarity scores)
vals=cosine_similarity(tfidf[-1],tfidf)
#print(vals)
#get the index of the most similar text/sentence to the user response
idx=vals.argsort()[0][-2]
    #reduce the dimensionality of vals
flat=vals.flatten()
#sort the list in ascending order
flat.sort()
#get the most similar score for the user's response
score=flat[-2]
#print the similarity score
#print(score)
#if the score is 0 then no stored sentence is similar to the user response
if(score==0):
robo_responce=robo_responce+"i aplogise i didn't understand"
else:
robo_responce=robo_responce+sent_tokens[idx]
#print the chat bot response
#print(robo_responce)
sent_tokens.remove(user_responce)
return robo_responce
r=sr.Recognizer()
with sr.Microphone() as source:
flag=True
print("BOT:Iam doctor bot and iam going to answeer your questions")
while(flag==True):
print("speak:")
audio=r.listen(source)
try:
text=r.recognize_google(audio)
print("you said:{}".format(text))
user_responce=text
if(user_responce!='bye'):
if(user_responce=='thanks' or user_responce=='thank you'):
flag=False
print("BOT:you are welcome")
else:
if(greeting(user_responce)!=None):
print("BOT:"+greeting(user_responce))
else:
print("BOT: "+responce(user_responce))
else:
flag=False
print("BOT:chat with u later")
        except (sr.UnknownValueError, sr.RequestError): #a bare except would also swallow KeyboardInterrupt
print("could not recognize")
|
normal
|
{
"blob_id": "53b56cf9265a658d999388f0a1e03d7ceb186213",
"index": 2836,
"step-1": "<mask token>\n\n\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\n\n<mask token>\n\n\ndef greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSE)\n\n\ndef responce(user_responce):\n user_responce = user_responce.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx = vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = robo_responce + \"i aplogise i didn't understand\"\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n sent_tokens.remove(user_responce)\n return robo_responce\n\n\n<mask token>\n",
"step-2": "<mask token>\nwarnings.filterwarnings('ignore')\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\n<mask token>\narticle.download()\narticle.parse()\narticle.nlp()\n<mask token>\n\n\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\n\n<mask token>\n\n\ndef greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSE)\n\n\ndef responce(user_responce):\n user_responce = user_responce.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx = vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = robo_responce + \"i aplogise i didn't understand\"\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n sent_tokens.remove(user_responce)\n return robo_responce\n\n\n<mask token>\nwith sr.Microphone() as source:\n flag = True\n print('BOT:Iam doctor bot and iam going to answeer your questions')\n while flag == True:\n print('speak:')\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print('you said:{}'.format(text))\n user_responce = text\n if user_responce != 'bye':\n if user_responce == 'thanks' or user_responce == 'thank you':\n flag = False\n print('BOT:you are welcome')\n elif greeting(user_responce) != None:\n print('BOT:' + greeting(user_responce))\n else:\n print('BOT: ' + responce(user_responce))\n else:\n flag = False\n print('BOT:chat with u later')\n except:\n print('could not recognize')\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\narticle = Article(\n 'https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521'\n )\narticle.download()\narticle.parse()\narticle.nlp()\ncorpus = article.text\ntext = corpus\nsent_tokens = nltk.sent_tokenize(text)\nremove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)\n\n\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\n\nGREETING_INPUTS = ['hi', 'hello', 'hola', 'greetings', 'wassup', 'hey']\nGREETING_RESPONSE = ['howdy', 'hi', 'hey', \"what's good\", 'hello']\n\n\ndef greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSE)\n\n\ndef responce(user_responce):\n user_responce = user_responce.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx = vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = robo_responce + \"i aplogise i didn't understand\"\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n sent_tokens.remove(user_responce)\n return robo_responce\n\n\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n flag = True\n print('BOT:Iam doctor bot and iam going to answeer your questions')\n while flag == True:\n print('speak:')\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print('you said:{}'.format(text))\n user_responce = text\n if user_responce != 'bye':\n if user_responce == 'thanks' or user_responce == 'thank you':\n flag = False\n print('BOT:you are welcome')\n elif greeting(user_responce) != None:\n print('BOT:' + greeting(user_responce))\n else:\n print('BOT: ' + responce(user_responce))\n else:\n flag = False\n print('BOT:chat with u later')\n except:\n print('could not recognize')\n",
"step-4": "from newspaper import Article\nimport random\nimport string\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport nltk\nimport numpy as np\nimport warnings\nimport speech_recognition as sr\nwarnings.filterwarnings('ignore')\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\narticle = Article(\n 'https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521'\n )\narticle.download()\narticle.parse()\narticle.nlp()\ncorpus = article.text\ntext = corpus\nsent_tokens = nltk.sent_tokenize(text)\nremove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)\n\n\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\n\nGREETING_INPUTS = ['hi', 'hello', 'hola', 'greetings', 'wassup', 'hey']\nGREETING_RESPONSE = ['howdy', 'hi', 'hey', \"what's good\", 'hello']\n\n\ndef greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSE)\n\n\ndef responce(user_responce):\n user_responce = user_responce.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx = vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = robo_responce + \"i aplogise i didn't understand\"\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n sent_tokens.remove(user_responce)\n return robo_responce\n\n\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n flag = True\n print('BOT:Iam doctor bot and iam going to answeer your questions')\n while flag == True:\n print('speak:')\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print('you said:{}'.format(text))\n user_responce = text\n if user_responce != 'bye':\n if user_responce == 'thanks' or user_responce == 'thank you':\n flag = False\n print('BOT:you are welcome')\n elif greeting(user_responce) != None:\n print('BOT:' + greeting(user_responce))\n else:\n print('BOT: ' + responce(user_responce))\n else:\n flag = False\n print('BOT:chat with u later')\n except:\n print('could not recognize')\n",
"step-5": "from newspaper import Article\r\nimport random\r\nimport string\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport nltk\r\nimport numpy as np\r\nimport warnings\r\nimport speech_recognition as sr\r\n\r\n\r\n\r\nwarnings.filterwarnings('ignore')\r\nnltk.download('punkt',quiet=True)\r\nnltk.download('wordnet',quiet=True)\r\narticle=Article('https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521')\r\narticle.download()\r\narticle.parse()\r\narticle.nlp()\r\ncorpus=article.text\r\n#print(corpus)\r\n \r\ntext=corpus\r\nsent_tokens=nltk.sent_tokenize(text)#convert the text into a alist of sentences\r\n#print(sent_tokens)\r\n\r\n#creatre a dictionary (key:value) pair to remove punctuations\r\nremove_punct_dict=dict( (ord(punct),None) for punct in string.punctuation)\r\n#print(string.punctuation)\r\n#print(remove_punct_dict)\r\n\r\n#create ala function to return a list of lenmatized lowercase words after removing puctuatuins.i,e all the sentences in the article are now converted into a list\r\ndef LemNormalize(text):\r\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\r\n#prints the tokenozation text by removing the punctuation\r\n\r\n#print(LemNormalize(text))\r\n\r\n#keyword matching\r\n#GREETINGS INPUT\r\nGREETING_INPUTS=[\"hi\",\"hello\",\"hola\",\"greetings\",\"wassup\",\"hey\"]\r\n#greeting response back\r\nGREETING_RESPONSE=[\"howdy\",\"hi\",\"hey\",\"what's good\",\"hello\"]\r\n#function to return a random greeting response\r\ndef greeting(sentence):\r\n #return a randomly choosen responce\r\n for word in sentence.split():\r\n if word.lower() in GREETING_INPUTS:\r\n return random.choice(GREETING_RESPONSE)\r\n\r\n\r\n\r\n\r\n#generate the respnse to the given question\r\ndef responce(user_responce):\r\n \r\n#the user's query is taken \r\n #user_responce='what is chronic kidney disease'\r\n#the user may give his input as capitals so we should convert them into lower()\r\n user_responce=user_responce.lower()\r\n#set the chat bot respnse to an empt srting i.e declare the roborespnse as a string\r\n robo_responce=''\r\n#convert the user_responce into a list\r\n sent_tokens.append(user_responce)\r\n#create a TfidVectorizer object it is used to know how man tomes a word has occured\r\n TfidVec=TfidfVectorizer(tokenizer=LemNormalize,stop_words='english')\r\n#convert the text into a matrix of TF-IDF features\r\n tfidf=TfidVec.fit_transform(sent_tokens)\r\n#print(tfidf)\r\n\r\n#get the measure of similarity(similarit scores)\r\n vals=cosine_similarity(tfidf[-1],tfidf)\r\n#print(vals)\r\n#get the index of the most similar text/sentence to the user response\r\n idx=vals.argsort()[0][-2]\r\n\r\n #reduce the domensionalit of vals\r\n flat=vals.flatten()\r\n#sort the list in asc\r\n flat.sort()\r\n#get the most simliar score for the user's responce\r\n score=flat[-2]\r\n\r\n\r\n#print the similarit score\r\n#print(score) \r\n#if the score is 0 then the most similar score to the user resoponce\r\n if(score==0):\r\n robo_responce=robo_responce+\"i aplogise i didn't understand\"\r\n else:\r\n robo_responce=robo_responce+sent_tokens[idx]\r\n \r\n#pritn the chat bot respnce\r\n #print(robo_responce)\r\n sent_tokens.remove(user_responce)\r\n return robo_responce\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nr=sr.Recognizer()\r\nwith sr.Microphone() as source:\r\n\r\n\r\n flag=True\r\n print(\"BOT:Iam doctor bot and iam going to answeer your questions\")\r\n 
while(flag==True):\r\n print(\"speak:\")\r\n audio=r.listen(source)\r\n try:\r\n text=r.recognize_google(audio)\r\n print(\"you said:{}\".format(text))\r\n user_responce=text\r\n if(user_responce!='bye'):\r\n if(user_responce=='thanks' or user_responce=='thank you'):\r\n flag=False\r\n print(\"BOT:you are welcome\")\r\n else:\r\n if(greeting(user_responce)!=None):\r\n print(\"BOT:\"+greeting(user_responce))\r\n else:\r\n print(\"BOT: \"+responce(user_responce))\r\n \r\n else:\r\n flag=False\r\n print(\"BOT:chat with u later\")\r\n except:\r\n print(\"could not recognize\")\r\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
__all__ = '''
calc_common_prefix_length
'''.split()
import operator
import itertools
def calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):
if __eq__ is None:
__eq__ = operator.__eq__
idx = -1
for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):
if not __eq__(a, b):
return idx
else:
return idx+1
assert calc_common_prefix_length([], []) == 0
assert calc_common_prefix_length([], [1]) == 0
assert calc_common_prefix_length([1], [1]) == 1
assert calc_common_prefix_length([1,3], [1,2]) == 1
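# A usage sketch (my own example): any pairwise predicate can be supplied
# through the keyword-only __eq__ hook, e.g. a case-insensitive comparison.
def _ci_eq(a, b):
    return a.lower() == b.lower()

assert calc_common_prefix_length('FooBar', 'fooBAZ', __eq__=_ci_eq) == 5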
|
normal
|
{
"blob_id": "2b73c4e07bba7ed5c89a31ebd45655eaa85dcdcc",
"index": 2689,
"step-1": "<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n",
"step-3": "__all__ = \"\"\"\n calc_common_prefix_length\n \"\"\".split()\n<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n",
"step-4": "__all__ = \"\"\"\n calc_common_prefix_length\n \"\"\".split()\nimport operator\nimport itertools\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n",
"step-5": "\n__all__ = '''\n calc_common_prefix_length\n '''.split()\nimport operator\nimport itertools\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx+1\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1,3], [1,2]) == 1\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from fastapi import APIRouter, Depends
from fastapi.responses import RedirectResponse
import app.setting as setting
from app.dependencies import get_project_by_prefix
from app.entities.project import Project
router = APIRouter(
prefix="/go",
)
@router.get("/{prefix_id}")
def redirect_to_board(project: Project = Depends(get_project_by_prefix)):
return RedirectResponse(url=project.notion_board_url)
@router.get("/{prefix_id}/{ticket_id}")
def redirect_to_ticket(
ticket_id: str, project: Project = Depends(get_project_by_prefix)
):
ticket = project.query_ticket(ticket_id=ticket_id)
notion_url = setting.notion_base_url + ticket.id.replace("-", "")
return RedirectResponse(url=notion_url)
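# A hedged usage sketch (not part of the original module): mount the router
# on an app and exercise the board redirect with FastAPI's TestClient.
# 'demo' is a hypothetical prefix_id; a real run needs matching project data
# behind get_project_by_prefix.
if __name__ == "__main__":
    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    app = FastAPI()
    app.include_router(router)
    client = TestClient(app)
    resp = client.get("/go/demo", follow_redirects=False)
    print(resp.status_code, resp.headers.get("location"))  # expect 307 plus the board URL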
|
normal
|
{
"blob_id": "49b295c3e323695779eb32181193ef88b678b34d",
"index": 6340,
"step-1": "<mask token>\n\n\n@router.get('/{prefix_id}')\ndef redirect_to_board(project: Project=Depends(get_project_by_prefix)):\n return RedirectResponse(url=project.notion_board_url)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@router.get('/{prefix_id}')\ndef redirect_to_board(project: Project=Depends(get_project_by_prefix)):\n return RedirectResponse(url=project.notion_board_url)\n\n\n@router.get('/{prefix_id}/{ticket_id}')\ndef redirect_to_ticket(ticket_id: str, project: Project=Depends(\n get_project_by_prefix)):\n ticket = project.query_ticket(ticket_id=ticket_id)\n notion_url = setting.notion_base_url + ticket.id.replace('-', '')\n return RedirectResponse(url=notion_url)\n",
"step-3": "<mask token>\nrouter = APIRouter(prefix='/go')\n\n\n@router.get('/{prefix_id}')\ndef redirect_to_board(project: Project=Depends(get_project_by_prefix)):\n return RedirectResponse(url=project.notion_board_url)\n\n\n@router.get('/{prefix_id}/{ticket_id}')\ndef redirect_to_ticket(ticket_id: str, project: Project=Depends(\n get_project_by_prefix)):\n ticket = project.query_ticket(ticket_id=ticket_id)\n notion_url = setting.notion_base_url + ticket.id.replace('-', '')\n return RedirectResponse(url=notion_url)\n",
"step-4": "from fastapi import APIRouter, Depends\nfrom fastapi.responses import RedirectResponse\nimport app.setting as setting\nfrom app.dependencies import get_project_by_prefix\nfrom app.entities.project import Project\nrouter = APIRouter(prefix='/go')\n\n\n@router.get('/{prefix_id}')\ndef redirect_to_board(project: Project=Depends(get_project_by_prefix)):\n return RedirectResponse(url=project.notion_board_url)\n\n\n@router.get('/{prefix_id}/{ticket_id}')\ndef redirect_to_ticket(ticket_id: str, project: Project=Depends(\n get_project_by_prefix)):\n ticket = project.query_ticket(ticket_id=ticket_id)\n notion_url = setting.notion_base_url + ticket.id.replace('-', '')\n return RedirectResponse(url=notion_url)\n",
"step-5": "from fastapi import APIRouter, Depends\nfrom fastapi.responses import RedirectResponse\n\nimport app.setting as setting\nfrom app.dependencies import get_project_by_prefix\nfrom app.entities.project import Project\n\n\nrouter = APIRouter(\n prefix=\"/go\",\n)\n\n\n@router.get(\"/{prefix_id}\")\ndef redirect_to_board(project: Project = Depends(get_project_by_prefix)):\n return RedirectResponse(url=project.notion_board_url)\n\n\n@router.get(\"/{prefix_id}/{ticket_id}\")\ndef redirect_to_ticket(\n ticket_id: str, project: Project = Depends(get_project_by_prefix)\n):\n ticket = project.query_ticket(ticket_id=ticket_id)\n notion_url = setting.notion_base_url + ticket.id.replace(\"-\", \"\")\n return RedirectResponse(url=notion_url)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
'''
vetor = ["pairs of nails connected by a string"]
The index of the vector represents a nail on the vertical bar, and the
integer stored at each position, the nail on the horizontal bar it is tied to.
For i (vertical) and j (horizontal):
    vetor[i] = j
Given two strings, nails a (vertical) / b (horizontal) and i / j, they cross
when a > i and b < j, or a < i and b > j
(a and i are indices; b and j are the values stored at those positions),
so the number of crossings equals the number of inversions in the vector.
'''
def merge(p,n):
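    # merge-sort based inversion count: every pair with i < j and
    # vet[i] > vet[j] is exactly one crossing of two strings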
global vet
global aux
if n <= 1:
return 0
c = merge(p,n//2) + merge(p+n//2,n-n//2)
d,a,b = 0,0,n//2
while d<n:
if a != n//2 and (b == n or vet[p+a]<vet[p+b]):
aux[d] = vet[p+a]
a+=1
else:
aux[d] = vet[p+b]
            c+=n//2-a #the n//2-a elements still waiting in the left half all exceed vet[p+b]
b+=1
d+=1
for i in range(n):
vet[p+i] = aux[i]
return c
entrada = int(input())
vet = [int(x) for x in input().split()]
aux = [0]*entrada
print(merge(0,entrada))
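# A quick self-check of the counter (test values of my own, not part of the
# original submission); it is defined but not called, so the program's
# output is unchanged.
def _selfcheck():
    global vet, aux
    vet = [1, 3, 2]
    aux = [0] * len(vet)
    assert merge(0, len(vet)) == 1  # only the pair (3, 2) crosses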
|
normal
|
{
"blob_id": "fe081a422db6b7f10c89179beab852c6b74ec687",
"index": 2795,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\n<mask token>\nprint(merge(0, entrada))\n",
"step-4": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\nentrada = int(input())\nvet = [int(x) for x in input().split()]\naux = [0] * entrada\nprint(merge(0, entrada))\n",
"step-5": "'''\nvetor = [\"pares de pregos ligados por uma linha\"]\nindice do vetor representa os pregos na vertical, e o\ninteiro em cada pos, os pregos na horizontal.\n\ni(vertical) e j(horizontal) entao:\n vetor[i] = j\n\npregos a(vertical) e pregos b(horizontal)\n\nse a>i and b<j or a<i and b>j\n\na e i(são indices) b e j(são os elemntos salvos na pos)\n'''\n\ndef merge(p,n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p,n//2) + merge(p+n//2,n-n//2)\n d,a,b = 0,0,n//2\n while d<n:\n if a != n//2 and (b == n or vet[p+a]<vet[p+b]):\n aux[d] = vet[p+a]\n a+=1\n else:\n aux[d] = vet[p+b]\n c+=n//2+a\n b+=1\n d+=1\n for i in range(n):\n vet[p+i] = aux[i]\n return c\n\nentrada = int(input())\nvet = [int(x) for x in input().split()]\naux = [0]*entrada\nprint(merge(0,entrada))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |