code
stringlengths
13
6.09M
order_type
stringclasses
2 values
original_example
dict
step_ids
listlengths
1
5
from django.db import models from django.utils import timezone from accounts.models import AllUser from profiles.models import Profile ### MODEL HOLDING MEMBER TO CLIENT RELATIONSHIPS. ### class MemberClient(models.Model): created = models.DateTimeField(auto_now_add=timezone.now()) client = models.ForeignKey(AllUser, related_name='client', default=None, on_delete=models.CASCADE) member = models.ForeignKey(AllUser, related_name='member', default=None, on_delete=models.CASCADE) profile = models.ForeignKey(Profile, related_name='profile', default=None, on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return "{0}".format(self.client)
normal
{ "blob_id": "b419e26cbf5bbb746f897367ddaa829773a6860c", "index": 7742, "step-1": "<mask token>\n\n\nclass MemberClient(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass MemberClient(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '{0}'.format(self.client)\n", "step-3": "<mask token>\n\n\nclass MemberClient(models.Model):\n created = models.DateTimeField(auto_now_add=timezone.now())\n client = models.ForeignKey(AllUser, related_name='client', default=None,\n on_delete=models.CASCADE)\n member = models.ForeignKey(AllUser, related_name='member', default=None,\n on_delete=models.CASCADE)\n profile = models.ForeignKey(Profile, related_name='profile', default=\n None, on_delete=models.CASCADE, blank=True, null=True)\n\n def __str__(self):\n return '{0}'.format(self.client)\n", "step-4": "from django.db import models\nfrom django.utils import timezone\nfrom accounts.models import AllUser\nfrom profiles.models import Profile\n\n\nclass MemberClient(models.Model):\n created = models.DateTimeField(auto_now_add=timezone.now())\n client = models.ForeignKey(AllUser, related_name='client', default=None,\n on_delete=models.CASCADE)\n member = models.ForeignKey(AllUser, related_name='member', default=None,\n on_delete=models.CASCADE)\n profile = models.ForeignKey(Profile, related_name='profile', default=\n None, on_delete=models.CASCADE, blank=True, null=True)\n\n def __str__(self):\n return '{0}'.format(self.client)\n", "step-5": "from django.db import models\nfrom django.utils import timezone\nfrom accounts.models import AllUser\nfrom profiles.models import Profile\n\n### MODEL HOLDING MEMBER TO CLIENT RELATIONSHIPS. 
###\n\nclass MemberClient(models.Model):\n created = models.DateTimeField(auto_now_add=timezone.now())\n client = models.ForeignKey(AllUser, \n related_name='client', \n default=None, \n on_delete=models.CASCADE)\n member = models.ForeignKey(AllUser,\n related_name='member', \n default=None, \n on_delete=models.CASCADE)\n profile = models.ForeignKey(Profile,\n related_name='profile', \n default=None, \n on_delete=models.CASCADE,\n blank=True,\n null=True)\n \n def __str__(self):\n return \"{0}\".format(self.client)", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import numpy #Matrixmultiplikation #Matrixinvertierung #nicht p inv #selbst invertierbar machen import math import operator
normal
{ "blob_id": "ece20c8c8fae2225cbac3552e254314b7116057c", "index": 7095, "step-1": "<mask token>\n", "step-2": "import numpy\nimport math\nimport operator\n", "step-3": "import numpy\n#Matrixmultiplikation\n#Matrixinvertierung\n#nicht p inv\n#selbst invertierbar machen\n\nimport math\nimport operator", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('meeting', '0004_auto_20210511_0947')] operations = [migrations.AlterField(model_name='event', name='end', field=models.DateTimeField(auto_now_add=True)), migrations. AlterField(model_name='event', name='start', field=models. DateTimeField(auto_now_add=True))] <|reserved_special_token_1|> from django.db import migrations, models class Migration(migrations.Migration): dependencies = [('meeting', '0004_auto_20210511_0947')] operations = [migrations.AlterField(model_name='event', name='end', field=models.DateTimeField(auto_now_add=True)), migrations. AlterField(model_name='event', name='start', field=models. DateTimeField(auto_now_add=True))] <|reserved_special_token_1|> # Generated by Django 3.2.2 on 2021-05-11 09:49 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('meeting', '0004_auto_20210511_0947'), ] operations = [ migrations.AlterField( model_name='event', name='end', field=models.DateTimeField(auto_now_add=True), ), migrations.AlterField( model_name='event', name='start', field=models.DateTimeField(auto_now_add=True), ), ]
flexible
{ "blob_id": "1c1cd0eeea4dbf446aa4582f42ef1f3b5a4e8875", "index": 7452, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('meeting', '0004_auto_20210511_0947')]\n operations = [migrations.AlterField(model_name='event', name='end',\n field=models.DateTimeField(auto_now_add=True)), migrations.\n AlterField(model_name='event', name='start', field=models.\n DateTimeField(auto_now_add=True))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('meeting', '0004_auto_20210511_0947')]\n operations = [migrations.AlterField(model_name='event', name='end',\n field=models.DateTimeField(auto_now_add=True)), migrations.\n AlterField(model_name='event', name='start', field=models.\n DateTimeField(auto_now_add=True))]\n", "step-5": "# Generated by Django 3.2.2 on 2021-05-11 09:49\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('meeting', '0004_auto_20210511_0947'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='event',\n name='end',\n field=models.DateTimeField(auto_now_add=True),\n ),\n migrations.AlterField(\n model_name='event',\n name='start',\n field=models.DateTimeField(auto_now_add=True),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> @register.filter def td_humanize(diff): if diff.total_seconds() < 0: return 'Meni jo!' days = diff.days if days >= 7: weeks, days = divmod(days, 7) result = str(weeks) + ' vk' if days: result += ' ' + str(days) + ' pv' return result elif days: hours, remainder = divmod(diff.seconds, 3600) result = str(days) + ' pv' if hours: result += ' ' + str(hours) + ' h' return result else: hours, remainder = divmod(diff.seconds, 3600) minutes, seconds = divmod(remainder, 60) if minutes >= 30: hours += 1 result = str(hours) + ' h' return result @register.filter def time_from_now(datetime): now = timezone.now() if datetime != 'Ei tiedossa': return td_humanize(datetime - now) else: return 'Ei tiedossa' <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @register.filter def td_humanize(diff): if diff.total_seconds() < 0: return 'Meni jo!' days = diff.days if days >= 7: weeks, days = divmod(days, 7) result = str(weeks) + ' vk' if days: result += ' ' + str(days) + ' pv' return result elif days: hours, remainder = divmod(diff.seconds, 3600) result = str(days) + ' pv' if hours: result += ' ' + str(hours) + ' h' return result else: hours, remainder = divmod(diff.seconds, 3600) minutes, seconds = divmod(remainder, 60) if minutes >= 30: hours += 1 result = str(hours) + ' h' return result @register.filter def time_from_now(datetime): now = timezone.now() if datetime != 'Ei tiedossa': return td_humanize(datetime - now) else: return 'Ei tiedossa' @register.filter def parse_service_code(service_code): if Service.objects.filter(service_code=service_code).exists(): return service_code else: return '180' @register.filter def get_service_name(service_code): try: service = Service.objects.get(service_code=service_code) except ObjectDoesNotExist: return 'Muu' return service.service_name <|reserved_special_token_0|> @register.filter def real_status(feedback): if is_open(feedback): return 'Avoin' else: return 'Suljettu' 
@register.filter def get_expected_datetime(feedback): if feedback.expected_datetime: return feedback.expected_datetime else: time = calc_fixing_time(feedback.service_code) if time > 0: median = timedelta(milliseconds=time) return feedback.requested_datetime + median else: return 'Ei tiedossa' <|reserved_special_token_0|> @register.simple_tag def feedback_vote_icon_status(request, item): if 'vote_id_list' in request.session: if str(item.id) in request.session['vote_id_list']: return 'icon_disabled' return 'icon_enabled' <|reserved_special_token_1|> <|reserved_special_token_0|> @register.filter def td_humanize(diff): if diff.total_seconds() < 0: return 'Meni jo!' days = diff.days if days >= 7: weeks, days = divmod(days, 7) result = str(weeks) + ' vk' if days: result += ' ' + str(days) + ' pv' return result elif days: hours, remainder = divmod(diff.seconds, 3600) result = str(days) + ' pv' if hours: result += ' ' + str(hours) + ' h' return result else: hours, remainder = divmod(diff.seconds, 3600) minutes, seconds = divmod(remainder, 60) if minutes >= 30: hours += 1 result = str(hours) + ' h' return result @register.filter def time_from_now(datetime): now = timezone.now() if datetime != 'Ei tiedossa': return td_humanize(datetime - now) else: return 'Ei tiedossa' @register.filter def parse_service_code(service_code): if Service.objects.filter(service_code=service_code).exists(): return service_code else: return '180' @register.filter def get_service_name(service_code): try: service = Service.objects.get(service_code=service_code) except ObjectDoesNotExist: return 'Muu' return service.service_name <|reserved_special_token_0|> @register.filter def real_status(feedback): if is_open(feedback): return 'Avoin' else: return 'Suljettu' @register.filter def get_expected_datetime(feedback): if feedback.expected_datetime: return feedback.expected_datetime else: time = calc_fixing_time(feedback.service_code) if time > 0: median = timedelta(milliseconds=time) return 
feedback.requested_datetime + median else: return 'Ei tiedossa' @register.simple_tag def navbar_link_class(request, urls): if request.path in (reverse(url) for url in urls.split()): return 'active' return '' @register.simple_tag def feedback_vote_icon_status(request, item): if 'vote_id_list' in request.session: if str(item.id) in request.session['vote_id_list']: return 'icon_disabled' return 'icon_enabled' <|reserved_special_token_1|> <|reserved_special_token_0|> @register.filter def td_humanize(diff): if diff.total_seconds() < 0: return 'Meni jo!' days = diff.days if days >= 7: weeks, days = divmod(days, 7) result = str(weeks) + ' vk' if days: result += ' ' + str(days) + ' pv' return result elif days: hours, remainder = divmod(diff.seconds, 3600) result = str(days) + ' pv' if hours: result += ' ' + str(hours) + ' h' return result else: hours, remainder = divmod(diff.seconds, 3600) minutes, seconds = divmod(remainder, 60) if minutes >= 30: hours += 1 result = str(hours) + ' h' return result @register.filter def time_from_now(datetime): now = timezone.now() if datetime != 'Ei tiedossa': return td_humanize(datetime - now) else: return 'Ei tiedossa' @register.filter def parse_service_code(service_code): if Service.objects.filter(service_code=service_code).exists(): return service_code else: return '180' @register.filter def get_service_name(service_code): try: service = Service.objects.get(service_code=service_code) except ObjectDoesNotExist: return 'Muu' return service.service_name @register.filter def is_open(feedback): if settings.ALLOW_HELSINKI_SPECIFIC_FEATURES: open_strings = ['PUBLIC_WORKS_NEW', 'PUBLIC_WORKS_COMPLETED_SCHEDULED_LATER'] if feedback.status in ['open', 'moderation']: return True else: for string in open_strings: if string in feedback.detailed_status: return True return False else: return feedback.status in ['open', 'moderation'] @register.filter def real_status(feedback): if is_open(feedback): return 'Avoin' else: return 'Suljettu' 
@register.filter def get_expected_datetime(feedback): if feedback.expected_datetime: return feedback.expected_datetime else: time = calc_fixing_time(feedback.service_code) if time > 0: median = timedelta(milliseconds=time) return feedback.requested_datetime + median else: return 'Ei tiedossa' @register.simple_tag def navbar_link_class(request, urls): if request.path in (reverse(url) for url in urls.split()): return 'active' return '' @register.simple_tag def feedback_vote_icon_status(request, item): if 'vote_id_list' in request.session: if str(item.id) in request.session['vote_id_list']: return 'icon_disabled' return 'icon_enabled' <|reserved_special_token_1|> from datetime import timedelta from django import template from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.core.urlresolvers import reverse from django.utils import timezone from api.analysis import * from api.models import Service register = template.Library() # Takes a timdelta object and returns a string indicating how many # weeks, days, hours it is. Does not round, only truncates! @register.filter def td_humanize(diff): if diff.total_seconds() < 0: return "Meni jo!" days = diff.days if days >= 7: weeks, days = divmod(days, 7) result = str(weeks) + " vk" if days: result += " " + str(days) + " pv" return result elif days: hours, remainder = divmod(diff.seconds, 3600) result = str(days) + " pv" if hours: result += " " + str(hours) + " h" return result else: hours, remainder = divmod(diff.seconds, 3600) minutes, seconds = divmod(remainder, 60) if minutes >= 30: hours += 1 result = str(hours) + " h" return result # Takes a datetime object and returns the difference between now and then @register.filter def time_from_now(datetime): now = timezone.now() if datetime != "Ei tiedossa": return td_humanize(datetime - now) else: return "Ei tiedossa" # Check if the given service code is among supported service codes. If it is, return the same code. 
# If not, return code "180". @register.filter def parse_service_code(service_code): if Service.objects.filter(service_code=service_code).exists(): return service_code else: return "180" # Returns the service name based on given service code. This is done because somtimes # service_name is in the wrong language @register.filter def get_service_name(service_code): try: service = Service.objects.get(service_code=service_code) except ObjectDoesNotExist: return "Muu" return service.service_name # Check if the feedback really is open or not. Return true if: # - status == open/moderation # - detailed_status contains specified substrings # If ALLOW_HELSINKI_SPECIFIC_FEATURES == False just return basic status @register.filter def is_open(feedback): if settings.ALLOW_HELSINKI_SPECIFIC_FEATURES: open_strings = ["PUBLIC_WORKS_NEW", "PUBLIC_WORKS_COMPLETED_SCHEDULED_LATER"] if feedback.status in ["open", "moderation"]: return True else: for string in open_strings: if string in feedback.detailed_status: return True return False else: return (feedback.status in ["open", "moderation"]) # Returns the real status string of the feedback @register.filter def real_status(feedback): if is_open(feedback): return "Avoin" else: return "Suljettu" # If the expected_datetime is empty, return median estimation @register.filter def get_expected_datetime(feedback): if feedback.expected_datetime: return feedback.expected_datetime else: time = calc_fixing_time(feedback.service_code) if time > 0: median = timedelta(milliseconds=time) return (feedback.requested_datetime + median) else: return "Ei tiedossa" # Highlights the active navbar link @register.simple_tag def navbar_link_class(request, urls): if request.path in (reverse(url) for url in urls.split()): return "active" return "" # Checks if the user has already voted this feedback and returns a proper class. Uses session data. 
@register.simple_tag def feedback_vote_icon_status(request, item): if "vote_id_list" in request.session: if str(item.id) in request.session["vote_id_list"]: return "icon_disabled" return "icon_enabled"
flexible
{ "blob_id": "43792a647243b9d667d6d98b62a086d742e8e910", "index": 6093, "step-1": "<mask token>\n\n\n@register.filter\ndef td_humanize(diff):\n if diff.total_seconds() < 0:\n return 'Meni jo!'\n days = diff.days\n if days >= 7:\n weeks, days = divmod(days, 7)\n result = str(weeks) + ' vk'\n if days:\n result += ' ' + str(days) + ' pv'\n return result\n elif days:\n hours, remainder = divmod(diff.seconds, 3600)\n result = str(days) + ' pv'\n if hours:\n result += ' ' + str(hours) + ' h'\n return result\n else:\n hours, remainder = divmod(diff.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n if minutes >= 30:\n hours += 1\n result = str(hours) + ' h'\n return result\n\n\n@register.filter\ndef time_from_now(datetime):\n now = timezone.now()\n if datetime != 'Ei tiedossa':\n return td_humanize(datetime - now)\n else:\n return 'Ei tiedossa'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@register.filter\ndef td_humanize(diff):\n if diff.total_seconds() < 0:\n return 'Meni jo!'\n days = diff.days\n if days >= 7:\n weeks, days = divmod(days, 7)\n result = str(weeks) + ' vk'\n if days:\n result += ' ' + str(days) + ' pv'\n return result\n elif days:\n hours, remainder = divmod(diff.seconds, 3600)\n result = str(days) + ' pv'\n if hours:\n result += ' ' + str(hours) + ' h'\n return result\n else:\n hours, remainder = divmod(diff.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n if minutes >= 30:\n hours += 1\n result = str(hours) + ' h'\n return result\n\n\n@register.filter\ndef time_from_now(datetime):\n now = timezone.now()\n if datetime != 'Ei tiedossa':\n return td_humanize(datetime - now)\n else:\n return 'Ei tiedossa'\n\n\n@register.filter\ndef parse_service_code(service_code):\n if Service.objects.filter(service_code=service_code).exists():\n return service_code\n else:\n return '180'\n\n\n@register.filter\ndef get_service_name(service_code):\n try:\n service = Service.objects.get(service_code=service_code)\n except ObjectDoesNotExist:\n 
return 'Muu'\n return service.service_name\n\n\n<mask token>\n\n\n@register.filter\ndef real_status(feedback):\n if is_open(feedback):\n return 'Avoin'\n else:\n return 'Suljettu'\n\n\n@register.filter\ndef get_expected_datetime(feedback):\n if feedback.expected_datetime:\n return feedback.expected_datetime\n else:\n time = calc_fixing_time(feedback.service_code)\n if time > 0:\n median = timedelta(milliseconds=time)\n return feedback.requested_datetime + median\n else:\n return 'Ei tiedossa'\n\n\n<mask token>\n\n\n@register.simple_tag\ndef feedback_vote_icon_status(request, item):\n if 'vote_id_list' in request.session:\n if str(item.id) in request.session['vote_id_list']:\n return 'icon_disabled'\n return 'icon_enabled'\n", "step-3": "<mask token>\n\n\n@register.filter\ndef td_humanize(diff):\n if diff.total_seconds() < 0:\n return 'Meni jo!'\n days = diff.days\n if days >= 7:\n weeks, days = divmod(days, 7)\n result = str(weeks) + ' vk'\n if days:\n result += ' ' + str(days) + ' pv'\n return result\n elif days:\n hours, remainder = divmod(diff.seconds, 3600)\n result = str(days) + ' pv'\n if hours:\n result += ' ' + str(hours) + ' h'\n return result\n else:\n hours, remainder = divmod(diff.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n if minutes >= 30:\n hours += 1\n result = str(hours) + ' h'\n return result\n\n\n@register.filter\ndef time_from_now(datetime):\n now = timezone.now()\n if datetime != 'Ei tiedossa':\n return td_humanize(datetime - now)\n else:\n return 'Ei tiedossa'\n\n\n@register.filter\ndef parse_service_code(service_code):\n if Service.objects.filter(service_code=service_code).exists():\n return service_code\n else:\n return '180'\n\n\n@register.filter\ndef get_service_name(service_code):\n try:\n service = Service.objects.get(service_code=service_code)\n except ObjectDoesNotExist:\n return 'Muu'\n return service.service_name\n\n\n<mask token>\n\n\n@register.filter\ndef real_status(feedback):\n if is_open(feedback):\n return 
'Avoin'\n else:\n return 'Suljettu'\n\n\n@register.filter\ndef get_expected_datetime(feedback):\n if feedback.expected_datetime:\n return feedback.expected_datetime\n else:\n time = calc_fixing_time(feedback.service_code)\n if time > 0:\n median = timedelta(milliseconds=time)\n return feedback.requested_datetime + median\n else:\n return 'Ei tiedossa'\n\n\n@register.simple_tag\ndef navbar_link_class(request, urls):\n if request.path in (reverse(url) for url in urls.split()):\n return 'active'\n return ''\n\n\n@register.simple_tag\ndef feedback_vote_icon_status(request, item):\n if 'vote_id_list' in request.session:\n if str(item.id) in request.session['vote_id_list']:\n return 'icon_disabled'\n return 'icon_enabled'\n", "step-4": "<mask token>\n\n\n@register.filter\ndef td_humanize(diff):\n if diff.total_seconds() < 0:\n return 'Meni jo!'\n days = diff.days\n if days >= 7:\n weeks, days = divmod(days, 7)\n result = str(weeks) + ' vk'\n if days:\n result += ' ' + str(days) + ' pv'\n return result\n elif days:\n hours, remainder = divmod(diff.seconds, 3600)\n result = str(days) + ' pv'\n if hours:\n result += ' ' + str(hours) + ' h'\n return result\n else:\n hours, remainder = divmod(diff.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n if minutes >= 30:\n hours += 1\n result = str(hours) + ' h'\n return result\n\n\n@register.filter\ndef time_from_now(datetime):\n now = timezone.now()\n if datetime != 'Ei tiedossa':\n return td_humanize(datetime - now)\n else:\n return 'Ei tiedossa'\n\n\n@register.filter\ndef parse_service_code(service_code):\n if Service.objects.filter(service_code=service_code).exists():\n return service_code\n else:\n return '180'\n\n\n@register.filter\ndef get_service_name(service_code):\n try:\n service = Service.objects.get(service_code=service_code)\n except ObjectDoesNotExist:\n return 'Muu'\n return service.service_name\n\n\n@register.filter\ndef is_open(feedback):\n if settings.ALLOW_HELSINKI_SPECIFIC_FEATURES:\n open_strings = 
['PUBLIC_WORKS_NEW',\n 'PUBLIC_WORKS_COMPLETED_SCHEDULED_LATER']\n if feedback.status in ['open', 'moderation']:\n return True\n else:\n for string in open_strings:\n if string in feedback.detailed_status:\n return True\n return False\n else:\n return feedback.status in ['open', 'moderation']\n\n\n@register.filter\ndef real_status(feedback):\n if is_open(feedback):\n return 'Avoin'\n else:\n return 'Suljettu'\n\n\n@register.filter\ndef get_expected_datetime(feedback):\n if feedback.expected_datetime:\n return feedback.expected_datetime\n else:\n time = calc_fixing_time(feedback.service_code)\n if time > 0:\n median = timedelta(milliseconds=time)\n return feedback.requested_datetime + median\n else:\n return 'Ei tiedossa'\n\n\n@register.simple_tag\ndef navbar_link_class(request, urls):\n if request.path in (reverse(url) for url in urls.split()):\n return 'active'\n return ''\n\n\n@register.simple_tag\ndef feedback_vote_icon_status(request, item):\n if 'vote_id_list' in request.session:\n if str(item.id) in request.session['vote_id_list']:\n return 'icon_disabled'\n return 'icon_enabled'\n", "step-5": "from datetime import timedelta\n\nfrom django import template\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.utils import timezone\n\nfrom api.analysis import *\nfrom api.models import Service\n\nregister = template.Library()\n\n\n# Takes a timdelta object and returns a string indicating how many\n# weeks, days, hours it is. 
Does not round, only truncates!\n@register.filter\ndef td_humanize(diff):\n if diff.total_seconds() < 0:\n return \"Meni jo!\"\n days = diff.days\n if days >= 7:\n weeks, days = divmod(days, 7)\n result = str(weeks) + \" vk\"\n if days:\n result += \" \" + str(days) + \" pv\"\n return result\n elif days:\n hours, remainder = divmod(diff.seconds, 3600)\n result = str(days) + \" pv\"\n if hours:\n result += \" \" + str(hours) + \" h\"\n return result\n else:\n hours, remainder = divmod(diff.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n if minutes >= 30:\n hours += 1\n result = str(hours) + \" h\"\n return result\n\n\n# Takes a datetime object and returns the difference between now and then\n@register.filter\ndef time_from_now(datetime):\n now = timezone.now()\n if datetime != \"Ei tiedossa\":\n return td_humanize(datetime - now)\n else:\n return \"Ei tiedossa\"\n\n\n# Check if the given service code is among supported service codes. If it is, return the same code.\n# If not, return code \"180\".\n@register.filter\ndef parse_service_code(service_code):\n if Service.objects.filter(service_code=service_code).exists():\n return service_code\n else:\n return \"180\"\n\n\n# Returns the service name based on given service code. This is done because somtimes\n# service_name is in the wrong language\n@register.filter\ndef get_service_name(service_code):\n try:\n service = Service.objects.get(service_code=service_code)\n except ObjectDoesNotExist:\n return \"Muu\"\n return service.service_name\n\n\n# Check if the feedback really is open or not. 
Return true if:\n# \t- status == open/moderation\n#\t- detailed_status contains specified substrings\n# If ALLOW_HELSINKI_SPECIFIC_FEATURES == False just return basic status\n@register.filter\ndef is_open(feedback):\n if settings.ALLOW_HELSINKI_SPECIFIC_FEATURES:\n open_strings = [\"PUBLIC_WORKS_NEW\", \"PUBLIC_WORKS_COMPLETED_SCHEDULED_LATER\"]\n if feedback.status in [\"open\", \"moderation\"]:\n return True\n else:\n for string in open_strings:\n if string in feedback.detailed_status:\n return True\n return False\n else:\n return (feedback.status in [\"open\", \"moderation\"])\n\n\n# Returns the real status string of the feedback\n@register.filter\ndef real_status(feedback):\n if is_open(feedback):\n return \"Avoin\"\n else:\n return \"Suljettu\"\n\n\n# If the expected_datetime is empty, return median estimation\n@register.filter\ndef get_expected_datetime(feedback):\n if feedback.expected_datetime:\n return feedback.expected_datetime\n else:\n time = calc_fixing_time(feedback.service_code)\n if time > 0:\n median = timedelta(milliseconds=time)\n return (feedback.requested_datetime + median)\n else:\n return \"Ei tiedossa\"\n\n\n# Highlights the active navbar link\n@register.simple_tag\ndef navbar_link_class(request, urls):\n if request.path in (reverse(url) for url in urls.split()):\n return \"active\"\n return \"\"\n\n\n# Checks if the user has already voted this feedback and returns a proper class. Uses session data.\n@register.simple_tag\ndef feedback_vote_icon_status(request, item):\n if \"vote_id_list\" in request.session:\n if str(item.id) in request.session[\"vote_id_list\"]:\n return \"icon_disabled\"\n return \"icon_enabled\"\n", "step-ids": [ 2, 7, 8, 9, 12 ] }
[ 2, 7, 8, 9, 12 ]
# defining private variables class Privacy: def __init__(self, val): self.__val = 900; print("Private data member =",self.__val,"\n") value = Privacy(800); print("Value not changable\n") value.__val;
normal
{ "blob_id": "b767519229058b50183d78bb97121f050e5b6bad", "index": 423, "step-1": "class Privacy:\n <mask token>\n\n\n<mask token>\n", "step-2": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\n<mask token>\n", "step-3": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\n<mask token>\nprint('Value not changable\\n')\nvalue.__val\n", "step-4": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\nvalue = Privacy(800)\nprint('Value not changable\\n')\nvalue.__val\n", "step-5": "# defining private variables\r\nclass Privacy:\r\n def __init__(self, val):\r\n self.__val = 900; \r\n print(\"Private data member =\",self.__val,\"\\n\")\r\nvalue = Privacy(800);\r\nprint(\"Value not changable\\n\")\r\nvalue.__val;\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
def ispalindrome(s): if len(s) <= 1: return True elif s[0] != s[-1]: return False else: return ispalindrome(s[1:-1])
normal
{ "blob_id": "c20a414f7f96a96f6e458fc27e5d2c7ac7ab05cf", "index": 8574, "step-1": "<mask token>\n", "step-2": "def ispalindrome(s):\n if len(s) <= 1:\n return True\n elif s[0] != s[-1]:\n return False\n else:\n return ispalindrome(s[1:-1])\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
<|reserved_special_token_0|> def patternToNumber(pattern): if len(pattern) == 0: return 0 return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:]) def symbolToNumber(symbol): if symbol == 'A': return 0 if symbol == 'C': return 1 if symbol == 'G': return 2 if symbol == 'T': return 3 def numberToPattern(index, k): if k == 1: return numberToSymbol(index) return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4) <|reserved_special_token_0|> def profileProbable(text, k, profile): maxprob = 0 kmer = text[0:k] for i in range(0, len(text) - k + 1): prob = 1 pattern = text[i:i + k] for j in range(k): l = symbolToNumber(pattern[j]) prob *= profile[l][j] if prob > maxprob: maxprob = prob kmer = pattern return kmer def hammingDistance(p, q): ham = 0 for index, y in zip(p, q): if index != y: ham += 1 return ham def distanceBetweenPatternAndString(pattern, DNA): k = len(pattern) distance = 0 for index in DNA: hamming = k + 1 for i in range(len(index) - k + 1): z = hammingDistance(pattern, index[i:i + k]) if hamming > z: hamming = z distance += hamming return distance def profileForm(motifs): k = len(motifs[0]) profile = [[(1) for i in range(k)] for j in range(4)] for index in motifs: for i in range(len(index)): j = symbolToNumber(index[i]) profile[j][i] += 1 for index in profile: for i in range(len(index)): index[i] = index[i] / len(motifs) return profile <|reserved_special_token_0|> def score(motifs): profile = profileForm(motifs) cons = consensus(profile) score = 0 for index in motifs: for i in range(len(index)): if cons[i] != index[i]: score += 1 return score <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def patternToNumber(pattern): if len(pattern) == 0: return 0 return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:]) def symbolToNumber(symbol): if symbol == 'A': return 0 if symbol == 'C': return 1 if symbol == 'G': return 2 if symbol == 'T': return 3 def numberToPattern(index, k): if k == 
1: return numberToSymbol(index) return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4) def numberToSymbol(index): if index == 0: return 'A' if index == 1: return 'C' if index == 2: return 'G' if index == 3: return 'T' def profileProbable(text, k, profile): maxprob = 0 kmer = text[0:k] for i in range(0, len(text) - k + 1): prob = 1 pattern = text[i:i + k] for j in range(k): l = symbolToNumber(pattern[j]) prob *= profile[l][j] if prob > maxprob: maxprob = prob kmer = pattern return kmer def hammingDistance(p, q): ham = 0 for index, y in zip(p, q): if index != y: ham += 1 return ham def distanceBetweenPatternAndString(pattern, DNA): k = len(pattern) distance = 0 for index in DNA: hamming = k + 1 for i in range(len(index) - k + 1): z = hammingDistance(pattern, index[i:i + k]) if hamming > z: hamming = z distance += hamming return distance def profileForm(motifs): k = len(motifs[0]) profile = [[(1) for i in range(k)] for j in range(4)] for index in motifs: for i in range(len(index)): j = symbolToNumber(index[i]) profile[j][i] += 1 for index in profile: for i in range(len(index)): index[i] = index[i] / len(motifs) return profile <|reserved_special_token_0|> def score(motifs): profile = profileForm(motifs) cons = consensus(profile) score = 0 for index in motifs: for i in range(len(index)): if cons[i] != index[i]: score += 1 return score <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def patternToNumber(pattern): if len(pattern) == 0: return 0 return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:]) def symbolToNumber(symbol): if symbol == 'A': return 0 if symbol == 'C': return 1 if symbol == 'G': return 2 if symbol == 'T': return 3 def numberToPattern(index, k): if k == 1: return numberToSymbol(index) return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4) def numberToSymbol(index): if index == 0: return 'A' if index == 1: return 'C' if index == 2: return 'G' if index == 3: return 'T' def 
profileProbable(text, k, profile): maxprob = 0 kmer = text[0:k] for i in range(0, len(text) - k + 1): prob = 1 pattern = text[i:i + k] for j in range(k): l = symbolToNumber(pattern[j]) prob *= profile[l][j] if prob > maxprob: maxprob = prob kmer = pattern return kmer def hammingDistance(p, q): ham = 0 for index, y in zip(p, q): if index != y: ham += 1 return ham def distanceBetweenPatternAndString(pattern, DNA): k = len(pattern) distance = 0 for index in DNA: hamming = k + 1 for i in range(len(index) - k + 1): z = hammingDistance(pattern, index[i:i + k]) if hamming > z: hamming = z distance += hamming return distance def profileForm(motifs): k = len(motifs[0]) profile = [[(1) for i in range(k)] for j in range(4)] for index in motifs: for i in range(len(index)): j = symbolToNumber(index[i]) profile[j][i] += 1 for index in profile: for i in range(len(index)): index[i] = index[i] / len(motifs) return profile def consensus(profile): str = '' for i in range(len(profile[0])): max = 0 loc = 0 for j in range(4): if profile[j][i] > max: loc = j max = profile[j][i] str += numberToSymbol(loc) return str def score(motifs): profile = profileForm(motifs) cons = consensus(profile) score = 0 for index in motifs: for i in range(len(index)): if cons[i] != index[i]: score += 1 return score def randomMotifSearch(DNA, k, t): bestMotifs = [] motifs = [] for index in range(t): random.seed() i = random.randint(0, len(DNA[index]) - k) motifs.append(DNA[index][i:i + k]) bestMotifs = motifs.copy() count = 0 while True: profile = profileForm(motifs) for index in range(t): motifs[index] = profileProbable(DNA[index], k, profile) if score(motifs) < score(bestMotifs): bestMotifs = motifs.copy() count += 1 else: print(count) return bestMotifs <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def patternToNumber(pattern): if len(pattern) == 0: return 0 return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:]) def symbolToNumber(symbol): if symbol 
== 'A': return 0 if symbol == 'C': return 1 if symbol == 'G': return 2 if symbol == 'T': return 3 def numberToPattern(index, k): if k == 1: return numberToSymbol(index) return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4) def numberToSymbol(index): if index == 0: return 'A' if index == 1: return 'C' if index == 2: return 'G' if index == 3: return 'T' def profileProbable(text, k, profile): maxprob = 0 kmer = text[0:k] for i in range(0, len(text) - k + 1): prob = 1 pattern = text[i:i + k] for j in range(k): l = symbolToNumber(pattern[j]) prob *= profile[l][j] if prob > maxprob: maxprob = prob kmer = pattern return kmer def hammingDistance(p, q): ham = 0 for index, y in zip(p, q): if index != y: ham += 1 return ham def distanceBetweenPatternAndString(pattern, DNA): k = len(pattern) distance = 0 for index in DNA: hamming = k + 1 for i in range(len(index) - k + 1): z = hammingDistance(pattern, index[i:i + k]) if hamming > z: hamming = z distance += hamming return distance def profileForm(motifs): k = len(motifs[0]) profile = [[(1) for i in range(k)] for j in range(4)] for index in motifs: for i in range(len(index)): j = symbolToNumber(index[i]) profile[j][i] += 1 for index in profile: for i in range(len(index)): index[i] = index[i] / len(motifs) return profile def consensus(profile): str = '' for i in range(len(profile[0])): max = 0 loc = 0 for j in range(4): if profile[j][i] > max: loc = j max = profile[j][i] str += numberToSymbol(loc) return str def score(motifs): profile = profileForm(motifs) cons = consensus(profile) score = 0 for index in motifs: for i in range(len(index)): if cons[i] != index[i]: score += 1 return score def randomMotifSearch(DNA, k, t): bestMotifs = [] motifs = [] for index in range(t): random.seed() i = random.randint(0, len(DNA[index]) - k) motifs.append(DNA[index][i:i + k]) bestMotifs = motifs.copy() count = 0 while True: profile = profileForm(motifs) for index in range(t): motifs[index] = profileProbable(DNA[index], k, 
profile) if score(motifs) < score(bestMotifs): bestMotifs = motifs.copy() count += 1 else: print(count) return bestMotifs k = 15 t = 20 DNA = [ 'ACTTATATCTAGAGTAAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCGAGTGATTGAACTGACTTATATCTAGAGT' , 'AAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCCTCTCGATCACCGACGAGTGATTGAACTGACTTATATCTAGAGT' , 'CACTCCCGTCCGTCTGACGCCAGGTGCTCTACCCCGCTGATTGTCTGGTACATAGCAGCCTATAGATCACCGATGCAGAAACACTTCGAGGCAGCCGATTTCGCTTATCACAACGTGACGGAATTTGATAAACCACGTACTCTAATACCGTCACGGGCCCATCAACGAA' , 'ACAAGAACTGGTGGGGAGACTATGACACTCTAGCGGTCGCATAAGGGCCGGAAACCAGGACAAATCGATAAGATGAAGCGGGGATATAAGCCTTATACTGCGACTGGTTCCTTATATTATTTAGCCCCGATTGATCACCGATTAAAATATTCTGCGGTTTTCGAGACGG' , 'TAACCACACCTAAAATTTTTCTTGGTGAGATGGACCCCCGCCGTAAATATCAGGATTAAATGTACGGATACCCATGACCCTCCAGTCATCTACCTTCCCGTGGTGGTCGCTCAGCCTTGTGCAGACCGAACTAGCACCTGTCACATACAATGTTGCCCGCATAGATCGT' , 'ATCCGACAGAGGCAGTGAATAAGGTTTCGTTTCCTCAGAGAGTAGAACTGCGTGTGACCTTGCCTTCACCGACATCCGTTTCCAATTGAGCTTTTCAGGACGTTTAGGTAACTGATTGTCATTGCAATTGTCCGGGGGATTTAGATGGCCGGGTACCTCTCGGACTATA' , 'CCTTGTTGCCACCGATTCGCGAGCAACATCGGAGTGCTCTGATTCACGGCGATGCTCCACGAAGAGGACCGCGGCACGACACGCCCTGTACCTACGTTTCTGGATATCCTCCGGCGAGTTAATAGAGCAATACGACCTGGTCGTCGAGATCGTGTATCTAGCCCTACCT' , 'ATAGGTTAACGAATCAGGAGAGTTAATTTTACCTAGCTAGAGCGGACGGTGCCTGGCTGTATTCGCGTTTGACTTTCGGGCTCGCTGATAACTTGTGATCACCTTTTACGCTTACTGGATCCAACGATGGATCAAAGTTGAGAATTTCTGTGCCTTGGGTGTGAGCTGT' , 'CTGACGAAAGGACGGGCGGTGTACTTAGTTTGGGGTAAAATAGTTGGTATAATTCTGTGCGACAGACATTTGGTCAGGCCATACTGCCATATCGTGATGTAACTATCCACACTACGTCATAGGCCCTTGTGATCAATTAAACGTTCCTCATGCCAGGCTATCTGTTTAA' , 'GGCTTCGCGTTTAAGGCTGGATTAAGTACTCCGCCTTGTGATCTGTGATCCTCCGACCTGTGATCAGCAAGATTGGAACCTAGGTAGGCGGCGGGTCTACGCTGGCCCACAATCGTGAGTCCCCCACTCCGTAGGTTGTGGAATTTATAGACCCGCAAGGGGCACCACT' , 
'AGGATGACACCCAGGATGAATCTGGATTAGGAACACCAACCCGACATATTTGTTACCGCTGCAGCATTTCGCTCTTGGACGCGTAACCCGAGATCCGTCTCGCGATCGTCACGGATCGGGATTATGCAGGCAATACCTTGTGATCACTCCGCGCTTGGTTTTGCTAGCG' , 'ACATCTCTAGTCACTTTTATTGAGCAGGTGGGCGGATTCATGATCCGGCTCTGTCGTACGTCCAACCACGGTGACATGTTCGGAGCTGTCGCCGTGGAGCAGAGATACATCGGATCTATCAATTTTACTAAGAGCAACTAGCCACGACAAACTGTGATCACCGATTGGA' , 'AATTTGCGTATCTCTAGGACTCCCTCATACAAATCAAAGCTTGGATGGGTAAGATGCCGCAGCAGCAGGTATCTCATATTGGCTATTAAGAGCCAGGCCCTATGGCCTTAGTATCACCGATCAGACGTCGCATGAGCGGGCCCGTTGTCCTATCTCTTTAGCTGCCGCA' , 'GAAGTAAAGGGGTTCCACTGCGTAGAGCGTGCCCCTCTGGTGTGCCGTACTGTTATGGTGATACAGCTTCCTTATACCCCTCGTAAAGCGGCTAATGGTCCTAATGAATGCCCTTGTGAAATCCGAATCGCTTTACAATTGCGTTCGGCGGAATGCAGTCACCAGTGTT' , 'TACACTACGCGTTATTTACTTTTACTGAGTCCTTGTCGCCACCGAACGAGGATTGTTCATTGTATCCGGAGATTAGGAGTTCGCATCGCTGACACAGCCAGTTCGTAGCAAATACCGCTGGCCCTGGGCACTCCAGATCAGAACTACTAGCCCTAAACTCTATGACACA' , 'TTGGGTCTCGATCCCTCTATGTTAAGCTGTTCCGTGGAGAATCTCCTGGGTTTTATGATTTGAATGACGAGAATTGGGAAGTCGGGATGTTGTGATCACCGCCGTTCGCTTTCATAAATGAACCCCTTTTTTTCAGCAGACGGTGGCCTTTCCCTTTCATCATTATACA' , 'TTTCAAGTTACTACCGCCCTCTAGCGATAGAACTGAGGCAAATCATACACCGTGATCACCGACCCATGGAGTTTGACTCAGATTTACACTTTTAGGGGAACATGTTTGTCGGTCAGAGGTGTCAATTATTAGCAGATATCCCCCAACGCAGCGAGAGAGCACGGAGTGA' , 'GATCCATTACCCTACGATATGTATATAGCGCCCTAGTACGGCTTCTCCCTTGCAGACACGCAGGCGCTGTGCGCTATCGGCTTCCTCGGACATTCCTGGATATAAGTAACGGCGAACTGGCTATCACTACCGCCGCTCCTTAAGCCTTGGTTTCACCGACGATTGTCGT' , 'TAGTAGATTATTACCTGTGGACCGTTAGCTTCAAGACCGAAACGTTGGTGATGCTACTTAAATGTCAAGAGTTGCGAAGTTGGGCGAAGCACATCCGTACTCCCAAGTGGACGATCGATAGATCCATGGAGTTTCCATCCATCTTAATCCGCCCTTTGCATCACCGACG' , 'TACAAGGCACAAACGAGACCTGATCGAACGGTGCACGGTCGAGGCAGCGAGATAAATGTACATTGAGAGCACCTTGTGATTTACGACCTGCATCGAAGGTTTCTTGGCACCCACCTGTCGTCCGCCAGGGCAGAGCCGACATTATATGACGCTGATGTACGAAGCCCCT' ] best = randomMotifSearch(DNA, k, t) min = score(best) for index in range(1000): print(index) a = randomMotifSearch(DNA, k, t) if score(a) < score(best): best = a min = score(a) print(min) for index in best: print(index) <|reserved_special_token_1|> import 
random def patternToNumber(pattern): if len(pattern) == 0: return 0 return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:]) def symbolToNumber(symbol): if symbol == "A": return 0 if symbol == "C": return 1 if symbol == "G": return 2 if symbol == "T": return 3 def numberToPattern(index, k): if k == 1: return numberToSymbol(index) return numberToPattern(index // 4, k-1) + numberToSymbol(index % 4) def numberToSymbol(index): if index == 0: return "A" if index == 1: return "C" if index == 2: return "G" if index == 3: return "T" def profileProbable(text, k, profile): maxprob = 0 kmer = text[0:k] for i in range(0, len(text) - k +1): prob =1 pattern =text[i:i+k] for j in range(k): l = symbolToNumber(pattern[j]) prob *= profile [l][j] if prob > maxprob: maxprob =prob kmer = pattern return kmer def hammingDistance(p, q): ham = 0 for index, y in zip(p, q): if index != y: ham +=1 return ham def distanceBetweenPatternAndString(pattern, DNA): k = len(pattern) distance = 0 for index in DNA: hamming = k+1 for i in range(len(index) - k + 1): z = hammingDistance(pattern, index[i:i+k]) if hamming > z: hamming = z distance += hamming return distance def profileForm(motifs): k= len(motifs[0]) profile = [[1 for i in range(k)] for j in range(4)] for index in motifs: for i in range(len(index)): j = symbolToNumber(index[i]) profile[j][i] +=1 for index in profile: for i in range(len(index)): index[i] = index[i]/len(motifs) return profile def consensus(profile): str = "" for i in range(len(profile[0])): max = 0 loc = 0 for j in range(4): if profile[j][i] > max: loc = j max = profile[j][i] str+=numberToSymbol(loc) return str def score(motifs): profile = profileForm(motifs) cons = consensus(profile) score = 0 for index in motifs: for i in range(len(index)): if cons[i] != index[i]: score +=1 return score def randomMotifSearch(DNA, k, t): bestMotifs = [] motifs = [] for index in range(t): random.seed() i= random.randint(0, len(DNA[index])-k) motifs.append(DNA[index][i:i+k]) 
bestMotifs = motifs.copy() count = 0 while True: profile = profileForm(motifs) for index in range(t): motifs[index] = profileProbable(DNA[index], k, profile) if score(motifs) < score(bestMotifs): bestMotifs = motifs.copy() count +=1 else: print(count) return bestMotifs k = 15 t = 20 DNA = ["ACTTATATCTAGAGTAAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCGAGTGATTGAACTGACTTATATCTAGAGT", "AAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCCTCTCGATCACCGACGAGTGATTGAACTGACTTATATCTAGAGT", "CACTCCCGTCCGTCTGACGCCAGGTGCTCTACCCCGCTGATTGTCTGGTACATAGCAGCCTATAGATCACCGATGCAGAAACACTTCGAGGCAGCCGATTTCGCTTATCACAACGTGACGGAATTTGATAAACCACGTACTCTAATACCGTCACGGGCCCATCAACGAA", "ACAAGAACTGGTGGGGAGACTATGACACTCTAGCGGTCGCATAAGGGCCGGAAACCAGGACAAATCGATAAGATGAAGCGGGGATATAAGCCTTATACTGCGACTGGTTCCTTATATTATTTAGCCCCGATTGATCACCGATTAAAATATTCTGCGGTTTTCGAGACGG", "TAACCACACCTAAAATTTTTCTTGGTGAGATGGACCCCCGCCGTAAATATCAGGATTAAATGTACGGATACCCATGACCCTCCAGTCATCTACCTTCCCGTGGTGGTCGCTCAGCCTTGTGCAGACCGAACTAGCACCTGTCACATACAATGTTGCCCGCATAGATCGT", "ATCCGACAGAGGCAGTGAATAAGGTTTCGTTTCCTCAGAGAGTAGAACTGCGTGTGACCTTGCCTTCACCGACATCCGTTTCCAATTGAGCTTTTCAGGACGTTTAGGTAACTGATTGTCATTGCAATTGTCCGGGGGATTTAGATGGCCGGGTACCTCTCGGACTATA", "CCTTGTTGCCACCGATTCGCGAGCAACATCGGAGTGCTCTGATTCACGGCGATGCTCCACGAAGAGGACCGCGGCACGACACGCCCTGTACCTACGTTTCTGGATATCCTCCGGCGAGTTAATAGAGCAATACGACCTGGTCGTCGAGATCGTGTATCTAGCCCTACCT", "ATAGGTTAACGAATCAGGAGAGTTAATTTTACCTAGCTAGAGCGGACGGTGCCTGGCTGTATTCGCGTTTGACTTTCGGGCTCGCTGATAACTTGTGATCACCTTTTACGCTTACTGGATCCAACGATGGATCAAAGTTGAGAATTTCTGTGCCTTGGGTGTGAGCTGT", "CTGACGAAAGGACGGGCGGTGTACTTAGTTTGGGGTAAAATAGTTGGTATAATTCTGTGCGACAGACATTTGGTCAGGCCATACTGCCATATCGTGATGTAACTATCCACACTACGTCATAGGCCCTTGTGATCAATTAAACGTTCCTCATGCCAGGCTATCTGTTTAA", 
"GGCTTCGCGTTTAAGGCTGGATTAAGTACTCCGCCTTGTGATCTGTGATCCTCCGACCTGTGATCAGCAAGATTGGAACCTAGGTAGGCGGCGGGTCTACGCTGGCCCACAATCGTGAGTCCCCCACTCCGTAGGTTGTGGAATTTATAGACCCGCAAGGGGCACCACT", "AGGATGACACCCAGGATGAATCTGGATTAGGAACACCAACCCGACATATTTGTTACCGCTGCAGCATTTCGCTCTTGGACGCGTAACCCGAGATCCGTCTCGCGATCGTCACGGATCGGGATTATGCAGGCAATACCTTGTGATCACTCCGCGCTTGGTTTTGCTAGCG", "ACATCTCTAGTCACTTTTATTGAGCAGGTGGGCGGATTCATGATCCGGCTCTGTCGTACGTCCAACCACGGTGACATGTTCGGAGCTGTCGCCGTGGAGCAGAGATACATCGGATCTATCAATTTTACTAAGAGCAACTAGCCACGACAAACTGTGATCACCGATTGGA", "AATTTGCGTATCTCTAGGACTCCCTCATACAAATCAAAGCTTGGATGGGTAAGATGCCGCAGCAGCAGGTATCTCATATTGGCTATTAAGAGCCAGGCCCTATGGCCTTAGTATCACCGATCAGACGTCGCATGAGCGGGCCCGTTGTCCTATCTCTTTAGCTGCCGCA", "GAAGTAAAGGGGTTCCACTGCGTAGAGCGTGCCCCTCTGGTGTGCCGTACTGTTATGGTGATACAGCTTCCTTATACCCCTCGTAAAGCGGCTAATGGTCCTAATGAATGCCCTTGTGAAATCCGAATCGCTTTACAATTGCGTTCGGCGGAATGCAGTCACCAGTGTT", "TACACTACGCGTTATTTACTTTTACTGAGTCCTTGTCGCCACCGAACGAGGATTGTTCATTGTATCCGGAGATTAGGAGTTCGCATCGCTGACACAGCCAGTTCGTAGCAAATACCGCTGGCCCTGGGCACTCCAGATCAGAACTACTAGCCCTAAACTCTATGACACA", "TTGGGTCTCGATCCCTCTATGTTAAGCTGTTCCGTGGAGAATCTCCTGGGTTTTATGATTTGAATGACGAGAATTGGGAAGTCGGGATGTTGTGATCACCGCCGTTCGCTTTCATAAATGAACCCCTTTTTTTCAGCAGACGGTGGCCTTTCCCTTTCATCATTATACA", "TTTCAAGTTACTACCGCCCTCTAGCGATAGAACTGAGGCAAATCATACACCGTGATCACCGACCCATGGAGTTTGACTCAGATTTACACTTTTAGGGGAACATGTTTGTCGGTCAGAGGTGTCAATTATTAGCAGATATCCCCCAACGCAGCGAGAGAGCACGGAGTGA", "GATCCATTACCCTACGATATGTATATAGCGCCCTAGTACGGCTTCTCCCTTGCAGACACGCAGGCGCTGTGCGCTATCGGCTTCCTCGGACATTCCTGGATATAAGTAACGGCGAACTGGCTATCACTACCGCCGCTCCTTAAGCCTTGGTTTCACCGACGATTGTCGT", "TAGTAGATTATTACCTGTGGACCGTTAGCTTCAAGACCGAAACGTTGGTGATGCTACTTAAATGTCAAGAGTTGCGAAGTTGGGCGAAGCACATCCGTACTCCCAAGTGGACGATCGATAGATCCATGGAGTTTCCATCCATCTTAATCCGCCCTTTGCATCACCGACG", "TACAAGGCACAAACGAGACCTGATCGAACGGTGCACGGTCGAGGCAGCGAGATAAATGTACATTGAGAGCACCTTGTGATTTACGACCTGCATCGAAGGTTTCTTGGCACCCACCTGTCGTCCGCCAGGGCAGAGCCGACATTATATGACGCTGATGTACGAAGCCCCT"] best = randomMotifSearch(DNA, k, t) min = score(best) for index in range(1000): print(index) a = 
randomMotifSearch(DNA, k, t) if score(a) < score(best): best = a min = score(a) print(min) for index in best: print(index)
flexible
{ "blob_id": "51848a64102f7fe8272fcf56a9792ed50c430538", "index": 9115, "step-1": "<mask token>\n\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\n\ndef symbolToNumber(symbol):\n if symbol == 'A':\n return 0\n if symbol == 'C':\n return 1\n if symbol == 'G':\n return 2\n if symbol == 'T':\n return 3\n\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4)\n\n\n<mask token>\n\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k + 1):\n prob = 1\n pattern = text[i:i + k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile[l][j]\n if prob > maxprob:\n maxprob = prob\n kmer = pattern\n return kmer\n\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham += 1\n return ham\n\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k + 1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i + k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\n\ndef profileForm(motifs):\n k = len(motifs[0])\n profile = [[(1) for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] += 1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i] / len(motifs)\n return profile\n\n\n<mask token>\n\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score += 1\n return score\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + 
symbolToNumber(pattern[-1:])\n\n\ndef symbolToNumber(symbol):\n if symbol == 'A':\n return 0\n if symbol == 'C':\n return 1\n if symbol == 'G':\n return 2\n if symbol == 'T':\n return 3\n\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4)\n\n\ndef numberToSymbol(index):\n if index == 0:\n return 'A'\n if index == 1:\n return 'C'\n if index == 2:\n return 'G'\n if index == 3:\n return 'T'\n\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k + 1):\n prob = 1\n pattern = text[i:i + k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile[l][j]\n if prob > maxprob:\n maxprob = prob\n kmer = pattern\n return kmer\n\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham += 1\n return ham\n\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k + 1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i + k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\n\ndef profileForm(motifs):\n k = len(motifs[0])\n profile = [[(1) for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] += 1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i] / len(motifs)\n return profile\n\n\n<mask token>\n\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score += 1\n return score\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\n\ndef symbolToNumber(symbol):\n if symbol == 'A':\n return 0\n if symbol == 
'C':\n return 1\n if symbol == 'G':\n return 2\n if symbol == 'T':\n return 3\n\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4)\n\n\ndef numberToSymbol(index):\n if index == 0:\n return 'A'\n if index == 1:\n return 'C'\n if index == 2:\n return 'G'\n if index == 3:\n return 'T'\n\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k + 1):\n prob = 1\n pattern = text[i:i + k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile[l][j]\n if prob > maxprob:\n maxprob = prob\n kmer = pattern\n return kmer\n\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham += 1\n return ham\n\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k + 1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i + k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\n\ndef profileForm(motifs):\n k = len(motifs[0])\n profile = [[(1) for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] += 1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i] / len(motifs)\n return profile\n\n\ndef consensus(profile):\n str = ''\n for i in range(len(profile[0])):\n max = 0\n loc = 0\n for j in range(4):\n if profile[j][i] > max:\n loc = j\n max = profile[j][i]\n str += numberToSymbol(loc)\n return str\n\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score += 1\n return score\n\n\ndef randomMotifSearch(DNA, k, t):\n bestMotifs = []\n motifs = []\n for index in range(t):\n random.seed()\n i = random.randint(0, len(DNA[index]) - k)\n 
motifs.append(DNA[index][i:i + k])\n bestMotifs = motifs.copy()\n count = 0\n while True:\n profile = profileForm(motifs)\n for index in range(t):\n motifs[index] = profileProbable(DNA[index], k, profile)\n if score(motifs) < score(bestMotifs):\n bestMotifs = motifs.copy()\n count += 1\n else:\n print(count)\n return bestMotifs\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\n\ndef symbolToNumber(symbol):\n if symbol == 'A':\n return 0\n if symbol == 'C':\n return 1\n if symbol == 'G':\n return 2\n if symbol == 'T':\n return 3\n\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4)\n\n\ndef numberToSymbol(index):\n if index == 0:\n return 'A'\n if index == 1:\n return 'C'\n if index == 2:\n return 'G'\n if index == 3:\n return 'T'\n\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k + 1):\n prob = 1\n pattern = text[i:i + k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile[l][j]\n if prob > maxprob:\n maxprob = prob\n kmer = pattern\n return kmer\n\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham += 1\n return ham\n\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k + 1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i + k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\n\ndef profileForm(motifs):\n k = len(motifs[0])\n profile = [[(1) for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] += 1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i] / len(motifs)\n return 
profile\n\n\ndef consensus(profile):\n str = ''\n for i in range(len(profile[0])):\n max = 0\n loc = 0\n for j in range(4):\n if profile[j][i] > max:\n loc = j\n max = profile[j][i]\n str += numberToSymbol(loc)\n return str\n\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score += 1\n return score\n\n\ndef randomMotifSearch(DNA, k, t):\n bestMotifs = []\n motifs = []\n for index in range(t):\n random.seed()\n i = random.randint(0, len(DNA[index]) - k)\n motifs.append(DNA[index][i:i + k])\n bestMotifs = motifs.copy()\n count = 0\n while True:\n profile = profileForm(motifs)\n for index in range(t):\n motifs[index] = profileProbable(DNA[index], k, profile)\n if score(motifs) < score(bestMotifs):\n bestMotifs = motifs.copy()\n count += 1\n else:\n print(count)\n return bestMotifs\n\n\nk = 15\nt = 20\nDNA = [\n 'ACTTATATCTAGAGTAAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCGAGTGATTGAACTGACTTATATCTAGAGT'\n ,\n 'AAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCCTCTCGATCACCGACGAGTGATTGAACTGACTTATATCTAGAGT'\n ,\n 'CACTCCCGTCCGTCTGACGCCAGGTGCTCTACCCCGCTGATTGTCTGGTACATAGCAGCCTATAGATCACCGATGCAGAAACACTTCGAGGCAGCCGATTTCGCTTATCACAACGTGACGGAATTTGATAAACCACGTACTCTAATACCGTCACGGGCCCATCAACGAA'\n ,\n 'ACAAGAACTGGTGGGGAGACTATGACACTCTAGCGGTCGCATAAGGGCCGGAAACCAGGACAAATCGATAAGATGAAGCGGGGATATAAGCCTTATACTGCGACTGGTTCCTTATATTATTTAGCCCCGATTGATCACCGATTAAAATATTCTGCGGTTTTCGAGACGG'\n ,\n 'TAACCACACCTAAAATTTTTCTTGGTGAGATGGACCCCCGCCGTAAATATCAGGATTAAATGTACGGATACCCATGACCCTCCAGTCATCTACCTTCCCGTGGTGGTCGCTCAGCCTTGTGCAGACCGAACTAGCACCTGTCACATACAATGTTGCCCGCATAGATCGT'\n ,\n 
'ATCCGACAGAGGCAGTGAATAAGGTTTCGTTTCCTCAGAGAGTAGAACTGCGTGTGACCTTGCCTTCACCGACATCCGTTTCCAATTGAGCTTTTCAGGACGTTTAGGTAACTGATTGTCATTGCAATTGTCCGGGGGATTTAGATGGCCGGGTACCTCTCGGACTATA'\n ,\n 'CCTTGTTGCCACCGATTCGCGAGCAACATCGGAGTGCTCTGATTCACGGCGATGCTCCACGAAGAGGACCGCGGCACGACACGCCCTGTACCTACGTTTCTGGATATCCTCCGGCGAGTTAATAGAGCAATACGACCTGGTCGTCGAGATCGTGTATCTAGCCCTACCT'\n ,\n 'ATAGGTTAACGAATCAGGAGAGTTAATTTTACCTAGCTAGAGCGGACGGTGCCTGGCTGTATTCGCGTTTGACTTTCGGGCTCGCTGATAACTTGTGATCACCTTTTACGCTTACTGGATCCAACGATGGATCAAAGTTGAGAATTTCTGTGCCTTGGGTGTGAGCTGT'\n ,\n 'CTGACGAAAGGACGGGCGGTGTACTTAGTTTGGGGTAAAATAGTTGGTATAATTCTGTGCGACAGACATTTGGTCAGGCCATACTGCCATATCGTGATGTAACTATCCACACTACGTCATAGGCCCTTGTGATCAATTAAACGTTCCTCATGCCAGGCTATCTGTTTAA'\n ,\n 'GGCTTCGCGTTTAAGGCTGGATTAAGTACTCCGCCTTGTGATCTGTGATCCTCCGACCTGTGATCAGCAAGATTGGAACCTAGGTAGGCGGCGGGTCTACGCTGGCCCACAATCGTGAGTCCCCCACTCCGTAGGTTGTGGAATTTATAGACCCGCAAGGGGCACCACT'\n ,\n 'AGGATGACACCCAGGATGAATCTGGATTAGGAACACCAACCCGACATATTTGTTACCGCTGCAGCATTTCGCTCTTGGACGCGTAACCCGAGATCCGTCTCGCGATCGTCACGGATCGGGATTATGCAGGCAATACCTTGTGATCACTCCGCGCTTGGTTTTGCTAGCG'\n ,\n 'ACATCTCTAGTCACTTTTATTGAGCAGGTGGGCGGATTCATGATCCGGCTCTGTCGTACGTCCAACCACGGTGACATGTTCGGAGCTGTCGCCGTGGAGCAGAGATACATCGGATCTATCAATTTTACTAAGAGCAACTAGCCACGACAAACTGTGATCACCGATTGGA'\n ,\n 'AATTTGCGTATCTCTAGGACTCCCTCATACAAATCAAAGCTTGGATGGGTAAGATGCCGCAGCAGCAGGTATCTCATATTGGCTATTAAGAGCCAGGCCCTATGGCCTTAGTATCACCGATCAGACGTCGCATGAGCGGGCCCGTTGTCCTATCTCTTTAGCTGCCGCA'\n ,\n 'GAAGTAAAGGGGTTCCACTGCGTAGAGCGTGCCCCTCTGGTGTGCCGTACTGTTATGGTGATACAGCTTCCTTATACCCCTCGTAAAGCGGCTAATGGTCCTAATGAATGCCCTTGTGAAATCCGAATCGCTTTACAATTGCGTTCGGCGGAATGCAGTCACCAGTGTT'\n ,\n 'TACACTACGCGTTATTTACTTTTACTGAGTCCTTGTCGCCACCGAACGAGGATTGTTCATTGTATCCGGAGATTAGGAGTTCGCATCGCTGACACAGCCAGTTCGTAGCAAATACCGCTGGCCCTGGGCACTCCAGATCAGAACTACTAGCCCTAAACTCTATGACACA'\n ,\n 'TTGGGTCTCGATCCCTCTATGTTAAGCTGTTCCGTGGAGAATCTCCTGGGTTTTATGATTTGAATGACGAGAATTGGGAAGTCGGGATGTTGTGATCACCGCCGTTCGCTTTCATAAATGAACCCCTTTTTTTCAGCAGACGGTGGCCTTTCCCTTTCATCATTATACA'\n ,\n 
'TTTCAAGTTACTACCGCCCTCTAGCGATAGAACTGAGGCAAATCATACACCGTGATCACCGACCCATGGAGTTTGACTCAGATTTACACTTTTAGGGGAACATGTTTGTCGGTCAGAGGTGTCAATTATTAGCAGATATCCCCCAACGCAGCGAGAGAGCACGGAGTGA'\n ,\n 'GATCCATTACCCTACGATATGTATATAGCGCCCTAGTACGGCTTCTCCCTTGCAGACACGCAGGCGCTGTGCGCTATCGGCTTCCTCGGACATTCCTGGATATAAGTAACGGCGAACTGGCTATCACTACCGCCGCTCCTTAAGCCTTGGTTTCACCGACGATTGTCGT'\n ,\n 'TAGTAGATTATTACCTGTGGACCGTTAGCTTCAAGACCGAAACGTTGGTGATGCTACTTAAATGTCAAGAGTTGCGAAGTTGGGCGAAGCACATCCGTACTCCCAAGTGGACGATCGATAGATCCATGGAGTTTCCATCCATCTTAATCCGCCCTTTGCATCACCGACG'\n ,\n 'TACAAGGCACAAACGAGACCTGATCGAACGGTGCACGGTCGAGGCAGCGAGATAAATGTACATTGAGAGCACCTTGTGATTTACGACCTGCATCGAAGGTTTCTTGGCACCCACCTGTCGTCCGCCAGGGCAGAGCCGACATTATATGACGCTGATGTACGAAGCCCCT'\n ]\nbest = randomMotifSearch(DNA, k, t)\nmin = score(best)\nfor index in range(1000):\n print(index)\n a = randomMotifSearch(DNA, k, t)\n if score(a) < score(best):\n best = a\n min = score(a)\nprint(min)\nfor index in best:\n print(index)\n", "step-5": "import random\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\ndef symbolToNumber(symbol):\n if symbol == \"A\":\n return 0\n if symbol == \"C\":\n return 1\n if symbol == \"G\":\n return 2\n if symbol == \"T\":\n return 3\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k-1) + numberToSymbol(index % 4)\n\ndef numberToSymbol(index):\n if index == 0:\n return \"A\"\n if index == 1:\n return \"C\"\n if index == 2:\n return \"G\"\n if index == 3:\n return \"T\"\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k +1):\n prob =1\n pattern =text[i:i+k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile [l][j]\n if prob > maxprob:\n maxprob =prob\n kmer = pattern\n return kmer\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham +=1\n return ham\n\ndef 
distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k+1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i+k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\ndef profileForm(motifs):\n k= len(motifs[0])\n profile = [[1 for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] +=1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i]/len(motifs)\n return profile\n\ndef consensus(profile):\n str = \"\"\n for i in range(len(profile[0])):\n max = 0\n loc = 0\n for j in range(4):\n if profile[j][i] > max:\n loc = j\n max = profile[j][i]\n str+=numberToSymbol(loc)\n return str\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score +=1\n return score\n\ndef randomMotifSearch(DNA, k, t):\n bestMotifs = []\n motifs = []\n for index in range(t):\n random.seed()\n i= random.randint(0, len(DNA[index])-k)\n motifs.append(DNA[index][i:i+k])\n bestMotifs = motifs.copy()\n count = 0\n while True:\n profile = profileForm(motifs)\n for index in range(t):\n motifs[index] = profileProbable(DNA[index], k, profile)\n if score(motifs) < score(bestMotifs):\n bestMotifs = motifs.copy()\n count +=1\n else:\n print(count)\n return bestMotifs\n\nk = 15\nt = 20\nDNA = [\"ACTTATATCTAGAGTAAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCGAGTGATTGAACTGACTTATATCTAGAGT\", \"AAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCCTCTCGATCACCGACGAGTGATTGAACTGACTTATATCTAGAGT\", 
\"CACTCCCGTCCGTCTGACGCCAGGTGCTCTACCCCGCTGATTGTCTGGTACATAGCAGCCTATAGATCACCGATGCAGAAACACTTCGAGGCAGCCGATTTCGCTTATCACAACGTGACGGAATTTGATAAACCACGTACTCTAATACCGTCACGGGCCCATCAACGAA\", \"ACAAGAACTGGTGGGGAGACTATGACACTCTAGCGGTCGCATAAGGGCCGGAAACCAGGACAAATCGATAAGATGAAGCGGGGATATAAGCCTTATACTGCGACTGGTTCCTTATATTATTTAGCCCCGATTGATCACCGATTAAAATATTCTGCGGTTTTCGAGACGG\", \"TAACCACACCTAAAATTTTTCTTGGTGAGATGGACCCCCGCCGTAAATATCAGGATTAAATGTACGGATACCCATGACCCTCCAGTCATCTACCTTCCCGTGGTGGTCGCTCAGCCTTGTGCAGACCGAACTAGCACCTGTCACATACAATGTTGCCCGCATAGATCGT\", \"ATCCGACAGAGGCAGTGAATAAGGTTTCGTTTCCTCAGAGAGTAGAACTGCGTGTGACCTTGCCTTCACCGACATCCGTTTCCAATTGAGCTTTTCAGGACGTTTAGGTAACTGATTGTCATTGCAATTGTCCGGGGGATTTAGATGGCCGGGTACCTCTCGGACTATA\", \"CCTTGTTGCCACCGATTCGCGAGCAACATCGGAGTGCTCTGATTCACGGCGATGCTCCACGAAGAGGACCGCGGCACGACACGCCCTGTACCTACGTTTCTGGATATCCTCCGGCGAGTTAATAGAGCAATACGACCTGGTCGTCGAGATCGTGTATCTAGCCCTACCT\", \"ATAGGTTAACGAATCAGGAGAGTTAATTTTACCTAGCTAGAGCGGACGGTGCCTGGCTGTATTCGCGTTTGACTTTCGGGCTCGCTGATAACTTGTGATCACCTTTTACGCTTACTGGATCCAACGATGGATCAAAGTTGAGAATTTCTGTGCCTTGGGTGTGAGCTGT\", \"CTGACGAAAGGACGGGCGGTGTACTTAGTTTGGGGTAAAATAGTTGGTATAATTCTGTGCGACAGACATTTGGTCAGGCCATACTGCCATATCGTGATGTAACTATCCACACTACGTCATAGGCCCTTGTGATCAATTAAACGTTCCTCATGCCAGGCTATCTGTTTAA\", \"GGCTTCGCGTTTAAGGCTGGATTAAGTACTCCGCCTTGTGATCTGTGATCCTCCGACCTGTGATCAGCAAGATTGGAACCTAGGTAGGCGGCGGGTCTACGCTGGCCCACAATCGTGAGTCCCCCACTCCGTAGGTTGTGGAATTTATAGACCCGCAAGGGGCACCACT\", \"AGGATGACACCCAGGATGAATCTGGATTAGGAACACCAACCCGACATATTTGTTACCGCTGCAGCATTTCGCTCTTGGACGCGTAACCCGAGATCCGTCTCGCGATCGTCACGGATCGGGATTATGCAGGCAATACCTTGTGATCACTCCGCGCTTGGTTTTGCTAGCG\", \"ACATCTCTAGTCACTTTTATTGAGCAGGTGGGCGGATTCATGATCCGGCTCTGTCGTACGTCCAACCACGGTGACATGTTCGGAGCTGTCGCCGTGGAGCAGAGATACATCGGATCTATCAATTTTACTAAGAGCAACTAGCCACGACAAACTGTGATCACCGATTGGA\", \"AATTTGCGTATCTCTAGGACTCCCTCATACAAATCAAAGCTTGGATGGGTAAGATGCCGCAGCAGCAGGTATCTCATATTGGCTATTAAGAGCCAGGCCCTATGGCCTTAGTATCACCGATCAGACGTCGCATGAGCGGGCCCGTTGTCCTATCTCTTTAGCTGCCGCA\", 
\"GAAGTAAAGGGGTTCCACTGCGTAGAGCGTGCCCCTCTGGTGTGCCGTACTGTTATGGTGATACAGCTTCCTTATACCCCTCGTAAAGCGGCTAATGGTCCTAATGAATGCCCTTGTGAAATCCGAATCGCTTTACAATTGCGTTCGGCGGAATGCAGTCACCAGTGTT\", \"TACACTACGCGTTATTTACTTTTACTGAGTCCTTGTCGCCACCGAACGAGGATTGTTCATTGTATCCGGAGATTAGGAGTTCGCATCGCTGACACAGCCAGTTCGTAGCAAATACCGCTGGCCCTGGGCACTCCAGATCAGAACTACTAGCCCTAAACTCTATGACACA\", \"TTGGGTCTCGATCCCTCTATGTTAAGCTGTTCCGTGGAGAATCTCCTGGGTTTTATGATTTGAATGACGAGAATTGGGAAGTCGGGATGTTGTGATCACCGCCGTTCGCTTTCATAAATGAACCCCTTTTTTTCAGCAGACGGTGGCCTTTCCCTTTCATCATTATACA\", \"TTTCAAGTTACTACCGCCCTCTAGCGATAGAACTGAGGCAAATCATACACCGTGATCACCGACCCATGGAGTTTGACTCAGATTTACACTTTTAGGGGAACATGTTTGTCGGTCAGAGGTGTCAATTATTAGCAGATATCCCCCAACGCAGCGAGAGAGCACGGAGTGA\", \"GATCCATTACCCTACGATATGTATATAGCGCCCTAGTACGGCTTCTCCCTTGCAGACACGCAGGCGCTGTGCGCTATCGGCTTCCTCGGACATTCCTGGATATAAGTAACGGCGAACTGGCTATCACTACCGCCGCTCCTTAAGCCTTGGTTTCACCGACGATTGTCGT\", \"TAGTAGATTATTACCTGTGGACCGTTAGCTTCAAGACCGAAACGTTGGTGATGCTACTTAAATGTCAAGAGTTGCGAAGTTGGGCGAAGCACATCCGTACTCCCAAGTGGACGATCGATAGATCCATGGAGTTTCCATCCATCTTAATCCGCCCTTTGCATCACCGACG\", \"TACAAGGCACAAACGAGACCTGATCGAACGGTGCACGGTCGAGGCAGCGAGATAAATGTACATTGAGAGCACCTTGTGATTTACGACCTGCATCGAAGGTTTCTTGGCACCCACCTGTCGTCCGCCAGGGCAGAGCCGACATTATATGACGCTGATGTACGAAGCCCCT\"]\nbest = randomMotifSearch(DNA, k, t)\nmin = score(best)\nfor index in range(1000):\n print(index)\n a = randomMotifSearch(DNA, k, t)\n if score(a) < score(best):\n best = a\n min = score(a)\nprint(min)\nfor index in best:\n print(index)", "step-ids": [ 8, 9, 11, 13, 15 ] }
[ 8, 9, 11, 13, 15 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.append(os.pardir) <|reserved_special_token_0|> for key in optimizers.keys(): networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100], output_size=10) train_loss[key] = [] for i in range(max_iterations): batch_mask = np.random.choice(train_size, batch_size) x_batch = x_train[batch_mask] t_batch = t_train[batch_mask] for key in optimizers.keys(): grads = networks[key].gradient(x_batch, t_batch) optimizers[key].update(networks[key].params, grads) loss = networks[key].loss(x_batch, t_batch) train_loss[key].append(loss) if i % 100 == 0: print('===========' + 'iteration:' + str(i) + '===========') for key in optimizers.keys(): loss = networks[key].loss(x_batch, t_batch) print(key + ':' + str(loss)) <|reserved_special_token_0|> for key in optimizers.keys(): plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key) plt.xlabel('iterations') plt.ylabel('loss') plt.ylim(0, 1) plt.legend() plt.show() <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.append(os.pardir) <|reserved_special_token_0|> (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True) train_size = x_train.shape[0] batch_size = 128 max_iterations = 2000 optimizers = {} optimizers['SGD'] = SGD() optimizers['Momentum'] = Momentum() optimizers['AdaGrad'] = AdaGrad() optimizers['Adam'] = Adam() networks = {} train_loss = {} for key in optimizers.keys(): networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100], output_size=10) train_loss[key] = [] for i in range(max_iterations): batch_mask = np.random.choice(train_size, batch_size) x_batch = x_train[batch_mask] t_batch = t_train[batch_mask] for key in optimizers.keys(): grads = networks[key].gradient(x_batch, t_batch) optimizers[key].update(networks[key].params, grads) loss = networks[key].loss(x_batch, t_batch) train_loss[key].append(loss) if i % 100 == 0: 
print('===========' + 'iteration:' + str(i) + '===========') for key in optimizers.keys(): loss = networks[key].loss(x_batch, t_batch) print(key + ':' + str(loss)) markers = {'SGD': 'o', 'Momentum': 'x', 'AdaGrad': 's', 'Adam': 'D'} x = np.arange(max_iterations) for key in optimizers.keys(): plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key) plt.xlabel('iterations') plt.ylabel('loss') plt.ylim(0, 1) plt.legend() plt.show() <|reserved_special_token_1|> import sys, os sys.path.append(os.pardir) import matplotlib.pyplot as plt from dataset.mnist import load_mnist from common.util import smooth_curve from common.multi_layer_net import MultiLayerNet from common.optimizer import * (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True) train_size = x_train.shape[0] batch_size = 128 max_iterations = 2000 optimizers = {} optimizers['SGD'] = SGD() optimizers['Momentum'] = Momentum() optimizers['AdaGrad'] = AdaGrad() optimizers['Adam'] = Adam() networks = {} train_loss = {} for key in optimizers.keys(): networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100], output_size=10) train_loss[key] = [] for i in range(max_iterations): batch_mask = np.random.choice(train_size, batch_size) x_batch = x_train[batch_mask] t_batch = t_train[batch_mask] for key in optimizers.keys(): grads = networks[key].gradient(x_batch, t_batch) optimizers[key].update(networks[key].params, grads) loss = networks[key].loss(x_batch, t_batch) train_loss[key].append(loss) if i % 100 == 0: print('===========' + 'iteration:' + str(i) + '===========') for key in optimizers.keys(): loss = networks[key].loss(x_batch, t_batch) print(key + ':' + str(loss)) markers = {'SGD': 'o', 'Momentum': 'x', 'AdaGrad': 's', 'Adam': 'D'} x = np.arange(max_iterations) for key in optimizers.keys(): plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key) plt.xlabel('iterations') plt.ylabel('loss') plt.ylim(0, 1) 
plt.legend() plt.show() <|reserved_special_token_1|> # coding: utf-8 import sys, os sys.path.append(os.pardir) import matplotlib.pyplot as plt from dataset.mnist import load_mnist from common.util import smooth_curve from common.multi_layer_net import MultiLayerNet from common.optimizer import * # 0. MNIST 데이터 로딩 (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True) train_size = x_train.shape[0] batch_size = 128 max_iterations = 2000 # 1. 실험용 설정 셋팅 optimizers = {} optimizers['SGD'] = SGD() optimizers['Momentum'] = Momentum() optimizers['AdaGrad'] = AdaGrad() optimizers['Adam'] = Adam() #network, loss를 저장할 dictionary를 설정 networks = {} train_loss = {} #각 optimizer마다 network를 MultiLayerNet을 이용해서 똑같은 구조로 만들고, train_loss 딕셔너리를 초기화 한다. for key in optimizers.keys(): networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100], output_size=10) train_loss[key] = [] # 2. 훈련 시작 for i in range(max_iterations): #4개의 최적화 기법에 똑같이 들어갈 batch 생성 batch_mask = np.random.choice(train_size, batch_size) x_batch = x_train[batch_mask] t_batch = t_train[batch_mask] for key in optimizers.keys(): grads = networks[key].gradient(x_batch, t_batch) #배치를 넣어서 각 네트워크의 기울기를 구함 optimizers[key].update(networks[key].params, grads) #네트워크의 parameter를 기울기에 대해 update함 loss = networks[key].loss(x_batch, t_batch) #사실 이것이 먼저 계산되어야 하지만, 이 코드에서는 기록용으로 저장 train_loss[key].append(loss) #각 최적화 기법의 학습 loss 리스트에 저장 #학습 진행 경과 및 각 최적화 기법에 해당하는 loss 확인 if i % 100 == 0: print("===========" + "iteration:" + str(i) + "===========") for key in optimizers.keys(): loss = networks[key].loss(x_batch, t_batch) print(key + ':' + str(loss)) # 3. 그래프 그리기 markers = {"SGD": "o", "Momentum": "x", "AdaGrad": "s", "Adam": "D"} x = np.arange(max_iterations) for key in optimizers.keys(): plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key) plt.xlabel("iterations") plt.ylabel("loss") plt.ylim(0, 1) plt.legend() plt.show()
flexible
{ "blob_id": "85d40a49341c7bd7af7a5dc62e4bce0253eb25e6", "index": 9944, "step-1": "<mask token>\n", "step-2": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\nfor key in optimizers.keys():\n networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, \n 100, 100, 100], output_size=10)\n train_loss[key] = []\nfor i in range(max_iterations):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n for key in optimizers.keys():\n grads = networks[key].gradient(x_batch, t_batch)\n optimizers[key].update(networks[key].params, grads)\n loss = networks[key].loss(x_batch, t_batch)\n train_loss[key].append(loss)\n if i % 100 == 0:\n print('===========' + 'iteration:' + str(i) + '===========')\n for key in optimizers.keys():\n loss = networks[key].loss(x_batch, t_batch)\n print(key + ':' + str(loss))\n<mask token>\nfor key in optimizers.keys():\n plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key],\n markevery=100, label=key)\nplt.xlabel('iterations')\nplt.ylabel('loss')\nplt.ylim(0, 1)\nplt.legend()\nplt.show()\n", "step-3": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)\ntrain_size = x_train.shape[0]\nbatch_size = 128\nmax_iterations = 2000\noptimizers = {}\noptimizers['SGD'] = SGD()\noptimizers['Momentum'] = Momentum()\noptimizers['AdaGrad'] = AdaGrad()\noptimizers['Adam'] = Adam()\nnetworks = {}\ntrain_loss = {}\nfor key in optimizers.keys():\n networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, \n 100, 100, 100], output_size=10)\n train_loss[key] = []\nfor i in range(max_iterations):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n for key in optimizers.keys():\n grads = networks[key].gradient(x_batch, t_batch)\n optimizers[key].update(networks[key].params, grads)\n loss = networks[key].loss(x_batch, t_batch)\n 
train_loss[key].append(loss)\n if i % 100 == 0:\n print('===========' + 'iteration:' + str(i) + '===========')\n for key in optimizers.keys():\n loss = networks[key].loss(x_batch, t_batch)\n print(key + ':' + str(loss))\nmarkers = {'SGD': 'o', 'Momentum': 'x', 'AdaGrad': 's', 'Adam': 'D'}\nx = np.arange(max_iterations)\nfor key in optimizers.keys():\n plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key],\n markevery=100, label=key)\nplt.xlabel('iterations')\nplt.ylabel('loss')\nplt.ylim(0, 1)\nplt.legend()\nplt.show()\n", "step-4": "import sys, os\nsys.path.append(os.pardir)\nimport matplotlib.pyplot as plt\nfrom dataset.mnist import load_mnist\nfrom common.util import smooth_curve\nfrom common.multi_layer_net import MultiLayerNet\nfrom common.optimizer import *\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)\ntrain_size = x_train.shape[0]\nbatch_size = 128\nmax_iterations = 2000\noptimizers = {}\noptimizers['SGD'] = SGD()\noptimizers['Momentum'] = Momentum()\noptimizers['AdaGrad'] = AdaGrad()\noptimizers['Adam'] = Adam()\nnetworks = {}\ntrain_loss = {}\nfor key in optimizers.keys():\n networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, \n 100, 100, 100], output_size=10)\n train_loss[key] = []\nfor i in range(max_iterations):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n for key in optimizers.keys():\n grads = networks[key].gradient(x_batch, t_batch)\n optimizers[key].update(networks[key].params, grads)\n loss = networks[key].loss(x_batch, t_batch)\n train_loss[key].append(loss)\n if i % 100 == 0:\n print('===========' + 'iteration:' + str(i) + '===========')\n for key in optimizers.keys():\n loss = networks[key].loss(x_batch, t_batch)\n print(key + ':' + str(loss))\nmarkers = {'SGD': 'o', 'Momentum': 'x', 'AdaGrad': 's', 'Adam': 'D'}\nx = np.arange(max_iterations)\nfor key in optimizers.keys():\n plt.plot(x, smooth_curve(train_loss[key]), 
marker=markers[key],\n markevery=100, label=key)\nplt.xlabel('iterations')\nplt.ylabel('loss')\nplt.ylim(0, 1)\nplt.legend()\nplt.show()\n", "step-5": "# coding: utf-8\r\n\r\n\r\nimport sys, os\r\nsys.path.append(os.pardir)\r\nimport matplotlib.pyplot as plt\r\nfrom dataset.mnist import load_mnist\r\nfrom common.util import smooth_curve\r\nfrom common.multi_layer_net import MultiLayerNet\r\nfrom common.optimizer import *\r\n\r\n# 0. MNIST 데이터 로딩\r\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)\r\n\r\ntrain_size = x_train.shape[0]\r\nbatch_size = 128\r\nmax_iterations = 2000\r\n\r\n# 1. 실험용 설정 셋팅\r\noptimizers = {}\r\noptimizers['SGD'] = SGD()\r\noptimizers['Momentum'] = Momentum()\r\noptimizers['AdaGrad'] = AdaGrad()\r\noptimizers['Adam'] = Adam()\r\n\r\n#network, loss를 저장할 dictionary를 설정\r\nnetworks = {}\r\ntrain_loss = {}\r\n\r\n#각 optimizer마다 network를 MultiLayerNet을 이용해서 똑같은 구조로 만들고, train_loss 딕셔너리를 초기화 한다.\r\nfor key in optimizers.keys():\r\n networks[key] = MultiLayerNet(input_size=784,\r\n hidden_size_list=[100, 100, 100, 100],\r\n output_size=10)\r\n train_loss[key] = []\r\n\r\n# 2. 훈련 시작\r\nfor i in range(max_iterations):\r\n #4개의 최적화 기법에 똑같이 들어갈 batch 생성\r\n batch_mask = np.random.choice(train_size, batch_size)\r\n x_batch = x_train[batch_mask]\r\n t_batch = t_train[batch_mask]\r\n\r\n for key in optimizers.keys():\r\n grads = networks[key].gradient(x_batch, t_batch) #배치를 넣어서 각 네트워크의 기울기를 구함\r\n optimizers[key].update(networks[key].params, grads) #네트워크의 parameter를 기울기에 대해 update함\r\n loss = networks[key].loss(x_batch, t_batch) #사실 이것이 먼저 계산되어야 하지만, 이 코드에서는 기록용으로 저장\r\n train_loss[key].append(loss) #각 최적화 기법의 학습 loss 리스트에 저장\r\n\r\n #학습 진행 경과 및 각 최적화 기법에 해당하는 loss 확인\r\n if i % 100 == 0:\r\n print(\"===========\" + \"iteration:\" + str(i) + \"===========\")\r\n for key in optimizers.keys():\r\n loss = networks[key].loss(x_batch, t_batch)\r\n print(key + ':' + str(loss))\r\n\r\n\r\n# 3. 
그래프 그리기\r\nmarkers = {\"SGD\": \"o\", \"Momentum\": \"x\", \"AdaGrad\": \"s\", \"Adam\": \"D\"}\r\nx = np.arange(max_iterations)\r\nfor key in optimizers.keys():\r\n plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)\r\nplt.xlabel(\"iterations\")\r\nplt.ylabel(\"loss\")\r\nplt.ylim(0, 1)\r\nplt.legend()\r\nplt.show()\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from models import Ban from django.shortcuts import render_to_response class IPBanMiddleware(object): """ Simple middleware for taking care of bans from specific IP's Redirects the banned user to a ban-page with an explanation """ def process_request(self, request): ip = request.META['REMOTE_ADDR'] # user's IP # see if user is banned try: # if this doesnt throw an exception, user is banned ban = Ban.objects.get(ip=ip) if ban.banned(): # return the "ban page" return render_to_response("ban/banned.html", {"reason": ban.reason, "unbandate": ban.unbandate()}) else: # User was previously banned, but the ban is over by now ban.delete() pass except Ban.DoesNotExist: # not banned! goodie pass
normal
{ "blob_id": "9289eb32db145187c5b4140e32acff520be8366e", "index": 7620, "step-1": "<mask token>\n\n\nclass IPBanMiddleware(object):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass IPBanMiddleware(object):\n <mask token>\n\n def process_request(self, request):\n ip = request.META['REMOTE_ADDR']\n try:\n ban = Ban.objects.get(ip=ip)\n if ban.banned():\n return render_to_response('ban/banned.html', {'reason': ban\n .reason, 'unbandate': ban.unbandate()})\n else:\n ban.delete()\n pass\n except Ban.DoesNotExist:\n pass\n", "step-3": "<mask token>\n\n\nclass IPBanMiddleware(object):\n \"\"\"\n Simple middleware for taking care of bans from specific IP's\n Redirects the banned user to a ban-page with an explanation\n \"\"\"\n\n def process_request(self, request):\n ip = request.META['REMOTE_ADDR']\n try:\n ban = Ban.objects.get(ip=ip)\n if ban.banned():\n return render_to_response('ban/banned.html', {'reason': ban\n .reason, 'unbandate': ban.unbandate()})\n else:\n ban.delete()\n pass\n except Ban.DoesNotExist:\n pass\n", "step-4": "from models import Ban\nfrom django.shortcuts import render_to_response\n\n\nclass IPBanMiddleware(object):\n \"\"\"\n Simple middleware for taking care of bans from specific IP's\n Redirects the banned user to a ban-page with an explanation\n \"\"\"\n\n def process_request(self, request):\n ip = request.META['REMOTE_ADDR']\n try:\n ban = Ban.objects.get(ip=ip)\n if ban.banned():\n return render_to_response('ban/banned.html', {'reason': ban\n .reason, 'unbandate': ban.unbandate()})\n else:\n ban.delete()\n pass\n except Ban.DoesNotExist:\n pass\n", "step-5": "from models import Ban\nfrom django.shortcuts import render_to_response\n\nclass IPBanMiddleware(object):\n \"\"\"\n Simple middleware for taking care of bans from specific IP's\n Redirects the banned user to a ban-page with an explanation\n \"\"\"\n def process_request(self, request):\n ip = request.META['REMOTE_ADDR'] # user's IP\n\n # see if user is banned\n try:\n 
# if this doesnt throw an exception, user is banned\n ban = Ban.objects.get(ip=ip)\n \n if ban.banned():\n # return the \"ban page\"\n return render_to_response(\"ban/banned.html\",\n {\"reason\": ban.reason, \"unbandate\": ban.unbandate()})\n else:\n # User was previously banned, but the ban is over by now\n ban.delete()\n pass\n\n except Ban.DoesNotExist: # not banned! goodie\n pass\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import ray import os import sys import random path_join = os.path.join real_path = os.path.realpath perfd_dir = real_path(path_join(os.getcwd())) microps_dir = path_join(perfd_dir, "thirdparty", "microps") sys.path += [perfd_dir, microps_dir] from thirdparty.microps.oracle.experiments.spark_sql_perf.main import SparkExperiment, SparkBenchMaker from thirdparty.microps.build.spark.driver import add_role as add_spk_role import thirdparty.microps.oracle.apps.spark_sql_perf.configs as spk import thirdparty.microps.oracle.experiments.spark_sql_perf.utils as utils @ray.remote def run(run_config: dict, wrks: dict) -> dict: try: add_spk_role() except: print("run, spark: ignore") os.chdir(microps_dir) # TODO: add virtual cluster labels to the pods base_spk_config = spk.apps_config_map["sparkperfml"] # TODO: update driver and executor memory base_spk_config = spk.patched_app_config(base_spk_config, { "app_name": run_config["appName"], "ins_type": run_config["serverInstanceType"], "ins_num": run_config["numExecutor"] + 1, # "node_selectors": cur_node_selectors, "driver_adaptive_gc": run_config["driverAdaptiveGC"], }) bench = None for b in SparkBenchMaker.load_benchmarks(): if b["name"] == run_config["appName"]: bench = b if bench is None: print("run, spark: unable to find bench", run_config["appName"]) # spark sql perf configurations config_base = SparkBenchMaker.load_base() # change the dataset scale utils.update_bench_params(base=config_base, bench=bench, key="numExamples", value=run_config["inputScale"], is_scale=True) # change number of partition, each executor has at least one partition utils.update_bench_params(base=config_base, bench=bench, key="numPartitions", value=run_config["numPartition"], is_scale=False) utils.update_bench_params(base=config_base, bench=bench, key="randomSeed", value=random.randint(0, 10000) if run_config.get("randomSeed", 1) == "random" else 1, is_scale=False) bc = SparkBenchMaker.patched_bench_config(config_base, { "benchmarks": [bench] }) 
print(bc) exp = SparkExperiment( { "app_configs": base_spk_config, "exp_configs": { "s3_log_bucket": run_config["logBucket"], "num_executor": run_config["numExecutor"], "ins_type": run_config["serverInstanceType"], "ins_num": run_config["numServerInstance"], "run_interval": 0.5, "runs": 1, "bench_config": bc, }, "ins_type_num": [(run_config["serverInstanceType"], run_config["numServerInstance"])], "variables": {}, } ) exp.run() return {}
normal
{ "blob_id": "25595b5f86a41fee1dc43f199f3bcff73f6d256b", "index": 9418, "step-1": "<mask token>\n\n\n@ray.remote\ndef run(run_config: dict, wrks: dict) ->dict:\n try:\n add_spk_role()\n except:\n print('run, spark: ignore')\n os.chdir(microps_dir)\n base_spk_config = spk.apps_config_map['sparkperfml']\n base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':\n run_config['appName'], 'ins_type': run_config['serverInstanceType'],\n 'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':\n run_config['driverAdaptiveGC']})\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b['name'] == run_config['appName']:\n bench = b\n if bench is None:\n print('run, spark: unable to find bench', run_config['appName'])\n config_base = SparkBenchMaker.load_base()\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numExamples', value=run_config['inputScale'], is_scale=True)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numPartitions', value=run_config['numPartition'], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'randomSeed', value=random.randint(0, 10000) if run_config.get(\n 'randomSeed', 1) == 'random' else 1, is_scale=False)\n bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [\n bench]})\n print(bc)\n exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {\n 's3_log_bucket': run_config['logBucket'], 'num_executor':\n run_config['numExecutor'], 'ins_type': run_config[\n 'serverInstanceType'], 'ins_num': run_config['numServerInstance'],\n 'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':\n [(run_config['serverInstanceType'], run_config['numServerInstance']\n )], 'variables': {}})\n exp.run()\n return {}\n", "step-2": "<mask token>\nsys.path += [perfd_dir, microps_dir]\n<mask token>\n\n\n@ray.remote\ndef run(run_config: dict, wrks: dict) ->dict:\n try:\n add_spk_role()\n except:\n print('run, spark: ignore')\n 
os.chdir(microps_dir)\n base_spk_config = spk.apps_config_map['sparkperfml']\n base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':\n run_config['appName'], 'ins_type': run_config['serverInstanceType'],\n 'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':\n run_config['driverAdaptiveGC']})\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b['name'] == run_config['appName']:\n bench = b\n if bench is None:\n print('run, spark: unable to find bench', run_config['appName'])\n config_base = SparkBenchMaker.load_base()\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numExamples', value=run_config['inputScale'], is_scale=True)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numPartitions', value=run_config['numPartition'], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'randomSeed', value=random.randint(0, 10000) if run_config.get(\n 'randomSeed', 1) == 'random' else 1, is_scale=False)\n bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [\n bench]})\n print(bc)\n exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {\n 's3_log_bucket': run_config['logBucket'], 'num_executor':\n run_config['numExecutor'], 'ins_type': run_config[\n 'serverInstanceType'], 'ins_num': run_config['numServerInstance'],\n 'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':\n [(run_config['serverInstanceType'], run_config['numServerInstance']\n )], 'variables': {}})\n exp.run()\n return {}\n", "step-3": "<mask token>\npath_join = os.path.join\nreal_path = os.path.realpath\nperfd_dir = real_path(path_join(os.getcwd()))\nmicrops_dir = path_join(perfd_dir, 'thirdparty', 'microps')\nsys.path += [perfd_dir, microps_dir]\n<mask token>\n\n\n@ray.remote\ndef run(run_config: dict, wrks: dict) ->dict:\n try:\n add_spk_role()\n except:\n print('run, spark: ignore')\n os.chdir(microps_dir)\n base_spk_config = 
spk.apps_config_map['sparkperfml']\n base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':\n run_config['appName'], 'ins_type': run_config['serverInstanceType'],\n 'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':\n run_config['driverAdaptiveGC']})\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b['name'] == run_config['appName']:\n bench = b\n if bench is None:\n print('run, spark: unable to find bench', run_config['appName'])\n config_base = SparkBenchMaker.load_base()\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numExamples', value=run_config['inputScale'], is_scale=True)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numPartitions', value=run_config['numPartition'], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'randomSeed', value=random.randint(0, 10000) if run_config.get(\n 'randomSeed', 1) == 'random' else 1, is_scale=False)\n bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [\n bench]})\n print(bc)\n exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {\n 's3_log_bucket': run_config['logBucket'], 'num_executor':\n run_config['numExecutor'], 'ins_type': run_config[\n 'serverInstanceType'], 'ins_num': run_config['numServerInstance'],\n 'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':\n [(run_config['serverInstanceType'], run_config['numServerInstance']\n )], 'variables': {}})\n exp.run()\n return {}\n", "step-4": "import ray\nimport os\nimport sys\nimport random\npath_join = os.path.join\nreal_path = os.path.realpath\nperfd_dir = real_path(path_join(os.getcwd()))\nmicrops_dir = path_join(perfd_dir, 'thirdparty', 'microps')\nsys.path += [perfd_dir, microps_dir]\nfrom thirdparty.microps.oracle.experiments.spark_sql_perf.main import SparkExperiment, SparkBenchMaker\nfrom thirdparty.microps.build.spark.driver import add_role as add_spk_role\nimport 
thirdparty.microps.oracle.apps.spark_sql_perf.configs as spk\nimport thirdparty.microps.oracle.experiments.spark_sql_perf.utils as utils\n\n\n@ray.remote\ndef run(run_config: dict, wrks: dict) ->dict:\n try:\n add_spk_role()\n except:\n print('run, spark: ignore')\n os.chdir(microps_dir)\n base_spk_config = spk.apps_config_map['sparkperfml']\n base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':\n run_config['appName'], 'ins_type': run_config['serverInstanceType'],\n 'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':\n run_config['driverAdaptiveGC']})\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b['name'] == run_config['appName']:\n bench = b\n if bench is None:\n print('run, spark: unable to find bench', run_config['appName'])\n config_base = SparkBenchMaker.load_base()\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numExamples', value=run_config['inputScale'], is_scale=True)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numPartitions', value=run_config['numPartition'], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'randomSeed', value=random.randint(0, 10000) if run_config.get(\n 'randomSeed', 1) == 'random' else 1, is_scale=False)\n bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [\n bench]})\n print(bc)\n exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {\n 's3_log_bucket': run_config['logBucket'], 'num_executor':\n run_config['numExecutor'], 'ins_type': run_config[\n 'serverInstanceType'], 'ins_num': run_config['numServerInstance'],\n 'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':\n [(run_config['serverInstanceType'], run_config['numServerInstance']\n )], 'variables': {}})\n exp.run()\n return {}\n", "step-5": "import ray\nimport os\nimport sys\nimport random\n\npath_join = os.path.join\nreal_path = os.path.realpath\n\nperfd_dir = 
real_path(path_join(os.getcwd()))\nmicrops_dir = path_join(perfd_dir, \"thirdparty\", \"microps\")\nsys.path += [perfd_dir, microps_dir]\n\nfrom thirdparty.microps.oracle.experiments.spark_sql_perf.main import SparkExperiment, SparkBenchMaker\nfrom thirdparty.microps.build.spark.driver import add_role as add_spk_role\nimport thirdparty.microps.oracle.apps.spark_sql_perf.configs as spk\nimport thirdparty.microps.oracle.experiments.spark_sql_perf.utils as utils\n\n\n@ray.remote\ndef run(run_config: dict, wrks: dict) -> dict:\n try:\n add_spk_role()\n except:\n print(\"run, spark: ignore\")\n os.chdir(microps_dir)\n\n # TODO: add virtual cluster labels to the pods\n base_spk_config = spk.apps_config_map[\"sparkperfml\"]\n\n # TODO: update driver and executor memory\n base_spk_config = spk.patched_app_config(base_spk_config,\n {\n \"app_name\": run_config[\"appName\"],\n \"ins_type\": run_config[\"serverInstanceType\"],\n \"ins_num\": run_config[\"numExecutor\"] + 1,\n # \"node_selectors\": cur_node_selectors,\n \"driver_adaptive_gc\": run_config[\"driverAdaptiveGC\"],\n })\n\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b[\"name\"] == run_config[\"appName\"]:\n bench = b\n if bench is None:\n print(\"run, spark: unable to find bench\", run_config[\"appName\"])\n\n # spark sql perf configurations\n config_base = SparkBenchMaker.load_base()\n # change the dataset scale\n utils.update_bench_params(base=config_base, bench=bench,\n key=\"numExamples\", value=run_config[\"inputScale\"], is_scale=True)\n\n # change number of partition, each executor has at least one partition\n utils.update_bench_params(base=config_base, bench=bench,\n key=\"numPartitions\", value=run_config[\"numPartition\"], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench,\n key=\"randomSeed\",\n value=random.randint(0, 10000) if run_config.get(\"randomSeed\", 1) == \"random\" else 1,\n is_scale=False)\n\n bc = 
SparkBenchMaker.patched_bench_config(config_base,\n {\n \"benchmarks\": [bench]\n })\n\n print(bc)\n exp = SparkExperiment(\n {\n \"app_configs\": base_spk_config,\n \"exp_configs\": {\n \"s3_log_bucket\": run_config[\"logBucket\"],\n \"num_executor\": run_config[\"numExecutor\"],\n \"ins_type\": run_config[\"serverInstanceType\"],\n \"ins_num\": run_config[\"numServerInstance\"],\n \"run_interval\": 0.5,\n \"runs\": 1,\n \"bench_config\": bc,\n },\n \"ins_type_num\": [(run_config[\"serverInstanceType\"], run_config[\"numServerInstance\"])],\n \"variables\": {},\n }\n )\n exp.run()\n return {}\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('grafit', '0002_article')] operations = [migrations.RunSQL( """ INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License. 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6] On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size. Indexing Fields in a MongoDB document can be indexed with primary and secondary indices. Replication MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. 
When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default. Load balancing[10] MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution. MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. '); INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8] Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. 
The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9] Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.) Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. '); INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. 
Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16] Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17] In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. 
After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] '); INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system." "We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services." "We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."'); INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link. Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7). Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action. 
According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc." phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms. Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases. Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link. "A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post. However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table. "If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc." Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible. 
'); INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed. When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design. "Who care about database design? What about mockups? What about workflows?" Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses. That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES. We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used. Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it. That''s when the marketing department had a brilliant, inexpensive idea. 
You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this. Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer. "Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that." Except you couldn''t enter a customer without an order. Except you couldn''t enter an order without at least one item on it. Except you couldn''t enter an item unless it was listed in inventory. Except that reserved the "inventory" item and made it unavailable. Except, except, except ... It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember. Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them. If they had simply had a proper database design up front, they could have reused their existing system with little trouble. That''s what bad database design costs you and why I usually start with that before writing my software. Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.'); INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. 
Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub. Learn more about actions As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you'); INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', ' The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves. Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update. Affected products GitHub Desktop GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app. Atom Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch. 
Ensure you’re on the latest Atom release by completing any of the following: Windows: From the toolbar, click Help -> Check for Updates MacOS: From the menu bar, click Atom -> Check for Update Linux: Update manually by downloading the latest release from atom.io Git on the command line and other clients In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other. Additional notes Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9. Details of the vulnerability This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself. The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix. The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability). The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands. We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added). Please update your copy of Git soon, and happy cloning! '); INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet. The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux. The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet. 
So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others. To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs. What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.'); INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres. rbanffy on Aug 18, 2012 [-] I think this would be a mistake. This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle. All that is lost is the MySQL name and brand. PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now. Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this. Udo on Aug 18, 2012 [-] I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project! 
MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing. Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem. Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own. The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. '); INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts? Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.'); INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code. Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files. '); INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness. This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming. What is PostgreSQL? 
PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge. A Brief History of PostgreSQL PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates. 1977-1985 − A project called INGRES was developed. Proof-of-concept for relational databases Established the company Ingres in 1980 Bought by Computer Associates in 1994 1986-1994 − POSTGRES Development of the concepts in INGRES with a focus on object orientation and the query language - Quel The code base of INGRES was not used as a basis for POSTGRES Commercialized as Illustra (bought by Informix, bought by IBM) 1994-1995 − Postgres95 Support for SQL was added in 1994 Released as Postgres95 in 1995 Re-released as PostgreSQL 6.0 in 1996 Establishment of the PostgreSQL Global Development Team Key Features of PostgreSQL PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC). PostgreSQL supports a large part of the SQL standard and offers many modern features including the following − Complex SQL queries SQL Sub-selects Foreign keys Trigger Views Transactions Multiversion concurrency control (MVCC) Streaming Replication (as of 9.0) Hot Standby (as of 9.0) You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. 
For example by adding new − Data types Functions Operators Aggregate functions Index methods Procedural Languages Support PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.'); INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup. I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try. Install Directly or not? On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do. In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM. Installing Docker Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled. After logging back in I then got the following message about hardware-assisted virtualization not being enabled. 
After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen. Open a command prompt and run the following command. docker run hello-world You should output that starts with the following if your installation is working. Hello from Docker! This message shows that your installation appears to be working correctly. What about Postgres? Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container. docker create -v /var/lib/postgresql/data --name PostgresData alpine The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects. Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container. If you run the docker ps -a command it will show you all your containers. As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running. 
'); INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it. The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.) Step 1: Install PostgreSQL Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution): Open a terminal window. Issue the command sudo apt-get install postgresql. Type the sudo password necessary to give you admin rights and hit Enter. Allow apt to pick up any necessary dependencies. Once the installation is complete, it''s time to set this baby up. Step 2: Change the default user password Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure. Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so: Open a terminal window. Issue the command sudo passwd postgres. Type (and confirm) that password to be used for this user. The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. 
To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like: postgres=# All other users have to gain access to the prompt like so: psql DB_NAME where DB_NAME is the name of an existing database. '); INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL. This seems to be the journey: 1. Lack of migrations is awesome! We can iterate so quickly for MVP 2. Get users 3. Add features, still enjoying the speed of iteration 4. Get more users 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts) 6. Realise you desperately need joins, transactions and other SQL features 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back. I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? My thought is definitely yes. brandur on Aug 29, 2017 [-] > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project. 
The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this. The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about. Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week. martinald on Aug 29, 2017 [-] I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away. I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done. Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly. 
I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly. I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. '); INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan. The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto. Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message. As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial. A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision. Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.'); INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts. I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.” I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices. Red Bull could sponsor it. I’d buy a T-shirt. kbenson 8 months ago [-] That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs. You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something. If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource. Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already... 1: https://github.com/gothinkster/realworld 2: https://www.techempower.com/benchmarks/ etxm 8 months ago [-] Yeah after I posted it I started thinking about what it would take and what that would actually look like... 
and how you’d cheat :) It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time. It would be cool to see things like disaster recovery and chaos proofing as well. '); INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community. Leveraging the community There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed. On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release. 
For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system. Always quality focused No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert. Learn more'); INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query. Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack. Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads. maxxxxx 8 months ago [-] Agreed. 
Switching to another system is expensive and the benefit is pretty questionable. emsy 8 months ago [-] Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once. TremendousJudge 8 months ago [-] expand, please? maxxxxx 8 months ago [-] I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure. In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other. gopalv 8 months ago [-] > Depending on your data some databases may be better than others and that should be easy to measure. And the performance difference could be an accidental feature of the design and completely unintentional. Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated. Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs). When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural. And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two. Though how it came about isn''t really intentional. 
'); """ )] <|reserved_special_token_1|> import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone import uuid class Migration(migrations.Migration): dependencies = [('grafit', '0002_article')] operations = [migrations.RunSQL( """ INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License. 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6] On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size. Indexing Fields in a MongoDB document can be indexed with primary and secondary indices. Replication MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. 
Secondaries can optionally serve read operations, but that data is only eventually consistent by default. Load balancing[10] MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution. MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. '); INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8] Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. 
Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9] Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.) Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. '); INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. 
Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16] Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17] In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. 
After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] '); INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system." "We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services." "We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."'); INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link. Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7). Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action. 
According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc." phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms. Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases. Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link. "A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post. However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table. "If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc." Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible. 
'); INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed. When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design. "Who care about database design? What about mockups? What about workflows?" Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses. That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES. We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used. Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it. That''s when the marketing department had a brilliant, inexpensive idea. 
You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this. Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer. "Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that." Except you couldn''t enter a customer without an order. Except you couldn''t enter an order without at least one item on it. Except you couldn''t enter an item unless it was listed in inventory. Except that reserved the "inventory" item and made it unavailable. Except, except, except ... It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember. Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them. If they had simply had a proper database design up front, they could have reused their existing system with little trouble. That''s what bad database design costs you and why I usually start with that before writing my software. Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.'); INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. 
Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub. Learn more about actions As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you'); INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', ' The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves. Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update. Affected products GitHub Desktop GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app. Atom Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch. 
Ensure you’re on the latest Atom release by completing any of the following: Windows: From the toolbar, click Help -> Check for Updates MacOS: From the menu bar, click Atom -> Check for Update Linux: Update manually by downloading the latest release from atom.io Git on the command line and other clients In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other. Additional notes Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9. Details of the vulnerability This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself. The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix. The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability). The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands. We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added). Please update your copy of Git soon, and happy cloning! '); INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet. The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux. The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet. 
So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others. To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs. What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.'); INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres. rbanffy on Aug 18, 2012 [-] I think this would be a mistake. This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle. All that is lost is the MySQL name and brand. PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now. Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this. Udo on Aug 18, 2012 [-] I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project! 
MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing. Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem. Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own. The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. '); INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts? Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.'); INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code. Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files. '); INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness. This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming. What is PostgreSQL? 
PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge. A Brief History of PostgreSQL PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates. 1977-1985 − A project called INGRES was developed. Proof-of-concept for relational databases Established the company Ingres in 1980 Bought by Computer Associates in 1994 1986-1994 − POSTGRES Development of the concepts in INGRES with a focus on object orientation and the query language - Quel The code base of INGRES was not used as a basis for POSTGRES Commercialized as Illustra (bought by Informix, bought by IBM) 1994-1995 − Postgres95 Support for SQL was added in 1994 Released as Postgres95 in 1995 Re-released as PostgreSQL 6.0 in 1996 Establishment of the PostgreSQL Global Development Team Key Features of PostgreSQL PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC). PostgreSQL supports a large part of the SQL standard and offers many modern features including the following − Complex SQL queries SQL Sub-selects Foreign keys Trigger Views Transactions Multiversion concurrency control (MVCC) Streaming Replication (as of 9.0) Hot Standby (as of 9.0) You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. 
For example by adding new − Data types Functions Operators Aggregate functions Index methods Procedural Languages Support PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.'); INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup. I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost can be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try. Install Directly or not? On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do. In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM. Installing Docker Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled. After logging back in I then got the following message about hardware-assisted virtualization not being enabled. 
After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen. Open a command prompt and run the following command. docker run hello-world You should output that starts with the following if your installation is working. Hello from Docker! This message shows that your installation appears to be working correctly. What about Postgres? Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container. docker create -v /var/lib/postgresql/data --name PostgresData alpine The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects. Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container. If you run the docker ps -a command it will show you all your containers. As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running. 
'); INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it. The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.) Step 1: Install PostgreSQL Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution): Open a terminal window. Issue the command sudo apt-get install postgresql. Type the sudo password necessary to give you admin rights and hit Enter. Allow apt to pick up any necessary dependencies. Once the installation is complete, it''s time to set this baby up. Step 2: Change the default user password Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure. Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so: Open a terminal window. Issue the command sudo passwd postgres. Type (and confirm) that password to be used for this user. The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. 
To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like: postgres=# All other users have to gain access to the prompt like so: psql DB_NAME where DB_NAME is the name of an existing database. '); INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL. This seems to be the journey: 1. Lack of migrations is awesome! We can iterate so quickly for MVP 2. Get users 3. Add features, still enjoying the speed of iteration 4. Get more users 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts) 6. Realise you desperately need joins, transactions and other SQL features 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back. I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? My thought is definitely yes. brandur on Aug 29, 2017 [-] > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project. 
The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this. The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about. Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week. martinald on Aug 29, 2017 [-] I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away. I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done. Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly. 
I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly. I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. '); INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'In 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan. The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto. Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message. As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial. A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision. Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.'); INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts. I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.” I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices. Red Bull could sponsor it. I’d buy a T-shirt. kbenson 8 months ago [-] That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs. You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something. If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource. Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already... 1: https://github.com/gothinkster/realworld 2: https://www.techempower.com/benchmarks/ etxm 8 months ago [-] Yeah after I posted it I started thinking about what it would take and what that would actually look like... 
and how you’d cheat :) It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time. It would be cool to see things like disaster recovery and chaos proofing as well. '); INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community. Leveraging the community There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed. On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release. 
For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system. Always quality focused No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert. Learn more'); INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query. Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack. Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads. maxxxxx 8 months ago [-] Agreed. 
Switching to another system is expensive and the benefit is pretty questionable. emsy 8 months ago [-] Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once. TremendousJudge 8 months ago [-] expand, please? maxxxxx 8 months ago [-] I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure. In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other. gopalv 8 months ago [-] > Depending on your data some databases may be better than others and that should be easy to measure. And the performance difference could be an accidental feature of the design and completely unintentional. Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated. Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs). When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural. And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two. Though how it came about isn''t really intentional. 
'); """ )] <|reserved_special_token_1|> # Generated by Django 2.1.2 on 2018-10-25 09:36 import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone import uuid class Migration(migrations.Migration): dependencies = [ ('grafit', '0002_article'), ] operations = [ migrations.RunSQL(""" INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License. 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6] On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size. Indexing Fields in a MongoDB document can be indexed with primary and secondary indices. Replication MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. 
When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default. Load balancing[10] MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution. MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. '); INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8] Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. 
The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9] Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.) Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. '); INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. 
Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16] Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17] In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. 
After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] '); INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system." "We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services." "We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its reliability, robustness and very low-cost administration costs."'); INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link. Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7). Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action. 
According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc." phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms. Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases. Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link. "A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post. However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table. "If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc." Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible. 
'); INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed. When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design. "Who care about database design? What about mockups? What about workflows?" Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses. That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES. We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used. Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it. That''s when the marketing department had a brilliant, inexpensive idea. 
You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this. Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer. "Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that." Except you couldn''t enter a customer without an order. Except you couldn''t enter an order without at least one item on it. Except you couldn''t enter an item unless it was listed in inventory. Except that reserved the "inventory" item and made it unavailable. Except, except, except ... It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember. Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them. If they had simply had a proper database design up front, they could have reused their existing system with little trouble. That''s what bad database design costs you and why I usually start with that before writing my software. Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.'); INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. 
Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub. Learn more about actions As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you'); INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', ' The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves. Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update. Affected products GitHub Desktop GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app. Atom Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch. 
Ensure you’re on the latest Atom release by completing any of the following: Windows: From the toolbar, click Help -> Check for Updates MacOS: From the menu bar, click Atom -> Check for Update Linux: Update manually by downloading the latest release from atom.io Git on the command line and other clients In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other. Additional notes Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9. Details of the vulnerability This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself. The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix. The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability). The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands. We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added). Please update your copy of Git soon, and happy cloning! '); INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet. The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux. The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet. 
So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others. To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs. What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.'); INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres. rbanffy on Aug 18, 2012 [-] I think this would be a mistake. This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle. All that is lost is the MySQL name and brand. PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now. Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this. Udo on Aug 18, 2012 [-] I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project! 
MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing. Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem. Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own. The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. '); INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts? Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.'); INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code. Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files. '); INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness. This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming. What is PostgreSQL?
PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge. A Brief History of PostgreSQL PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates. 1977-1985 − A project called INGRES was developed. Proof-of-concept for relational databases Established the company Ingres in 1980 Bought by Computer Associates in 1994 1986-1994 − POSTGRES Development of the concepts in INGRES with a focus on object orientation and the query language - Quel The code base of INGRES was not used as a basis for POSTGRES Commercialized as Illustra (bought by Informix, bought by IBM) 1994-1995 − Postgres95 Support for SQL was added in 1994 Released as Postgres95 in 1995 Re-released as PostgreSQL 6.0 in 1996 Establishment of the PostgreSQL Global Development Team Key Features of PostgreSQL PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC). PostgreSQL supports a large part of the SQL standard and offers many modern features including the following − Complex SQL queries SQL Sub-selects Foreign keys Trigger Views Transactions Multiversion concurrency control (MVCC) Streaming Replication (as of 9.0) Hot Standby (as of 9.0) You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. 
For example by adding new − Data types Functions Operators Aggregate functions Index methods Procedural Languages Support PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.'); INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup. I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try. Install Directly or not? On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do. In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM. Installing Docker Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled. After logging back in I then got the following message about hardware-assisted virtualization not being enabled. 
After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen. Open a command prompt and run the following command. docker run hello-world You should output that starts with the following if your installation is working. Hello from Docker! This message shows that your installation appears to be working correctly. What about Postgres? Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container. docker create -v /var/lib/postgresql/data --name PostgresData alpine The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects. Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container. If you run the docker ps -a command it will show you all your containers. As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running. 
'); INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it. The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.) Step 1: Install PostgreSQL Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution): Open a terminal window. Issue the command sudo apt-get install postgresql. Type the sudo password necessary to give you admin rights and hit Enter. Allow apt to pick up any necessary dependencies. Once the installation is complete, it''s time to set this baby up. Step 2: Change the default user password Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure. Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so: Open a terminal window. Issue the command sudo passwd postgres. Type (and confirm) that password to be used for this user. The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. 
To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like: postgres=# All other users have to gain access to the prompt like so: psql DB_NAME where DB_NAME is the name of an existing database. '); INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL. This seems to be the journey: 1. Lack of migrations is awesome! We can iterate so quickly for MVP 2. Get users 3. Add features, still enjoying the speed of iteration 4. Get more users 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts) 6. Realise you desperately need joins, transactions and other SQL features 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back. I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? My thought is definitely yes. brandur on Aug 29, 2017 [-] > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project. 
The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this. The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about. Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week. martinald on Aug 29, 2017 [-] I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away. I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done. Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly. 
I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly. I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. '); INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan. The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto. Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message. As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial. A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision. Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.'); INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts. I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.” I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices. Red Bull could sponsor it. I’d buy a T-shirt. kbenson 8 months ago [-] That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs. You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something. If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource. Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already... 1: https://github.com/gothinkster/realworld 2: https://www.techempower.com/benchmarks/ etxm 8 months ago [-] Yeah after I posted it I started thinking about what it would take and what that would actually look like... 
and how you’d cheat :) It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time. It would be cool to see things like disaster recovery and chaos proofing as well. '); INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community. Leveraging the community There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed. On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release. 
For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system. Always quality focused No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert. Learn more'); INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query. Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack. Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads. maxxxxx 8 months ago [-] Agreed. 
Switching to another system is expensive and the benefit is pretty questionable. emsy 8 months ago [-] Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once. TremendousJudge 8 months ago [-] expand, please? maxxxxx 8 months ago [-] I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure. In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other. gopalv 8 months ago [-] > Depending on your data some databases may be better than others and that should be easy to measure. And the performance difference could be an accidental feature of the design and completely unintentional. Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated. Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs). When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural. And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two. Though how it came about isn''t really intentional. '); """), ]
flexible
{ "blob_id": "8b0eed6d1f24b5dd30726ce08c97354a5d5ab69b", "index": 7597, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('grafit', '0002_article')]\n operations = [migrations.RunSQL(\n \"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. 
When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. 
The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. 
Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. 
After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. 
The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. 
In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. \"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. 
A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? 
So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. 
GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. 
Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most 
compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. 
Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. 
It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. 
It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. 
This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. 
It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. 
The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. 
Realise you desperately need joins, transactions and other SQL features\n\n 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. 
Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. 
');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). 
That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. 
If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. 
You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. 
This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"\n )]\n", "step-4": "import django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport django.utils.timezone\nimport uuid\n\n\nclass Migration(migrations.Migration):\n dependencies = [('grafit', '0002_article')]\n operations = [migrations.RunSQL(\n \"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. 
Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. 
Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. 
(See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. 
(now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. 
The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. 
In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. \"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. 
A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? 
So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. 
GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. 
Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most 
compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. 
Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. 
It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. 
It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. 
This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. 
It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. 
The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. 
Realise you desperately need joins, transactions and other SQL features\n\n 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. 
Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. 
');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). 
That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. 
If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. 
You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. 
This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"\n )]\n", "step-5": "# Generated by Django 2.1.2 on 2018-10-25 09:36\n\nimport django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport django.utils.timezone\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('grafit', '0002_article'),\n ]\n\n operations = [\n migrations.RunSQL(\"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. 
Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. 
Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. 
(See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. 
(now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. 
The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. 
In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. \"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. 
A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? 
So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. 
GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. 
Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most 
compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. 
Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. 
It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. 
It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. 
This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. 
It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. 
The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. 
Realise you desperately need joins, transactions and other SQL features\n\n 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. 
Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. 
');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). 
That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. 
If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. 
You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. 
This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def main(): if len(sys.argv) != 3: sys.stderr.write('USAGE: %s input output\n' % sys.argv[0]) sys.stderr.flush() sys.exit(0) with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp: process(inpt, outp) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def process(inpt, outp): def traverse(node): for child in node.childNodes: if child.nodeType != minidom.Node.ELEMENT_NODE: continue elif child.tagName in ('animate', 'animateTransform'): node.removeChild(child) elif child.tagName in ('style', 'script'): if child.getAttribute('key') == 'animation': node.removeChild(child) else: traverse(child) node.normalize() if len(node.childNodes) == 0: return for child in (node.childNodes[0], node.childNodes[-1]): if child.nodeType != minidom.Node.TEXT_NODE: continue if not child.data.isspace() or child.data.count('\n') <= 1: continue if len(node.childNodes) == 1: node.removeChild(child) return child.data = re.sub('\\n.*\\n', '\\n', child.data) document = minidom.parse(inpt) traverse(document.documentElement) outp.write('<?xml version="1.0" encoding="utf-8"?>\n') document.documentElement.writexml(outp) outp.write('\n') def main(): if len(sys.argv) != 3: sys.stderr.write('USAGE: %s input output\n' % sys.argv[0]) sys.stderr.flush() sys.exit(0) with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp: process(inpt, outp) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def process(inpt, outp): def traverse(node): for child in node.childNodes: if child.nodeType != minidom.Node.ELEMENT_NODE: continue elif child.tagName in ('animate', 'animateTransform'): node.removeChild(child) elif child.tagName in ('style', 'script'): if child.getAttribute('key') == 'animation': node.removeChild(child) else: traverse(child) node.normalize() if len(node.childNodes) == 0: return for child in (node.childNodes[0], node.childNodes[-1]): if child.nodeType != minidom.Node.TEXT_NODE: continue 
if not child.data.isspace() or child.data.count('\n') <= 1: continue if len(node.childNodes) == 1: node.removeChild(child) return child.data = re.sub('\\n.*\\n', '\\n', child.data) document = minidom.parse(inpt) traverse(document.documentElement) outp.write('<?xml version="1.0" encoding="utf-8"?>\n') document.documentElement.writexml(outp) outp.write('\n') def main(): if len(sys.argv) != 3: sys.stderr.write('USAGE: %s input output\n' % sys.argv[0]) sys.stderr.flush() sys.exit(0) with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp: process(inpt, outp) if __name__ == '__main__': main() <|reserved_special_token_1|> <|reserved_special_token_0|> import sys, os, re from xml.dom import minidom def process(inpt, outp): def traverse(node): for child in node.childNodes: if child.nodeType != minidom.Node.ELEMENT_NODE: continue elif child.tagName in ('animate', 'animateTransform'): node.removeChild(child) elif child.tagName in ('style', 'script'): if child.getAttribute('key') == 'animation': node.removeChild(child) else: traverse(child) node.normalize() if len(node.childNodes) == 0: return for child in (node.childNodes[0], node.childNodes[-1]): if child.nodeType != minidom.Node.TEXT_NODE: continue if not child.data.isspace() or child.data.count('\n') <= 1: continue if len(node.childNodes) == 1: node.removeChild(child) return child.data = re.sub('\\n.*\\n', '\\n', child.data) document = minidom.parse(inpt) traverse(document.documentElement) outp.write('<?xml version="1.0" encoding="utf-8"?>\n') document.documentElement.writexml(outp) outp.write('\n') def main(): if len(sys.argv) != 3: sys.stderr.write('USAGE: %s input output\n' % sys.argv[0]) sys.stderr.flush() sys.exit(0) with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp: process(inpt, outp) if __name__ == '__main__': main() <|reserved_special_token_1|> #!/usr/bin/env python3 # -*- coding: ascii -*- """ A script removing animations from SVG graphics. 
""" import sys, os, re # etree fails utterly at producing nice-looking XML from xml.dom import minidom def process(inpt, outp): def traverse(node): for child in node.childNodes: if child.nodeType != minidom.Node.ELEMENT_NODE: continue elif child.tagName in ('animate', 'animateTransform'): node.removeChild(child) elif child.tagName in ('style', 'script'): if child.getAttribute('key') == 'animation': node.removeChild(child) else: traverse(child) node.normalize() if len(node.childNodes) == 0: return for child in (node.childNodes[0], node.childNodes[-1]): if child.nodeType != minidom.Node.TEXT_NODE: continue if not child.data.isspace() or child.data.count('\n') <= 1: continue if len(node.childNodes) == 1: node.removeChild(child) return child.data = re.sub(r'\n.*\n', r'\n', child.data) document = minidom.parse(inpt) traverse(document.documentElement) outp.write('<?xml version="1.0" encoding="utf-8"?>\n') document.documentElement.writexml(outp) outp.write('\n') def main(): if len(sys.argv) != 3: sys.stderr.write('USAGE: %s input output\n' % sys.argv[0]) sys.stderr.flush() sys.exit(0) with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp: process(inpt, outp) if __name__ == '__main__': main()
flexible
{ "blob_id": "f819d1b1f2f6f3052247cda592007eac40aca37a", "index": 7927, "step-1": "<mask token>\n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef process(inpt, outp):\n\n def traverse(node):\n for child in node.childNodes:\n if child.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif child.tagName in ('animate', 'animateTransform'):\n node.removeChild(child)\n elif child.tagName in ('style', 'script'):\n if child.getAttribute('key') == 'animation':\n node.removeChild(child)\n else:\n traverse(child)\n node.normalize()\n if len(node.childNodes) == 0:\n return\n for child in (node.childNodes[0], node.childNodes[-1]):\n if child.nodeType != minidom.Node.TEXT_NODE:\n continue\n if not child.data.isspace() or child.data.count('\\n') <= 1:\n continue\n if len(node.childNodes) == 1:\n node.removeChild(child)\n return\n child.data = re.sub('\\\\n.*\\\\n', '\\\\n', child.data)\n document = minidom.parse(inpt)\n traverse(document.documentElement)\n outp.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n document.documentElement.writexml(outp)\n outp.write('\\n')\n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef process(inpt, outp):\n\n def traverse(node):\n for child in node.childNodes:\n if child.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif child.tagName in ('animate', 'animateTransform'):\n node.removeChild(child)\n elif child.tagName in ('style', 'script'):\n if child.getAttribute('key') == 'animation':\n node.removeChild(child)\n else:\n traverse(child)\n node.normalize()\n if 
len(node.childNodes) == 0:\n return\n for child in (node.childNodes[0], node.childNodes[-1]):\n if child.nodeType != minidom.Node.TEXT_NODE:\n continue\n if not child.data.isspace() or child.data.count('\\n') <= 1:\n continue\n if len(node.childNodes) == 1:\n node.removeChild(child)\n return\n child.data = re.sub('\\\\n.*\\\\n', '\\\\n', child.data)\n document = minidom.parse(inpt)\n traverse(document.documentElement)\n outp.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n document.documentElement.writexml(outp)\n outp.write('\\n')\n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nimport sys, os, re\nfrom xml.dom import minidom\n\n\ndef process(inpt, outp):\n\n def traverse(node):\n for child in node.childNodes:\n if child.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif child.tagName in ('animate', 'animateTransform'):\n node.removeChild(child)\n elif child.tagName in ('style', 'script'):\n if child.getAttribute('key') == 'animation':\n node.removeChild(child)\n else:\n traverse(child)\n node.normalize()\n if len(node.childNodes) == 0:\n return\n for child in (node.childNodes[0], node.childNodes[-1]):\n if child.nodeType != minidom.Node.TEXT_NODE:\n continue\n if not child.data.isspace() or child.data.count('\\n') <= 1:\n continue\n if len(node.childNodes) == 1:\n node.removeChild(child)\n return\n child.data = re.sub('\\\\n.*\\\\n', '\\\\n', child.data)\n document = minidom.parse(inpt)\n traverse(document.documentElement)\n outp.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n document.documentElement.writexml(outp)\n outp.write('\\n')\n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) 
as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: ascii -*-\n\n\"\"\"\nA script removing animations from SVG graphics.\n\"\"\"\n\nimport sys, os, re\n\n# etree fails utterly at producing nice-looking XML\nfrom xml.dom import minidom\n\ndef process(inpt, outp):\n def traverse(node):\n for child in node.childNodes:\n if child.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif child.tagName in ('animate', 'animateTransform'):\n node.removeChild(child)\n elif child.tagName in ('style', 'script'):\n if child.getAttribute('key') == 'animation':\n node.removeChild(child)\n else:\n traverse(child)\n node.normalize()\n if len(node.childNodes) == 0: return\n for child in (node.childNodes[0], node.childNodes[-1]):\n if child.nodeType != minidom.Node.TEXT_NODE:\n continue\n if not child.data.isspace() or child.data.count('\\n') <= 1:\n continue\n if len(node.childNodes) == 1:\n node.removeChild(child)\n return\n child.data = re.sub(r'\\n.*\\n', r'\\n', child.data)\n document = minidom.parse(inpt)\n traverse(document.documentElement)\n outp.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n document.documentElement.writexml(outp)\n outp.write('\\n')\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\nif __name__ == '__main__': main()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class StardogGraphStore(GraphStore): <|reserved_special_token_0|> def check_whether_db_exists(self): logger.debug("Checking whether a triple store with db '{}' exists..." .format(self._node_ts_url)) url = self._get_ts_db_url() r = requests.get(url, auth=(self._ts_user, self._ts_pass)) status_code = r.status_code logger.debug('Status type of response whether db exists: {}.'. format(status_code)) return status_code == 200 def add_graph(self, raw_graph, graph_format, graph_hash): logger.debug("Adding graph to the triple store with URL '{}'...". format(self._get_sparql_endpoint_for_update())) ihash = GraphStore.IHASH_PREFIX.format(graph_hash) g = Graph() g.parse(data=raw_graph, format=graph_format) sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g. serialize(format='nt').decode()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setCredentials(self._ts_user, self._ts_pass) sparql_query.query() def check_if_graph_is_already_stored(self, graph_hash: str) ->bool: ihash = GraphStore.IHASH_PREFIX.format(graph_hash) logger.debug( "Checking whether graph '{}' is already in the triple store..." 
.format(ihash)) query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash) sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setReturnFormat(JSON) sparql_query.setCredentials(self._ts_user, self._ts_pass) result = sparql_query.query() return result.convert()['boolean'] <|reserved_special_token_1|> <|reserved_special_token_0|> class StardogGraphStore(GraphStore): def __init__(self, ts_db_name, ts_url, ts_user, ts_pass): super(StardogGraphStore, self).__init__(ts_db_name, ts_url) self._ts_user = ts_user self._ts_pass = ts_pass msg = ( "Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'." .format(ts_user, self._node_ts_url)) logger.info(msg) def check_whether_db_exists(self): logger.debug("Checking whether a triple store with db '{}' exists..." .format(self._node_ts_url)) url = self._get_ts_db_url() r = requests.get(url, auth=(self._ts_user, self._ts_pass)) status_code = r.status_code logger.debug('Status type of response whether db exists: {}.'. format(status_code)) return status_code == 200 def add_graph(self, raw_graph, graph_format, graph_hash): logger.debug("Adding graph to the triple store with URL '{}'...". format(self._get_sparql_endpoint_for_update())) ihash = GraphStore.IHASH_PREFIX.format(graph_hash) g = Graph() g.parse(data=raw_graph, format=graph_format) sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g. serialize(format='nt').decode()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setCredentials(self._ts_user, self._ts_pass) sparql_query.query() def check_if_graph_is_already_stored(self, graph_hash: str) ->bool: ihash = GraphStore.IHASH_PREFIX.format(graph_hash) logger.debug( "Checking whether graph '{}' is already in the triple store..." 
.format(ihash)) query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash) sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setReturnFormat(JSON) sparql_query.setCredentials(self._ts_user, self._ts_pass) result = sparql_query.query() return result.convert()['boolean'] <|reserved_special_token_1|> <|reserved_special_token_0|> logger = get_debug_logger() class StardogGraphStore(GraphStore): def __init__(self, ts_db_name, ts_url, ts_user, ts_pass): super(StardogGraphStore, self).__init__(ts_db_name, ts_url) self._ts_user = ts_user self._ts_pass = ts_pass msg = ( "Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'." .format(ts_user, self._node_ts_url)) logger.info(msg) def check_whether_db_exists(self): logger.debug("Checking whether a triple store with db '{}' exists..." .format(self._node_ts_url)) url = self._get_ts_db_url() r = requests.get(url, auth=(self._ts_user, self._ts_pass)) status_code = r.status_code logger.debug('Status type of response whether db exists: {}.'. format(status_code)) return status_code == 200 def add_graph(self, raw_graph, graph_format, graph_hash): logger.debug("Adding graph to the triple store with URL '{}'...". format(self._get_sparql_endpoint_for_update())) ihash = GraphStore.IHASH_PREFIX.format(graph_hash) g = Graph() g.parse(data=raw_graph, format=graph_format) sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g. 
serialize(format='nt').decode()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setCredentials(self._ts_user, self._ts_pass) sparql_query.query() def check_if_graph_is_already_stored(self, graph_hash: str) ->bool: ihash = GraphStore.IHASH_PREFIX.format(graph_hash) logger.debug( "Checking whether graph '{}' is already in the triple store..." .format(ihash)) query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash) sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setReturnFormat(JSON) sparql_query.setCredentials(self._ts_user, self._ts_pass) result = sparql_query.query() return result.convert()['boolean'] <|reserved_special_token_1|> import requests from SPARQLWrapper import SPARQLWrapper, JSON from rdflib import Graph from plenum.server.plugin.graphchain.graph_store import GraphStore from plenum.server.plugin.graphchain.logger import get_debug_logger logger = get_debug_logger() class StardogGraphStore(GraphStore): def __init__(self, ts_db_name, ts_url, ts_user, ts_pass): super(StardogGraphStore, self).__init__(ts_db_name, ts_url) self._ts_user = ts_user self._ts_pass = ts_pass msg = ( "Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'." .format(ts_user, self._node_ts_url)) logger.info(msg) def check_whether_db_exists(self): logger.debug("Checking whether a triple store with db '{}' exists..." .format(self._node_ts_url)) url = self._get_ts_db_url() r = requests.get(url, auth=(self._ts_user, self._ts_pass)) status_code = r.status_code logger.debug('Status type of response whether db exists: {}.'. format(status_code)) return status_code == 200 def add_graph(self, raw_graph, graph_format, graph_hash): logger.debug("Adding graph to the triple store with URL '{}'...". 
format(self._get_sparql_endpoint_for_update())) ihash = GraphStore.IHASH_PREFIX.format(graph_hash) g = Graph() g.parse(data=raw_graph, format=graph_format) sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g. serialize(format='nt').decode()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setCredentials(self._ts_user, self._ts_pass) sparql_query.query() def check_if_graph_is_already_stored(self, graph_hash: str) ->bool: ihash = GraphStore.IHASH_PREFIX.format(graph_hash) logger.debug( "Checking whether graph '{}' is already in the triple store..." .format(ihash)) query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash) sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setReturnFormat(JSON) sparql_query.setCredentials(self._ts_user, self._ts_pass) result = sparql_query.query() return result.convert()['boolean'] <|reserved_special_token_1|> import requests from SPARQLWrapper import SPARQLWrapper, JSON from rdflib import Graph from plenum.server.plugin.graphchain.graph_store import GraphStore from plenum.server.plugin.graphchain.logger import get_debug_logger logger = get_debug_logger() class StardogGraphStore(GraphStore): def __init__(self, ts_db_name, ts_url, ts_user, ts_pass): super(StardogGraphStore, self).__init__(ts_db_name, ts_url) self._ts_user = ts_user self._ts_pass = ts_pass msg = "Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'." 
\ .format(ts_user, self._node_ts_url) logger.info(msg) def check_whether_db_exists(self): logger.debug("Checking whether a triple store with db '{}' exists...".format(self._node_ts_url)) url = self._get_ts_db_url() r = requests.get(url, auth=(self._ts_user, self._ts_pass)) status_code = r.status_code logger.debug("Status type of response whether db exists: {}.".format(status_code)) return status_code == 200 def add_graph(self, raw_graph, graph_format, graph_hash): logger.debug("Adding graph to the triple store with URL '{}'...".format(self._get_sparql_endpoint_for_update())) ihash = GraphStore.IHASH_PREFIX.format(graph_hash) g = Graph() g.parse(data=raw_graph, format=graph_format) sparql_query = SPARQLWrapper( self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.serialize(format='nt').decode()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setCredentials(self._ts_user, self._ts_pass) sparql_query.query() def check_if_graph_is_already_stored(self, graph_hash: str) -> bool: ihash = GraphStore.IHASH_PREFIX.format(graph_hash) logger.debug("Checking whether graph '{}' is already in the triple store...".format(ihash)) query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash) sparql_query = SPARQLWrapper( self._get_sparql_endpoint_for_query(), self._get_sparql_endpoint_for_update()) sparql_query.setQuery(query) sparql_query.method = 'POST' sparql_query.setReturnFormat(JSON) sparql_query.setCredentials(self._ts_user, self._ts_pass) result = sparql_query.query() return result.convert()['boolean']
flexible
{ "blob_id": "a42a94798d176e20646d41cf0f4b7e4f99e0790b", "index": 105, "step-1": "<mask token>\n\n\nclass StardogGraphStore(GraphStore):\n <mask token>\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\"\n .format(self._node_ts_url))\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug('Status type of response whether db exists: {}.'.\n format(status_code))\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".\n format(self._get_sparql_endpoint_for_update()))\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.\n serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n logger.debug(\n \"Checking whether graph '{}' is already in the triple store...\"\n .format(ihash))\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n return result.convert()['boolean']\n", "step-2": "<mask token>\n\n\nclass StardogGraphStore(GraphStore):\n\n def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):\n super(StardogGraphStore, 
self).__init__(ts_db_name, ts_url)\n self._ts_user = ts_user\n self._ts_pass = ts_pass\n msg = (\n \"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'.\"\n .format(ts_user, self._node_ts_url))\n logger.info(msg)\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\"\n .format(self._node_ts_url))\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug('Status type of response whether db exists: {}.'.\n format(status_code))\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".\n format(self._get_sparql_endpoint_for_update()))\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.\n serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n logger.debug(\n \"Checking whether graph '{}' is already in the triple store...\"\n .format(ihash))\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n return result.convert()['boolean']\n", "step-3": "<mask token>\nlogger = get_debug_logger()\n\n\nclass 
StardogGraphStore(GraphStore):\n\n def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):\n super(StardogGraphStore, self).__init__(ts_db_name, ts_url)\n self._ts_user = ts_user\n self._ts_pass = ts_pass\n msg = (\n \"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'.\"\n .format(ts_user, self._node_ts_url))\n logger.info(msg)\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\"\n .format(self._node_ts_url))\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug('Status type of response whether db exists: {}.'.\n format(status_code))\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".\n format(self._get_sparql_endpoint_for_update()))\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.\n serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n logger.debug(\n \"Checking whether graph '{}' is already in the triple store...\"\n .format(ihash))\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n 
return result.convert()['boolean']\n", "step-4": "import requests\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nfrom rdflib import Graph\nfrom plenum.server.plugin.graphchain.graph_store import GraphStore\nfrom plenum.server.plugin.graphchain.logger import get_debug_logger\nlogger = get_debug_logger()\n\n\nclass StardogGraphStore(GraphStore):\n\n def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):\n super(StardogGraphStore, self).__init__(ts_db_name, ts_url)\n self._ts_user = ts_user\n self._ts_pass = ts_pass\n msg = (\n \"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'.\"\n .format(ts_user, self._node_ts_url))\n logger.info(msg)\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\"\n .format(self._node_ts_url))\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug('Status type of response whether db exists: {}.'.\n format(status_code))\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".\n format(self._get_sparql_endpoint_for_update()))\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.\n serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n logger.debug(\n \"Checking whether graph '{}' is already in the triple store...\"\n .format(ihash))\n query = 
GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n return result.convert()['boolean']\n", "step-5": "import requests\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nfrom rdflib import Graph\n\nfrom plenum.server.plugin.graphchain.graph_store import GraphStore\nfrom plenum.server.plugin.graphchain.logger import get_debug_logger\n\nlogger = get_debug_logger()\n\n\nclass StardogGraphStore(GraphStore):\n def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):\n super(StardogGraphStore, self).__init__(ts_db_name, ts_url)\n\n self._ts_user = ts_user\n self._ts_pass = ts_pass\n\n msg = \"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'.\" \\\n .format(ts_user, self._node_ts_url)\n logger.info(msg)\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\".format(self._node_ts_url))\n\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug(\"Status type of response whether db exists: {}.\".format(status_code))\n\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".format(self._get_sparql_endpoint_for_update()))\n\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n\n sparql_query = SPARQLWrapper(\n self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 
'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) -> bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n\n logger.debug(\"Checking whether graph '{}' is already in the triple store...\".format(ihash))\n\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n\n sparql_query = SPARQLWrapper(\n self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n return result.convert()['boolean']\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
from kivy.app import App from kivy.uix.floatlayout import FloatLayout class LayoutWindow(FloatLayout): pass class floatlayoutApp(App): def build(self): return LayoutWindow() if __name__== "__main__": display = floatlayoutApp() display.run()
normal
{ "blob_id": "2af8677e76b77b9bfa579012a85ea331c0c7f390", "index": 136, "step-1": "<mask token>\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\nif __name__ == '__main__':\n display = floatlayoutApp()\n display.run()\n", "step-4": "from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\nif __name__ == '__main__':\n display = floatlayoutApp()\n display.run()\n", "step-5": "from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n def build(self):\n return LayoutWindow()\n\n\nif __name__== \"__main__\":\n display = floatlayoutApp()\n display.run()", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
class TflearnDataSourceExtraTemplate(object): """ Base class for TFLearn's DataSource (if we use wrapping). Parameters: ---------- rewrite_data_aug : bool use wrapper for data augmentation """ def __init__(self, rewrite_data_aug=False): self.rewrite_data_aug = rewrite_data_aug
normal
{ "blob_id": "70c084dab8469ca34b0e3e5174101111e695f1ca", "index": 6638, "step-1": "<mask token>\n", "step-2": "class TflearnDataSourceExtraTemplate(object):\n <mask token>\n <mask token>\n", "step-3": "class TflearnDataSourceExtraTemplate(object):\n <mask token>\n\n def __init__(self, rewrite_data_aug=False):\n self.rewrite_data_aug = rewrite_data_aug\n", "step-4": "class TflearnDataSourceExtraTemplate(object):\n \"\"\"\n Base class for TFLearn's DataSource (if we use wrapping).\n\n Parameters:\n ----------\n rewrite_data_aug : bool\n use wrapper for data augmentation\n \"\"\"\n\n def __init__(self, rewrite_data_aug=False):\n self.rewrite_data_aug = rewrite_data_aug\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> def downgrade(): op.drop_column('stakeholder', 'archived') <|reserved_special_token_1|> <|reserved_special_token_0|> def upgrade(): op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default='false')) def downgrade(): op.drop_column('stakeholder', 'archived') <|reserved_special_token_1|> <|reserved_special_token_0|> revision = '42cf7f6532dd' down_revision = 'e6d4ac8564fb' branch_labels = None depends_on = None def upgrade(): op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default='false')) def downgrade(): op.drop_column('stakeholder', 'archived') <|reserved_special_token_1|> <|reserved_special_token_0|> from alembic import op import sqlalchemy as sa revision = '42cf7f6532dd' down_revision = 'e6d4ac8564fb' branch_labels = None depends_on = None def upgrade(): op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default='false')) def downgrade(): op.drop_column('stakeholder', 'archived') <|reserved_special_token_1|> """empty message Revision ID: 42cf7f6532dd Revises: e6d4ac8564fb Create Date: 2019-04-01 16:13:37.207305 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '42cf7f6532dd' down_revision = 'e6d4ac8564fb' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default="false")) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('stakeholder', 'archived') # ### end Alembic commands ###
flexible
{ "blob_id": "42d9f40dd50056b1c258508a6cb3f9875680276a", "index": 3393, "step-1": "<mask token>\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n", "step-2": "<mask token>\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n", "step-3": "<mask token>\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n", "step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n", "step-5": "\"\"\"empty message\n\nRevision ID: 42cf7f6532dd\nRevises: e6d4ac8564fb\nCreate Date: 2019-04-01 16:13:37.207305\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default=\"false\"))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('stakeholder', 'archived')\n # ### end Alembic commands ###\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author : 河北雪域网络科技有限公司 A.Star # @contact: astar@snowland.ltd # @site: # @file: img_to_sketch.py # @time: 2018/8/6 1:15 # @Software: PyCharm from skimage.color import rgb2grey import numpy as np def sketch(img, threshold=15): """ 素描画生成 param img: Image实例   param threshold: 介于0到100 :return: """ if threshold < 0: threshold = 0 if threshold > 100: threshold = 100 if len(img.shape) == 3: img = rgb2grey(img) m, n = img.shape diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:]) img = np.zeros((m - 1, n - 1)) img[diff < threshold/255] = 1 return img
normal
{ "blob_id": "065354d2a8fd8a75e16bf85f624b12641377029a", "index": 8568, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef sketch(img, threshold=15):\n \"\"\"\n 素描画生成\n param img: Image实例\n  param threshold: 介于0到100\n :return:\n \"\"\"\n if threshold < 0:\n threshold = 0\n if threshold > 100:\n threshold = 100\n if len(img.shape) == 3:\n img = rgb2grey(img)\n m, n = img.shape\n diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:])\n img = np.zeros((m - 1, n - 1))\n img[diff < threshold / 255] = 1\n return img\n", "step-3": "from skimage.color import rgb2grey\nimport numpy as np\n\n\ndef sketch(img, threshold=15):\n \"\"\"\n 素描画生成\n param img: Image实例\n  param threshold: 介于0到100\n :return:\n \"\"\"\n if threshold < 0:\n threshold = 0\n if threshold > 100:\n threshold = 100\n if len(img.shape) == 3:\n img = rgb2grey(img)\n m, n = img.shape\n diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:])\n img = np.zeros((m - 1, n - 1))\n img[diff < threshold / 255] = 1\n return img\n", "step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : 河北雪域网络科技有限公司 A.Star\n# @contact: astar@snowland.ltd\n# @site: \n# @file: img_to_sketch.py\n# @time: 2018/8/6 1:15\n# @Software: PyCharm\n\nfrom skimage.color import rgb2grey\nimport numpy as np\n\n\ndef sketch(img, threshold=15):\n \"\"\"\n 素描画生成\n param img: Image实例\n  param threshold: 介于0到100\n :return:\n \"\"\"\n if threshold < 0:\n threshold = 0\n if threshold > 100:\n threshold = 100\n if len(img.shape) == 3:\n img = rgb2grey(img)\n m, n = img.shape\n diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:])\n img = np.zeros((m - 1, n - 1))\n img[diff < threshold/255] = 1\n return img\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
/Users/tanzy/anaconda3/lib/python3.6/_dummy_thread.py
normal
{ "blob_id": "08a5a903d3757f8821554aa3649ec2ac2b2995a5", "index": 911, "step-1": "/Users/tanzy/anaconda3/lib/python3.6/_dummy_thread.py", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from math import ceil, log2, sqrt def constructST(s, start, end, st, i): if start == end: st[i] = 0 openst[i] = 1 if s[start] == '(' else 0 closedst[i] = 1 if s[start] == ')' else 0 return st[i], openst[i], closedst[i] else: mid = (start+end)//2 st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2*i+1) a, b, c = constructST(s, mid+1, end, st, 2*i+2) tmp = min(openst[2*i+1], closedst[2*i+2]) st[i] += tmp + a openst[i] += b-tmp closedst[i] += c -tmp return st[i], openst[i], closedst[i] def query(s, start, end, l, r, st, i): if l > end or r < start: return 0, 0, 0 elif start >= l and end <= r: return st[i], openst[i], closedst[i] else: mid = (start + end)//2 a, b, c = query(s, start, mid, l, r, st, 2*i+1) d, e, f = query(s, mid+1, end, l, r, st, 2*i+2) tmp = min(b, f) T = a+d +tmp O = b+e - tmp C = c+f - tmp return T, O, C s = input() n = len(s) x = int(ceil(log2(n))) max_size = 2*pow(2, x) -1 st = [0 for i in range(0, max_size)] openst = [0 for i in range(0, max_size)] closedst = [0 for i in range(0, max_size)] constructST(s, 0, n-1, st, 0) # print(st) # print(openst) # print(closedst) for _ in range(int(input())): l, r = map(int, input().split()) print(2*query(s, 0, n-1, l-1, r-1, st, 0)[0])
normal
{ "blob_id": "ccc74f58eff3bb00f0be8c2c963de4208b7f0933", "index": 9125, "step-1": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\n<mask 
token>\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n", "step-3": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\ns = input()\nn = len(s)\nx = int(ceil(log2(n)))\nmax_size = 2 * pow(2, x) - 1\nst = [(0) for i in range(0, max_size)]\nopenst = [(0) for i in range(0, max_size)]\nclosedst = [(0) for i in range(0, max_size)]\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n", "step-4": "from math import ceil, log2, sqrt\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n 
closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\ns = input()\nn = len(s)\nx = int(ceil(log2(n)))\nmax_size = 2 * pow(2, x) - 1\nst = [(0) for i in range(0, max_size)]\nopenst = [(0) for i in range(0, max_size)]\nclosedst = [(0) for i in range(0, max_size)]\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n", "step-5": "from math import ceil, log2, sqrt\r\n\r\ndef constructST(s, start, end, st, i):\r\n\tif start == end:\r\n\t\tst[i] = 0\r\n\t\topenst[i] = 1 if s[start] == '(' else 0\r\n\t\tclosedst[i] = 1 if s[start] == ')' else 0\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\r\n\telse:\r\n\t\tmid = (start+end)//2\r\n\t\tst[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2*i+1) \r\n\t\ta, b, c = constructST(s, mid+1, end, st, 2*i+2)\r\n\t\ttmp = min(openst[2*i+1], closedst[2*i+2])\r\n\t\tst[i] += tmp + a\r\n\t\topenst[i] += b-tmp\r\n\t\tclosedst[i] += c -tmp\r\n\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\r\ndef query(s, start, end, l, r, st, i):\r\n\tif l > end or r < start:\r\n\t\treturn 0, 0, 0\r\n\telif start >= l and end <= r:\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\telse:\r\n\t\tmid = (start + end)//2\r\n\t\ta, b, c = query(s, start, mid, l, r, st, 2*i+1) \r\n\t\td, e, f = query(s, mid+1, end, l, r, st, 2*i+2)\r\n\t\ttmp = min(b, f)\r\n\t\tT = a+d +tmp\r\n\t\tO = b+e - tmp\r\n\t\tC = c+f - tmp\r\n\treturn T, O, C\r\n\r\n\r\n\r\ns = input()\r\nn = len(s)\r\nx = int(ceil(log2(n)))\r\nmax_size = 2*pow(2, x) -1\t\r\n\r\nst = [0 for 
i in range(0, max_size)]\r\nopenst = [0 for i in range(0, max_size)]\r\nclosedst = [0 for i in range(0, max_size)]\r\n\r\nconstructST(s, 0, n-1, st, 0)\r\n# print(st)\r\n# print(openst)\r\n# print(closedst)\r\nfor _ in range(int(input())):\r\n\tl, r = map(int, input().split())\r\n\tprint(2*query(s, 0, n-1, l-1, r-1, st, 0)[0])\r\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if recipe not in recipes: user.add_recipes([recipe]) db.session.commit() <|reserved_special_token_1|> <|reserved_special_token_0|> user = User.query.filter_by(username='xiaofan').first() recipe = Recipe.query.filter_by(recipename='Jerry').first() recipes = Recipe.query.filter(Recipe.users.any(username='xiaofan')).all() if recipe not in recipes: user.add_recipes([recipe]) db.session.commit() <|reserved_special_token_1|> from project import db from project.models import User, Recipe, Association, Ingre, Recipe_ingre user = User.query.filter_by(username='xiaofan').first() recipe = Recipe.query.filter_by(recipename='Jerry').first() recipes = Recipe.query.filter(Recipe.users.any(username='xiaofan')).all() if recipe not in recipes: user.add_recipes([recipe]) db.session.commit() <|reserved_special_token_1|> from project import db from project.models import User, Recipe, Association, Ingre, Recipe_ingre user=User.query.filter_by(username="xiaofan").first() recipe=Recipe.query.filter_by(recipename="Jerry").first() recipes = Recipe.query.filter(Recipe.users.any(username="xiaofan")).all() if recipe not in recipes: user.add_recipes([recipe]) # commit the changes db.session.commit()
flexible
{ "blob_id": "07f8fd305e2311c0e37a785da0a826b8ea4e78ba", "index": 4154, "step-1": "<mask token>\n", "step-2": "<mask token>\nif recipe not in recipes:\n user.add_recipes([recipe])\n db.session.commit()\n", "step-3": "<mask token>\nuser = User.query.filter_by(username='xiaofan').first()\nrecipe = Recipe.query.filter_by(recipename='Jerry').first()\nrecipes = Recipe.query.filter(Recipe.users.any(username='xiaofan')).all()\nif recipe not in recipes:\n user.add_recipes([recipe])\n db.session.commit()\n", "step-4": "from project import db\nfrom project.models import User, Recipe, Association, Ingre, Recipe_ingre\nuser = User.query.filter_by(username='xiaofan').first()\nrecipe = Recipe.query.filter_by(recipename='Jerry').first()\nrecipes = Recipe.query.filter(Recipe.users.any(username='xiaofan')).all()\nif recipe not in recipes:\n user.add_recipes([recipe])\n db.session.commit()\n", "step-5": "from project import db\nfrom project.models import User, Recipe, Association, Ingre, Recipe_ingre\n\n\n\n\nuser=User.query.filter_by(username=\"xiaofan\").first()\nrecipe=Recipe.query.filter_by(recipename=\"Jerry\").first()\nrecipes = Recipe.query.filter(Recipe.users.any(username=\"xiaofan\")).all()\n\nif recipe not in recipes:\n user.add_recipes([recipe])\n\n # commit the changes\n db.session.commit()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def get_basename(name, split_num): return f'{name}.split{split_num:d}' <|reserved_special_token_0|> def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch, batch_norm, l1_factor, l2_factor, optimizer): """ Attempt to load the specified model (including architecture, weights, and even optimizer states). If this is not possible, build a new model from scratch. """ basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) model_filename = model_filename_fmt.format(epoch=resume_from_epoch) checkpoint_path = os.path.join(checkpoint_dir, model_filename) if resume_from_epoch > 0 and os.path.isfile(checkpoint_path): click.secho( f"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}." , fg='green') model = load_model(checkpoint_path) initial_epoch = resume_from_epoch else: click.secho( f"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model." 
, fg='yellow') model = build_model(output_dim=1, batch_norm=batch_norm, kernel_regularizer=l1_l2(l1_factor, l2_factor)) model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) initial_epoch = 0 return model, initial_epoch def build_callbacks(name, split_num, summary_dir, checkpoint_dir, checkpoint_period): basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) tensorboard_path = os.path.join(summary_dir, basename) csv_path = os.path.join(summary_dir, f'{basename}.csv') checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt) callbacks = [] callbacks.append(TensorBoard(tensorboard_path, profile_batch=0)) callbacks.append(CSVLogger(csv_path, append=True)) callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period) ) return callbacks <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_basename(name, split_num): return f'{name}.split{split_num:d}' def get_model_filename_fmt(basename): return f'{basename}.{{epoch:02d}}.h5' def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch, batch_norm, l1_factor, l2_factor, optimizer): """ Attempt to load the specified model (including architecture, weights, and even optimizer states). If this is not possible, build a new model from scratch. """ basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) model_filename = model_filename_fmt.format(epoch=resume_from_epoch) checkpoint_path = os.path.join(checkpoint_dir, model_filename) if resume_from_epoch > 0 and os.path.isfile(checkpoint_path): click.secho( f"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}." , fg='green') model = load_model(checkpoint_path) initial_epoch = resume_from_epoch else: click.secho( f"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model." 
, fg='yellow') model = build_model(output_dim=1, batch_norm=batch_norm, kernel_regularizer=l1_l2(l1_factor, l2_factor)) model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) initial_epoch = 0 return model, initial_epoch def build_callbacks(name, split_num, summary_dir, checkpoint_dir, checkpoint_period): basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) tensorboard_path = os.path.join(summary_dir, basename) csv_path = os.path.join(summary_dir, f'{basename}.csv') checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt) callbacks = [] callbacks.append(TensorBoard(tensorboard_path, profile_batch=0)) callbacks.append(CSVLogger(csv_path, append=True)) callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period) ) return callbacks <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_basename(name, split_num): return f'{name}.split{split_num:d}' def get_model_filename_fmt(basename): return f'{basename}.{{epoch:02d}}.h5' def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch, batch_norm, l1_factor, l2_factor, optimizer): """ Attempt to load the specified model (including architecture, weights, and even optimizer states). If this is not possible, build a new model from scratch. """ basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) model_filename = model_filename_fmt.format(epoch=resume_from_epoch) checkpoint_path = os.path.join(checkpoint_dir, model_filename) if resume_from_epoch > 0 and os.path.isfile(checkpoint_path): click.secho( f"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}." , fg='green') model = load_model(checkpoint_path) initial_epoch = resume_from_epoch else: click.secho( f"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model." 
, fg='yellow') model = build_model(output_dim=1, batch_norm=batch_norm, kernel_regularizer=l1_l2(l1_factor, l2_factor)) model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) initial_epoch = 0 return model, initial_epoch def build_callbacks(name, split_num, summary_dir, checkpoint_dir, checkpoint_period): basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) tensorboard_path = os.path.join(summary_dir, basename) csv_path = os.path.join(summary_dir, f'{basename}.csv') checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt) callbacks = [] callbacks.append(TensorBoard(tensorboard_path, profile_batch=0)) callbacks.append(CSVLogger(csv_path, append=True)) callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period) ) return callbacks def make_plot_data(names, splits, summary_dir, pretty_name_mapping=None): df_list = [] for name in names: for split_num in splits: basename = get_basename(name, split_num) csv_path = os.path.join(summary_dir, f'{basename}.csv') df = pd.read_csv(csv_path).assign(name=name, split=split_num) df_list.append(df) data = pd.concat(df_list, axis='index', sort=True).rename(columns=dict( acc='train', val_acc='validation')) if pretty_name_mapping is not None: data = data.assign(name=data.name.replace(pretty_name_mapping)) wide_data = pd.melt(data, id_vars=['name', 'split', 'epoch'], value_vars=['train', 'validation'], value_name='accuracy', var_name ='partition') return wide_data <|reserved_special_token_1|> <|reserved_special_token_0|> import click import os.path import pandas as pd from tensorflow.keras.models import load_model from tensorflow.keras.regularizers import l1_l2 from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard from zalando_classification.models import build_model def get_basename(name, split_num): return f'{name}.split{split_num:d}' def get_model_filename_fmt(basename): return f'{basename}.{{epoch:02d}}.h5' def 
maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch, batch_norm, l1_factor, l2_factor, optimizer): """ Attempt to load the specified model (including architecture, weights, and even optimizer states). If this is not possible, build a new model from scratch. """ basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) model_filename = model_filename_fmt.format(epoch=resume_from_epoch) checkpoint_path = os.path.join(checkpoint_dir, model_filename) if resume_from_epoch > 0 and os.path.isfile(checkpoint_path): click.secho( f"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}." , fg='green') model = load_model(checkpoint_path) initial_epoch = resume_from_epoch else: click.secho( f"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model." , fg='yellow') model = build_model(output_dim=1, batch_norm=batch_norm, kernel_regularizer=l1_l2(l1_factor, l2_factor)) model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) initial_epoch = 0 return model, initial_epoch def build_callbacks(name, split_num, summary_dir, checkpoint_dir, checkpoint_period): basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) tensorboard_path = os.path.join(summary_dir, basename) csv_path = os.path.join(summary_dir, f'{basename}.csv') checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt) callbacks = [] callbacks.append(TensorBoard(tensorboard_path, profile_batch=0)) callbacks.append(CSVLogger(csv_path, append=True)) callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period) ) return callbacks def make_plot_data(names, splits, summary_dir, pretty_name_mapping=None): df_list = [] for name in names: for split_num in splits: basename = get_basename(name, split_num) csv_path = os.path.join(summary_dir, f'{basename}.csv') df = pd.read_csv(csv_path).assign(name=name, split=split_num) 
df_list.append(df) data = pd.concat(df_list, axis='index', sort=True).rename(columns=dict( acc='train', val_acc='validation')) if pretty_name_mapping is not None: data = data.assign(name=data.name.replace(pretty_name_mapping)) wide_data = pd.melt(data, id_vars=['name', 'split', 'epoch'], value_vars=['train', 'validation'], value_name='accuracy', var_name ='partition') return wide_data <|reserved_special_token_1|> """Utils module.""" import click import os.path import pandas as pd from tensorflow.keras.models import load_model from tensorflow.keras.regularizers import l1_l2 from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard from zalando_classification.models import build_model def get_basename(name, split_num): return f"{name}.split{split_num:d}" def get_model_filename_fmt(basename): return f"{basename}.{{epoch:02d}}.h5" def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch, batch_norm, l1_factor, l2_factor, optimizer): """ Attempt to load the specified model (including architecture, weights, and even optimizer states). If this is not possible, build a new model from scratch. """ basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) model_filename = model_filename_fmt.format(epoch=resume_from_epoch) checkpoint_path = os.path.join(checkpoint_dir, model_filename) if resume_from_epoch > 0 and os.path.isfile(checkpoint_path): click.secho(f"Found model checkpoint '{checkpoint_path}'. " f"Resuming from epoch {resume_from_epoch}.", fg='green') model = load_model(checkpoint_path) initial_epoch = resume_from_epoch else: click.secho(f"Could not load model checkpoint '{checkpoint_path}' " "or `resume_from_epoch == 0`. 
Building new model.", fg='yellow') model = build_model(output_dim=1, batch_norm=batch_norm, kernel_regularizer=l1_l2(l1_factor, l2_factor)) # optimizer = Adam(beta_1=0.5) model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) initial_epoch = 0 return model, initial_epoch def build_callbacks(name, split_num, summary_dir, checkpoint_dir, checkpoint_period): basename = get_basename(name, split_num) model_filename_fmt = get_model_filename_fmt(basename) tensorboard_path = os.path.join(summary_dir, basename) csv_path = os.path.join(summary_dir, f"{basename}.csv") checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt) callbacks = [] callbacks.append(TensorBoard(tensorboard_path, profile_batch=0)) callbacks.append(CSVLogger(csv_path, append=True)) callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)) return callbacks def make_plot_data(names, splits, summary_dir, pretty_name_mapping=None): df_list = [] for name in names: for split_num in splits: basename = get_basename(name, split_num) csv_path = os.path.join(summary_dir, f"{basename}.csv") df = pd.read_csv(csv_path).assign(name=name, split=split_num) df_list.append(df) data = pd.concat(df_list, axis="index", sort=True) \ .rename(columns=dict(acc="train", val_acc="validation")) if pretty_name_mapping is not None: data = data.assign(name=data.name.replace(pretty_name_mapping)) wide_data = pd.melt(data, id_vars=["name", "split", "epoch"], value_vars=["train", "validation"], value_name="accuracy", var_name="partition") return wide_data
flexible
{ "blob_id": "6553312c9655c821444ff5f60e4d68c7fc08bd08", "index": 1118, "step-1": "<mask token>\n\n\ndef get_basename(name, split_num):\n return f'{name}.split{split_num:d}'\n\n\n<mask token>\n\n\ndef maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n \"\"\"\n Attempt to load the specified model (including architecture, weights, and\n even optimizer states). If this is not possible, build a new model from\n scratch.\n \"\"\"\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n click.secho(\n f\"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}.\"\n , fg='green')\n model = load_model(checkpoint_path)\n initial_epoch = resume_from_epoch\n else:\n click.secho(\n f\"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. 
Building new model.\"\n , fg='yellow')\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n initial_epoch = 0\n return model, initial_epoch\n\n\ndef build_callbacks(name, split_num, summary_dir, checkpoint_dir,\n checkpoint_period):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n tensorboard_path = os.path.join(summary_dir, basename)\n csv_path = os.path.join(summary_dir, f'{basename}.csv')\n checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)\n callbacks = []\n callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))\n callbacks.append(CSVLogger(csv_path, append=True))\n callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)\n )\n return callbacks\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_basename(name, split_num):\n return f'{name}.split{split_num:d}'\n\n\ndef get_model_filename_fmt(basename):\n return f'{basename}.{{epoch:02d}}.h5'\n\n\ndef maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n \"\"\"\n Attempt to load the specified model (including architecture, weights, and\n even optimizer states). If this is not possible, build a new model from\n scratch.\n \"\"\"\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n click.secho(\n f\"Found model checkpoint '{checkpoint_path}'. 
Resuming from epoch {resume_from_epoch}.\"\n , fg='green')\n model = load_model(checkpoint_path)\n initial_epoch = resume_from_epoch\n else:\n click.secho(\n f\"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model.\"\n , fg='yellow')\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n initial_epoch = 0\n return model, initial_epoch\n\n\ndef build_callbacks(name, split_num, summary_dir, checkpoint_dir,\n checkpoint_period):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n tensorboard_path = os.path.join(summary_dir, basename)\n csv_path = os.path.join(summary_dir, f'{basename}.csv')\n checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)\n callbacks = []\n callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))\n callbacks.append(CSVLogger(csv_path, append=True))\n callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)\n )\n return callbacks\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_basename(name, split_num):\n return f'{name}.split{split_num:d}'\n\n\ndef get_model_filename_fmt(basename):\n return f'{basename}.{{epoch:02d}}.h5'\n\n\ndef maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n \"\"\"\n Attempt to load the specified model (including architecture, weights, and\n even optimizer states). 
If this is not possible, build a new model from\n scratch.\n \"\"\"\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n click.secho(\n f\"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}.\"\n , fg='green')\n model = load_model(checkpoint_path)\n initial_epoch = resume_from_epoch\n else:\n click.secho(\n f\"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model.\"\n , fg='yellow')\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n initial_epoch = 0\n return model, initial_epoch\n\n\ndef build_callbacks(name, split_num, summary_dir, checkpoint_dir,\n checkpoint_period):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n tensorboard_path = os.path.join(summary_dir, basename)\n csv_path = os.path.join(summary_dir, f'{basename}.csv')\n checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)\n callbacks = []\n callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))\n callbacks.append(CSVLogger(csv_path, append=True))\n callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)\n )\n return callbacks\n\n\ndef make_plot_data(names, splits, summary_dir, pretty_name_mapping=None):\n df_list = []\n for name in names:\n for split_num in splits:\n basename = get_basename(name, split_num)\n csv_path = os.path.join(summary_dir, f'{basename}.csv')\n df = pd.read_csv(csv_path).assign(name=name, split=split_num)\n df_list.append(df)\n data = pd.concat(df_list, axis='index', sort=True).rename(columns=dict(\n acc='train', 
val_acc='validation'))\n if pretty_name_mapping is not None:\n data = data.assign(name=data.name.replace(pretty_name_mapping))\n wide_data = pd.melt(data, id_vars=['name', 'split', 'epoch'],\n value_vars=['train', 'validation'], value_name='accuracy', var_name\n ='partition')\n return wide_data\n", "step-4": "<mask token>\nimport click\nimport os.path\nimport pandas as pd\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.regularizers import l1_l2\nfrom tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard\nfrom zalando_classification.models import build_model\n\n\ndef get_basename(name, split_num):\n return f'{name}.split{split_num:d}'\n\n\ndef get_model_filename_fmt(basename):\n return f'{basename}.{{epoch:02d}}.h5'\n\n\ndef maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n \"\"\"\n Attempt to load the specified model (including architecture, weights, and\n even optimizer states). If this is not possible, build a new model from\n scratch.\n \"\"\"\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n click.secho(\n f\"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}.\"\n , fg='green')\n model = load_model(checkpoint_path)\n initial_epoch = resume_from_epoch\n else:\n click.secho(\n f\"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. 
Building new model.\"\n , fg='yellow')\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n initial_epoch = 0\n return model, initial_epoch\n\n\ndef build_callbacks(name, split_num, summary_dir, checkpoint_dir,\n checkpoint_period):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n tensorboard_path = os.path.join(summary_dir, basename)\n csv_path = os.path.join(summary_dir, f'{basename}.csv')\n checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)\n callbacks = []\n callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))\n callbacks.append(CSVLogger(csv_path, append=True))\n callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)\n )\n return callbacks\n\n\ndef make_plot_data(names, splits, summary_dir, pretty_name_mapping=None):\n df_list = []\n for name in names:\n for split_num in splits:\n basename = get_basename(name, split_num)\n csv_path = os.path.join(summary_dir, f'{basename}.csv')\n df = pd.read_csv(csv_path).assign(name=name, split=split_num)\n df_list.append(df)\n data = pd.concat(df_list, axis='index', sort=True).rename(columns=dict(\n acc='train', val_acc='validation'))\n if pretty_name_mapping is not None:\n data = data.assign(name=data.name.replace(pretty_name_mapping))\n wide_data = pd.melt(data, id_vars=['name', 'split', 'epoch'],\n value_vars=['train', 'validation'], value_name='accuracy', var_name\n ='partition')\n return wide_data\n", "step-5": "\"\"\"Utils module.\"\"\"\nimport click\nimport os.path\n\nimport pandas as pd\n\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.regularizers import l1_l2\nfrom tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard\n\nfrom zalando_classification.models import build_model\n\n\ndef get_basename(name, split_num):\n\n return 
f\"{name}.split{split_num:d}\"\n\n\ndef get_model_filename_fmt(basename):\n\n return f\"{basename}.{{epoch:02d}}.h5\"\n\n\ndef maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n \"\"\"\n Attempt to load the specified model (including architecture, weights, and\n even optimizer states). If this is not possible, build a new model from\n scratch.\n \"\"\"\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. 
Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch\n\n\ndef build_callbacks(name, split_num, summary_dir, checkpoint_dir,\n checkpoint_period):\n\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n\n tensorboard_path = os.path.join(summary_dir, basename)\n csv_path = os.path.join(summary_dir, f\"{basename}.csv\")\n checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)\n\n callbacks = []\n callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))\n callbacks.append(CSVLogger(csv_path, append=True))\n callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period))\n\n return callbacks\n\n\ndef make_plot_data(names, splits, summary_dir, pretty_name_mapping=None):\n\n df_list = []\n\n for name in names:\n for split_num in splits:\n\n basename = get_basename(name, split_num)\n csv_path = os.path.join(summary_dir, f\"{basename}.csv\")\n\n df = pd.read_csv(csv_path).assign(name=name, split=split_num)\n df_list.append(df)\n\n data = pd.concat(df_list, axis=\"index\", sort=True) \\\n .rename(columns=dict(acc=\"train\", val_acc=\"validation\"))\n\n if pretty_name_mapping is not None:\n data = data.assign(name=data.name.replace(pretty_name_mapping))\n\n wide_data = pd.melt(data, id_vars=[\"name\", \"split\", \"epoch\"],\n value_vars=[\"train\", \"validation\"],\n value_name=\"accuracy\", var_name=\"partition\")\n\n return wide_data\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
<|reserved_special_token_0|> class DensePoseConfig(ZambaBaseModel): <|reserved_special_token_0|> video_loader_config: VideoLoaderConfig output_type: DensePoseOutputEnum render_output: bool = False embeddings_in_json: bool = False data_dir: Path filepaths: Optional[Path] = None save_dir: Optional[Path] = None cache_dir: Optional[Path] = None weight_download_region: RegionEnum = RegionEnum('us') _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True )(validate_model_cache_dir) def run_model(self): """Use this configuration to execute DensePose via the DensePoseManager""" if not isinstance(self.output_type, DensePoseOutputEnum): self.output_type = DensePoseOutputEnum(self.output_type) if self.output_type == DensePoseOutputEnum.segmentation.value: model = MODELS['animals'] elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value: model = MODELS['chimps'] else: raise Exception(f'invalid {self.output_type}') output_dir = Path(os.getcwd() ) if self.save_dir is None else self.save_dir dpm = DensePoseManager(model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region) for fp in tqdm(self.filepaths.filepath, desc='Videos'): fp = Path(fp) vid_arr, labels = dpm.predict_video(fp, video_loader_config= self.video_loader_config) output_path = output_dir / f'{fp.stem}_denspose_labels.json' dpm.serialize_video_output(labels, filename=output_path, write_embeddings=self.embeddings_in_json) if self.render_output: output_path = (output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}") visualized_video = dpm.visualize_video(vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps) if self.output_type == DensePoseOutputEnum.chimp_anatomy.value: output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv' dpm.anatomize_video(visualized_video, labels, output_path= output_path, fps=self.video_loader_config.fps) _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(get_filepaths) 
@root_validator(skip_on_failure=True) def validate_files(cls, values): if isinstance(values['filepaths'], pd.DataFrame): files_df = values['filepaths'] else: files_df = pd.DataFrame(pd.read_csv(values['filepaths'])) if 'filepath' not in files_df.columns: raise ValueError( f"{values['filepaths']} must contain a `filepath` column.") duplicated = files_df.filepath.duplicated() if duplicated.sum() > 0: logger.warning( f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.' ) files_df = files_df[['filepath']].drop_duplicates() values['filepaths'] = check_files_exist_and_load(df=files_df, data_dir=values['data_dir'], skip_load_validation=True) return values <|reserved_special_token_1|> <|reserved_special_token_0|> class DensePoseConfig(ZambaBaseModel): """Configuration for running dense pose on videos. Args: video_loader_config (VideoLoaderConfig): Configuration for loading videos output_type (str): one of DensePoseOutputEnum (currently "segmentation" or "chimp_anatomy"). render_output (bool): Whether to save a version of the video with the output overlaid on top. Defaults to False. embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the DensePose result. Setting to True can result in large json files. Defaults to False. data_dir (Path): Where to find the files listed in filepaths (or where to look if filepaths is not provided). filepaths (Path, optional): Path to a CSV file with a list of filepaths to process. save_dir (Path, optional): Directory for where to save the output files; defaults to os.getcwd(). cache_dir (Path, optional): Path for downloading and saving model weights. Defaults to env var `MODEL_CACHE_DIR` or the OS app cache dir. weight_download_region (RegionEnum, optional): region where to download weights; should be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'. 
""" video_loader_config: VideoLoaderConfig output_type: DensePoseOutputEnum render_output: bool = False embeddings_in_json: bool = False data_dir: Path filepaths: Optional[Path] = None save_dir: Optional[Path] = None cache_dir: Optional[Path] = None weight_download_region: RegionEnum = RegionEnum('us') _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True )(validate_model_cache_dir) def run_model(self): """Use this configuration to execute DensePose via the DensePoseManager""" if not isinstance(self.output_type, DensePoseOutputEnum): self.output_type = DensePoseOutputEnum(self.output_type) if self.output_type == DensePoseOutputEnum.segmentation.value: model = MODELS['animals'] elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value: model = MODELS['chimps'] else: raise Exception(f'invalid {self.output_type}') output_dir = Path(os.getcwd() ) if self.save_dir is None else self.save_dir dpm = DensePoseManager(model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region) for fp in tqdm(self.filepaths.filepath, desc='Videos'): fp = Path(fp) vid_arr, labels = dpm.predict_video(fp, video_loader_config= self.video_loader_config) output_path = output_dir / f'{fp.stem}_denspose_labels.json' dpm.serialize_video_output(labels, filename=output_path, write_embeddings=self.embeddings_in_json) if self.render_output: output_path = (output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}") visualized_video = dpm.visualize_video(vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps) if self.output_type == DensePoseOutputEnum.chimp_anatomy.value: output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv' dpm.anatomize_video(visualized_video, labels, output_path= output_path, fps=self.video_loader_config.fps) _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(get_filepaths) @root_validator(skip_on_failure=True) def validate_files(cls, values): if 
isinstance(values['filepaths'], pd.DataFrame): files_df = values['filepaths'] else: files_df = pd.DataFrame(pd.read_csv(values['filepaths'])) if 'filepath' not in files_df.columns: raise ValueError( f"{values['filepaths']} must contain a `filepath` column.") duplicated = files_df.filepath.duplicated() if duplicated.sum() > 0: logger.warning( f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.' ) files_df = files_df[['filepath']].drop_duplicates() values['filepaths'] = check_files_exist_and_load(df=files_df, data_dir=values['data_dir'], skip_load_validation=True) return values <|reserved_special_token_1|> <|reserved_special_token_0|> class DensePoseOutputEnum(Enum): segmentation = 'segmentation' chimp_anatomy = 'chimp_anatomy' class DensePoseConfig(ZambaBaseModel): """Configuration for running dense pose on videos. Args: video_loader_config (VideoLoaderConfig): Configuration for loading videos output_type (str): one of DensePoseOutputEnum (currently "segmentation" or "chimp_anatomy"). render_output (bool): Whether to save a version of the video with the output overlaid on top. Defaults to False. embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the DensePose result. Setting to True can result in large json files. Defaults to False. data_dir (Path): Where to find the files listed in filepaths (or where to look if filepaths is not provided). filepaths (Path, optional): Path to a CSV file with a list of filepaths to process. save_dir (Path, optional): Directory for where to save the output files; defaults to os.getcwd(). cache_dir (Path, optional): Path for downloading and saving model weights. Defaults to env var `MODEL_CACHE_DIR` or the OS app cache dir. weight_download_region (RegionEnum, optional): region where to download weights; should be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'. 
""" video_loader_config: VideoLoaderConfig output_type: DensePoseOutputEnum render_output: bool = False embeddings_in_json: bool = False data_dir: Path filepaths: Optional[Path] = None save_dir: Optional[Path] = None cache_dir: Optional[Path] = None weight_download_region: RegionEnum = RegionEnum('us') _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True )(validate_model_cache_dir) def run_model(self): """Use this configuration to execute DensePose via the DensePoseManager""" if not isinstance(self.output_type, DensePoseOutputEnum): self.output_type = DensePoseOutputEnum(self.output_type) if self.output_type == DensePoseOutputEnum.segmentation.value: model = MODELS['animals'] elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value: model = MODELS['chimps'] else: raise Exception(f'invalid {self.output_type}') output_dir = Path(os.getcwd() ) if self.save_dir is None else self.save_dir dpm = DensePoseManager(model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region) for fp in tqdm(self.filepaths.filepath, desc='Videos'): fp = Path(fp) vid_arr, labels = dpm.predict_video(fp, video_loader_config= self.video_loader_config) output_path = output_dir / f'{fp.stem}_denspose_labels.json' dpm.serialize_video_output(labels, filename=output_path, write_embeddings=self.embeddings_in_json) if self.render_output: output_path = (output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}") visualized_video = dpm.visualize_video(vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps) if self.output_type == DensePoseOutputEnum.chimp_anatomy.value: output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv' dpm.anatomize_video(visualized_video, labels, output_path= output_path, fps=self.video_loader_config.fps) _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(get_filepaths) @root_validator(skip_on_failure=True) def validate_files(cls, values): if 
isinstance(values['filepaths'], pd.DataFrame): files_df = values['filepaths'] else: files_df = pd.DataFrame(pd.read_csv(values['filepaths'])) if 'filepath' not in files_df.columns: raise ValueError( f"{values['filepaths']} must contain a `filepath` column.") duplicated = files_df.filepath.duplicated() if duplicated.sum() > 0: logger.warning( f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.' ) files_df = files_df[['filepath']].drop_duplicates() values['filepaths'] = check_files_exist_and_load(df=files_df, data_dir=values['data_dir'], skip_load_validation=True) return values <|reserved_special_token_1|> from enum import Enum import os from pathlib import Path from typing import Optional from loguru import logger import pandas as pd from pydantic.class_validators import root_validator, validator from tqdm import tqdm from zamba.data.video import VideoLoaderConfig from zamba.models.config import ZambaBaseModel, check_files_exist_and_load, get_filepaths, validate_model_cache_dir from zamba.models.densepose.densepose_manager import MODELS, DensePoseManager from zamba.models.utils import RegionEnum class DensePoseOutputEnum(Enum): segmentation = 'segmentation' chimp_anatomy = 'chimp_anatomy' class DensePoseConfig(ZambaBaseModel): """Configuration for running dense pose on videos. Args: video_loader_config (VideoLoaderConfig): Configuration for loading videos output_type (str): one of DensePoseOutputEnum (currently "segmentation" or "chimp_anatomy"). render_output (bool): Whether to save a version of the video with the output overlaid on top. Defaults to False. embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the DensePose result. Setting to True can result in large json files. Defaults to False. data_dir (Path): Where to find the files listed in filepaths (or where to look if filepaths is not provided). 
filepaths (Path, optional): Path to a CSV file with a list of filepaths to process. save_dir (Path, optional): Directory for where to save the output files; defaults to os.getcwd(). cache_dir (Path, optional): Path for downloading and saving model weights. Defaults to env var `MODEL_CACHE_DIR` or the OS app cache dir. weight_download_region (RegionEnum, optional): region where to download weights; should be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'. """ video_loader_config: VideoLoaderConfig output_type: DensePoseOutputEnum render_output: bool = False embeddings_in_json: bool = False data_dir: Path filepaths: Optional[Path] = None save_dir: Optional[Path] = None cache_dir: Optional[Path] = None weight_download_region: RegionEnum = RegionEnum('us') _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True )(validate_model_cache_dir) def run_model(self): """Use this configuration to execute DensePose via the DensePoseManager""" if not isinstance(self.output_type, DensePoseOutputEnum): self.output_type = DensePoseOutputEnum(self.output_type) if self.output_type == DensePoseOutputEnum.segmentation.value: model = MODELS['animals'] elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value: model = MODELS['chimps'] else: raise Exception(f'invalid {self.output_type}') output_dir = Path(os.getcwd() ) if self.save_dir is None else self.save_dir dpm = DensePoseManager(model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region) for fp in tqdm(self.filepaths.filepath, desc='Videos'): fp = Path(fp) vid_arr, labels = dpm.predict_video(fp, video_loader_config= self.video_loader_config) output_path = output_dir / f'{fp.stem}_denspose_labels.json' dpm.serialize_video_output(labels, filename=output_path, write_embeddings=self.embeddings_in_json) if self.render_output: output_path = (output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}") visualized_video = dpm.visualize_video(vid_arr, labels, 
output_path=output_path, fps=self.video_loader_config.fps) if self.output_type == DensePoseOutputEnum.chimp_anatomy.value: output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv' dpm.anatomize_video(visualized_video, labels, output_path= output_path, fps=self.video_loader_config.fps) _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(get_filepaths) @root_validator(skip_on_failure=True) def validate_files(cls, values): if isinstance(values['filepaths'], pd.DataFrame): files_df = values['filepaths'] else: files_df = pd.DataFrame(pd.read_csv(values['filepaths'])) if 'filepath' not in files_df.columns: raise ValueError( f"{values['filepaths']} must contain a `filepath` column.") duplicated = files_df.filepath.duplicated() if duplicated.sum() > 0: logger.warning( f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.' ) files_df = files_df[['filepath']].drop_duplicates() values['filepaths'] = check_files_exist_and_load(df=files_df, data_dir=values['data_dir'], skip_load_validation=True) return values <|reserved_special_token_1|> from enum import Enum import os from pathlib import Path from typing import Optional from loguru import logger import pandas as pd from pydantic.class_validators import root_validator, validator from tqdm import tqdm from zamba.data.video import VideoLoaderConfig from zamba.models.config import ( ZambaBaseModel, check_files_exist_and_load, get_filepaths, validate_model_cache_dir, ) from zamba.models.densepose.densepose_manager import MODELS, DensePoseManager from zamba.models.utils import RegionEnum class DensePoseOutputEnum(Enum): segmentation = "segmentation" chimp_anatomy = "chimp_anatomy" class DensePoseConfig(ZambaBaseModel): """Configuration for running dense pose on videos. 
Args: video_loader_config (VideoLoaderConfig): Configuration for loading videos output_type (str): one of DensePoseOutputEnum (currently "segmentation" or "chimp_anatomy"). render_output (bool): Whether to save a version of the video with the output overlaid on top. Defaults to False. embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the DensePose result. Setting to True can result in large json files. Defaults to False. data_dir (Path): Where to find the files listed in filepaths (or where to look if filepaths is not provided). filepaths (Path, optional): Path to a CSV file with a list of filepaths to process. save_dir (Path, optional): Directory for where to save the output files; defaults to os.getcwd(). cache_dir (Path, optional): Path for downloading and saving model weights. Defaults to env var `MODEL_CACHE_DIR` or the OS app cache dir. weight_download_region (RegionEnum, optional): region where to download weights; should be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'. 
""" video_loader_config: VideoLoaderConfig output_type: DensePoseOutputEnum render_output: bool = False embeddings_in_json: bool = False data_dir: Path filepaths: Optional[Path] = None save_dir: Optional[Path] = None cache_dir: Optional[Path] = None weight_download_region: RegionEnum = RegionEnum("us") _validate_cache_dir = validator("cache_dir", allow_reuse=True, always=True)( validate_model_cache_dir ) def run_model(self): """Use this configuration to execute DensePose via the DensePoseManager""" if not isinstance(self.output_type, DensePoseOutputEnum): self.output_type = DensePoseOutputEnum(self.output_type) if self.output_type == DensePoseOutputEnum.segmentation.value: model = MODELS["animals"] elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value: model = MODELS["chimps"] else: raise Exception(f"invalid {self.output_type}") output_dir = Path(os.getcwd()) if self.save_dir is None else self.save_dir dpm = DensePoseManager( model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region ) for fp in tqdm(self.filepaths.filepath, desc="Videos"): fp = Path(fp) vid_arr, labels = dpm.predict_video(fp, video_loader_config=self.video_loader_config) # serialize the labels generated by densepose to json output_path = output_dir / f"{fp.stem}_denspose_labels.json" dpm.serialize_video_output( labels, filename=output_path, write_embeddings=self.embeddings_in_json ) # re-render the video with the densepose labels visualized on top of the video if self.render_output: output_path = output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}" visualized_video = dpm.visualize_video( vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps ) # write out the anatomy present in each frame to a csv for later analysis if self.output_type == DensePoseOutputEnum.chimp_anatomy.value: output_path = output_dir / f"{fp.stem}_denspose_anatomy.csv" dpm.anatomize_video( visualized_video, labels, output_path=output_path, 
fps=self.video_loader_config.fps, ) _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)( get_filepaths ) @root_validator(skip_on_failure=True) def validate_files(cls, values): # if globbing from data directory, already have valid dataframe if isinstance(values["filepaths"], pd.DataFrame): files_df = values["filepaths"] else: # make into dataframe even if only one column for clearer indexing files_df = pd.DataFrame(pd.read_csv(values["filepaths"])) if "filepath" not in files_df.columns: raise ValueError(f"{values['filepaths']} must contain a `filepath` column.") # can only contain one row per filepath duplicated = files_df.filepath.duplicated() if duplicated.sum() > 0: logger.warning( f"Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video." ) files_df = files_df[["filepath"]].drop_duplicates() values["filepaths"] = check_files_exist_and_load( df=files_df, data_dir=values["data_dir"], skip_load_validation=True, ) return values
flexible
{ "blob_id": "9d8d8e97f7d3dbbb47dc6d4105f0f1ffb358fd2f", "index": 6977, "step-1": "<mask token>\n\n\nclass DensePoseConfig(ZambaBaseModel):\n <mask token>\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum('us')\n _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True\n )(validate_model_cache_dir)\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS['animals']\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS['chimps']\n else:\n raise Exception(f'invalid {self.output_type}')\n output_dir = Path(os.getcwd()\n ) if self.save_dir is None else self.save_dir\n dpm = DensePoseManager(model, model_cache_dir=self.cache_dir,\n download_region=self.weight_download_region)\n for fp in tqdm(self.filepaths.filepath, desc='Videos'):\n fp = Path(fp)\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=\n self.video_loader_config)\n output_path = output_dir / f'{fp.stem}_denspose_labels.json'\n dpm.serialize_video_output(labels, filename=output_path,\n write_embeddings=self.embeddings_in_json)\n if self.render_output:\n output_path = (output_dir /\n f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\")\n visualized_video = dpm.visualize_video(vid_arr, labels,\n output_path=output_path, fps=self.video_loader_config.fps)\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv'\n dpm.anatomize_video(visualized_video, labels, output_path=\n 
output_path, fps=self.video_loader_config.fps)\n _get_filepaths = root_validator(allow_reuse=True, pre=False,\n skip_on_failure=True)(get_filepaths)\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n if isinstance(values['filepaths'], pd.DataFrame):\n files_df = values['filepaths']\n else:\n files_df = pd.DataFrame(pd.read_csv(values['filepaths']))\n if 'filepath' not in files_df.columns:\n raise ValueError(\n f\"{values['filepaths']} must contain a `filepath` column.\")\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.'\n )\n files_df = files_df[['filepath']].drop_duplicates()\n values['filepaths'] = check_files_exist_and_load(df=files_df,\n data_dir=values['data_dir'], skip_load_validation=True)\n return values\n", "step-2": "<mask token>\n\n\nclass DensePoseConfig(ZambaBaseModel):\n \"\"\"Configuration for running dense pose on videos.\n\n Args:\n video_loader_config (VideoLoaderConfig): Configuration for loading videos\n output_type (str): one of DensePoseOutputEnum (currently \"segmentation\" or \"chimp_anatomy\").\n render_output (bool): Whether to save a version of the video with the output overlaid on top.\n Defaults to False.\n embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the\n DensePose result. Setting to True can result in large json files. Defaults to False.\n data_dir (Path): Where to find the files listed in filepaths (or where to look if\n filepaths is not provided).\n filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.\n save_dir (Path, optional): Directory for where to save the output files;\n defaults to os.getcwd().\n cache_dir (Path, optional): Path for downloading and saving model weights. 
Defaults\n to env var `MODEL_CACHE_DIR` or the OS app cache dir.\n weight_download_region (RegionEnum, optional): region where to download weights; should\n be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.\n \"\"\"\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum('us')\n _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True\n )(validate_model_cache_dir)\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS['animals']\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS['chimps']\n else:\n raise Exception(f'invalid {self.output_type}')\n output_dir = Path(os.getcwd()\n ) if self.save_dir is None else self.save_dir\n dpm = DensePoseManager(model, model_cache_dir=self.cache_dir,\n download_region=self.weight_download_region)\n for fp in tqdm(self.filepaths.filepath, desc='Videos'):\n fp = Path(fp)\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=\n self.video_loader_config)\n output_path = output_dir / f'{fp.stem}_denspose_labels.json'\n dpm.serialize_video_output(labels, filename=output_path,\n write_embeddings=self.embeddings_in_json)\n if self.render_output:\n output_path = (output_dir /\n f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\")\n visualized_video = dpm.visualize_video(vid_arr, labels,\n output_path=output_path, fps=self.video_loader_config.fps)\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / 
f'{fp.stem}_denspose_anatomy.csv'\n dpm.anatomize_video(visualized_video, labels, output_path=\n output_path, fps=self.video_loader_config.fps)\n _get_filepaths = root_validator(allow_reuse=True, pre=False,\n skip_on_failure=True)(get_filepaths)\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n if isinstance(values['filepaths'], pd.DataFrame):\n files_df = values['filepaths']\n else:\n files_df = pd.DataFrame(pd.read_csv(values['filepaths']))\n if 'filepath' not in files_df.columns:\n raise ValueError(\n f\"{values['filepaths']} must contain a `filepath` column.\")\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.'\n )\n files_df = files_df[['filepath']].drop_duplicates()\n values['filepaths'] = check_files_exist_and_load(df=files_df,\n data_dir=values['data_dir'], skip_load_validation=True)\n return values\n", "step-3": "<mask token>\n\n\nclass DensePoseOutputEnum(Enum):\n segmentation = 'segmentation'\n chimp_anatomy = 'chimp_anatomy'\n\n\nclass DensePoseConfig(ZambaBaseModel):\n \"\"\"Configuration for running dense pose on videos.\n\n Args:\n video_loader_config (VideoLoaderConfig): Configuration for loading videos\n output_type (str): one of DensePoseOutputEnum (currently \"segmentation\" or \"chimp_anatomy\").\n render_output (bool): Whether to save a version of the video with the output overlaid on top.\n Defaults to False.\n embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the\n DensePose result. Setting to True can result in large json files. 
Defaults to False.\n data_dir (Path): Where to find the files listed in filepaths (or where to look if\n filepaths is not provided).\n filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.\n save_dir (Path, optional): Directory for where to save the output files;\n defaults to os.getcwd().\n cache_dir (Path, optional): Path for downloading and saving model weights. Defaults\n to env var `MODEL_CACHE_DIR` or the OS app cache dir.\n weight_download_region (RegionEnum, optional): region where to download weights; should\n be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.\n \"\"\"\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum('us')\n _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True\n )(validate_model_cache_dir)\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS['animals']\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS['chimps']\n else:\n raise Exception(f'invalid {self.output_type}')\n output_dir = Path(os.getcwd()\n ) if self.save_dir is None else self.save_dir\n dpm = DensePoseManager(model, model_cache_dir=self.cache_dir,\n download_region=self.weight_download_region)\n for fp in tqdm(self.filepaths.filepath, desc='Videos'):\n fp = Path(fp)\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=\n self.video_loader_config)\n output_path = output_dir / f'{fp.stem}_denspose_labels.json'\n dpm.serialize_video_output(labels, 
filename=output_path,\n write_embeddings=self.embeddings_in_json)\n if self.render_output:\n output_path = (output_dir /\n f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\")\n visualized_video = dpm.visualize_video(vid_arr, labels,\n output_path=output_path, fps=self.video_loader_config.fps)\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv'\n dpm.anatomize_video(visualized_video, labels, output_path=\n output_path, fps=self.video_loader_config.fps)\n _get_filepaths = root_validator(allow_reuse=True, pre=False,\n skip_on_failure=True)(get_filepaths)\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n if isinstance(values['filepaths'], pd.DataFrame):\n files_df = values['filepaths']\n else:\n files_df = pd.DataFrame(pd.read_csv(values['filepaths']))\n if 'filepath' not in files_df.columns:\n raise ValueError(\n f\"{values['filepaths']} must contain a `filepath` column.\")\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. 
Dropping duplicates so predictions will have one row per video.'\n )\n files_df = files_df[['filepath']].drop_duplicates()\n values['filepaths'] = check_files_exist_and_load(df=files_df,\n data_dir=values['data_dir'], skip_load_validation=True)\n return values\n", "step-4": "from enum import Enum\nimport os\nfrom pathlib import Path\nfrom typing import Optional\nfrom loguru import logger\nimport pandas as pd\nfrom pydantic.class_validators import root_validator, validator\nfrom tqdm import tqdm\nfrom zamba.data.video import VideoLoaderConfig\nfrom zamba.models.config import ZambaBaseModel, check_files_exist_and_load, get_filepaths, validate_model_cache_dir\nfrom zamba.models.densepose.densepose_manager import MODELS, DensePoseManager\nfrom zamba.models.utils import RegionEnum\n\n\nclass DensePoseOutputEnum(Enum):\n segmentation = 'segmentation'\n chimp_anatomy = 'chimp_anatomy'\n\n\nclass DensePoseConfig(ZambaBaseModel):\n \"\"\"Configuration for running dense pose on videos.\n\n Args:\n video_loader_config (VideoLoaderConfig): Configuration for loading videos\n output_type (str): one of DensePoseOutputEnum (currently \"segmentation\" or \"chimp_anatomy\").\n render_output (bool): Whether to save a version of the video with the output overlaid on top.\n Defaults to False.\n embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the\n DensePose result. Setting to True can result in large json files. Defaults to False.\n data_dir (Path): Where to find the files listed in filepaths (or where to look if\n filepaths is not provided).\n filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.\n save_dir (Path, optional): Directory for where to save the output files;\n defaults to os.getcwd().\n cache_dir (Path, optional): Path for downloading and saving model weights. 
Defaults\n to env var `MODEL_CACHE_DIR` or the OS app cache dir.\n weight_download_region (RegionEnum, optional): region where to download weights; should\n be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.\n \"\"\"\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum('us')\n _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True\n )(validate_model_cache_dir)\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS['animals']\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS['chimps']\n else:\n raise Exception(f'invalid {self.output_type}')\n output_dir = Path(os.getcwd()\n ) if self.save_dir is None else self.save_dir\n dpm = DensePoseManager(model, model_cache_dir=self.cache_dir,\n download_region=self.weight_download_region)\n for fp in tqdm(self.filepaths.filepath, desc='Videos'):\n fp = Path(fp)\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=\n self.video_loader_config)\n output_path = output_dir / f'{fp.stem}_denspose_labels.json'\n dpm.serialize_video_output(labels, filename=output_path,\n write_embeddings=self.embeddings_in_json)\n if self.render_output:\n output_path = (output_dir /\n f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\")\n visualized_video = dpm.visualize_video(vid_arr, labels,\n output_path=output_path, fps=self.video_loader_config.fps)\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / 
f'{fp.stem}_denspose_anatomy.csv'\n dpm.anatomize_video(visualized_video, labels, output_path=\n output_path, fps=self.video_loader_config.fps)\n _get_filepaths = root_validator(allow_reuse=True, pre=False,\n skip_on_failure=True)(get_filepaths)\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n if isinstance(values['filepaths'], pd.DataFrame):\n files_df = values['filepaths']\n else:\n files_df = pd.DataFrame(pd.read_csv(values['filepaths']))\n if 'filepath' not in files_df.columns:\n raise ValueError(\n f\"{values['filepaths']} must contain a `filepath` column.\")\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.'\n )\n files_df = files_df[['filepath']].drop_duplicates()\n values['filepaths'] = check_files_exist_and_load(df=files_df,\n data_dir=values['data_dir'], skip_load_validation=True)\n return values\n", "step-5": "from enum import Enum\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom loguru import logger\nimport pandas as pd\nfrom pydantic.class_validators import root_validator, validator\nfrom tqdm import tqdm\n\nfrom zamba.data.video import VideoLoaderConfig\nfrom zamba.models.config import (\n ZambaBaseModel,\n check_files_exist_and_load,\n get_filepaths,\n validate_model_cache_dir,\n)\nfrom zamba.models.densepose.densepose_manager import MODELS, DensePoseManager\nfrom zamba.models.utils import RegionEnum\n\n\nclass DensePoseOutputEnum(Enum):\n segmentation = \"segmentation\"\n chimp_anatomy = \"chimp_anatomy\"\n\n\nclass DensePoseConfig(ZambaBaseModel):\n \"\"\"Configuration for running dense pose on videos.\n\n Args:\n video_loader_config (VideoLoaderConfig): Configuration for loading videos\n output_type (str): one of DensePoseOutputEnum (currently \"segmentation\" or \"chimp_anatomy\").\n render_output (bool): Whether to save a 
version of the video with the output overlaid on top.\n Defaults to False.\n embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the\n DensePose result. Setting to True can result in large json files. Defaults to False.\n data_dir (Path): Where to find the files listed in filepaths (or where to look if\n filepaths is not provided).\n filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.\n save_dir (Path, optional): Directory for where to save the output files;\n defaults to os.getcwd().\n cache_dir (Path, optional): Path for downloading and saving model weights. Defaults\n to env var `MODEL_CACHE_DIR` or the OS app cache dir.\n weight_download_region (RegionEnum, optional): region where to download weights; should\n be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.\n \"\"\"\n\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum(\"us\")\n\n _validate_cache_dir = validator(\"cache_dir\", allow_reuse=True, always=True)(\n validate_model_cache_dir\n )\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS[\"animals\"]\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS[\"chimps\"]\n else:\n raise Exception(f\"invalid {self.output_type}\")\n\n output_dir = Path(os.getcwd()) if self.save_dir is None else self.save_dir\n\n dpm = DensePoseManager(\n model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region\n )\n\n for fp in 
tqdm(self.filepaths.filepath, desc=\"Videos\"):\n fp = Path(fp)\n\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=self.video_loader_config)\n\n # serialize the labels generated by densepose to json\n output_path = output_dir / f\"{fp.stem}_denspose_labels.json\"\n dpm.serialize_video_output(\n labels, filename=output_path, write_embeddings=self.embeddings_in_json\n )\n\n # re-render the video with the densepose labels visualized on top of the video\n if self.render_output:\n output_path = output_dir / f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\"\n visualized_video = dpm.visualize_video(\n vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps\n )\n\n # write out the anatomy present in each frame to a csv for later analysis\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / f\"{fp.stem}_denspose_anatomy.csv\"\n dpm.anatomize_video(\n visualized_video,\n labels,\n output_path=output_path,\n fps=self.video_loader_config.fps,\n )\n\n _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(\n get_filepaths\n )\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n # if globbing from data directory, already have valid dataframe\n if isinstance(values[\"filepaths\"], pd.DataFrame):\n files_df = values[\"filepaths\"]\n else:\n # make into dataframe even if only one column for clearer indexing\n files_df = pd.DataFrame(pd.read_csv(values[\"filepaths\"]))\n\n if \"filepath\" not in files_df.columns:\n raise ValueError(f\"{values['filepaths']} must contain a `filepath` column.\")\n\n # can only contain one row per filepath\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f\"Found {duplicated.sum():,} duplicate row(s) in filepaths csv. 
Dropping duplicates so predictions will have one row per video.\"\n )\n files_df = files_df[[\"filepath\"]].drop_duplicates()\n\n values[\"filepaths\"] = check_files_exist_and_load(\n df=files_df,\n data_dir=values[\"data_dir\"],\n skip_load_validation=True,\n )\n return values\n", "step-ids": [ 4, 5, 7, 8, 9 ] }
[ 4, 5, 7, 8, 9 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> def tetrahedron_filled(tetrahedrons, water): var = 0 br = 0 tetrahedrons.sort() for numbers in tetrahedrons: v = tetrahedrons[var] ** 3 * 2 ** 0.5 / 12000 if v < water: br = br + 1 water = water - v var = var + 1 print(br) <|reserved_special_token_0|> <|reserved_special_token_1|> def tetrahedron_filled(tetrahedrons, water): var = 0 br = 0 tetrahedrons.sort() for numbers in tetrahedrons: v = tetrahedrons[var] ** 3 * 2 ** 0.5 / 12000 if v < water: br = br + 1 water = water - v var = var + 1 print(br) print(tetrahedron_filled([1000, 10], 10)) <|reserved_special_token_1|> def tetrahedron_filled(tetrahedrons, water): var=0 br=0 tetrahedrons.sort() for numbers in tetrahedrons: v=(tetrahedrons[var]**3*(2**0.5))/12000 if v<water: br=br+1 water=water-v var=var+1 print (br) print (tetrahedron_filled([1000,10],10))
flexible
{ "blob_id": "c926e16ef2daa5978b6c71e7794721d320bb9b1e", "index": 1224, "step-1": "<mask token>\n", "step-2": "def tetrahedron_filled(tetrahedrons, water):\n var = 0\n br = 0\n tetrahedrons.sort()\n for numbers in tetrahedrons:\n v = tetrahedrons[var] ** 3 * 2 ** 0.5 / 12000\n if v < water:\n br = br + 1\n water = water - v\n var = var + 1\n print(br)\n\n\n<mask token>\n", "step-3": "def tetrahedron_filled(tetrahedrons, water):\n var = 0\n br = 0\n tetrahedrons.sort()\n for numbers in tetrahedrons:\n v = tetrahedrons[var] ** 3 * 2 ** 0.5 / 12000\n if v < water:\n br = br + 1\n water = water - v\n var = var + 1\n print(br)\n\n\nprint(tetrahedron_filled([1000, 10], 10))\n", "step-4": "def tetrahedron_filled(tetrahedrons, water):\n\tvar=0\n\tbr=0\n\ttetrahedrons.sort()\n\tfor numbers in tetrahedrons:\n\t\tv=(tetrahedrons[var]**3*(2**0.5))/12000\n\t\tif v<water:\n\t\t\tbr=br+1\n\t\t\twater=water-v\n\t\tvar=var+1\n\tprint (br)\n\n\nprint (tetrahedron_filled([1000,10],10))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class InvoiceServiceTestCase(TestCase): <|reserved_special_token_0|> def test_create_invoice(self): invoice = self.invoice_service.create_invoice(amount=12.1, status= InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor =self.debtor_1) self.assertEqual(invoice.amount, 12.1) self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE) self.assertEqual(invoice.due_date, date(2019, 4, 1)) self.assertEqual(invoice.debtor, self.debtor_1) def test_update_invoice(self): updated_invoice = self.invoice_service.update_invoice(instance=self .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo') self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID) self.assertFalse(hasattr(updated_invoice, 'random_attr')) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class InvoiceServiceTestCase(TestCase): def setUp(self) ->None: self.invoice_service = InvoiceService() self.debtor_1 = mommy.make(Debtor) self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1) def test_create_invoice(self): invoice = self.invoice_service.create_invoice(amount=12.1, status= InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor =self.debtor_1) self.assertEqual(invoice.amount, 12.1) self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE) self.assertEqual(invoice.due_date, date(2019, 4, 1)) self.assertEqual(invoice.debtor, self.debtor_1) def test_update_invoice(self): updated_invoice = self.invoice_service.update_invoice(instance=self .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo') self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID) self.assertFalse(hasattr(updated_invoice, 'random_attr')) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class InvoiceServiceTestCase(TestCase): def setUp(self) ->None: self.invoice_service = InvoiceService() self.debtor_1 = mommy.make(Debtor) self.invoice_1 = mommy.make(Invoice, 
debtor=self.debtor_1) def test_create_invoice(self): invoice = self.invoice_service.create_invoice(amount=12.1, status= InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor =self.debtor_1) self.assertEqual(invoice.amount, 12.1) self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE) self.assertEqual(invoice.due_date, date(2019, 4, 1)) self.assertEqual(invoice.debtor, self.debtor_1) def test_update_invoice(self): updated_invoice = self.invoice_service.update_invoice(instance=self .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo') self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID) self.assertFalse(hasattr(updated_invoice, 'random_attr')) def test_delete_invoice(self): self.invoice_service.delete_invoice(instance=self.invoice_1) self.assertFalse(Invoice.objects.all().exists()) <|reserved_special_token_1|> from datetime import date from django.test import TestCase from model_mommy import mommy from apps.debtors.models import Debtor from apps.invoices.models import Invoice, InvoiceStatusChoices from apps.invoices.services import InvoiceService class InvoiceServiceTestCase(TestCase): def setUp(self) ->None: self.invoice_service = InvoiceService() self.debtor_1 = mommy.make(Debtor) self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1) def test_create_invoice(self): invoice = self.invoice_service.create_invoice(amount=12.1, status= InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor =self.debtor_1) self.assertEqual(invoice.amount, 12.1) self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE) self.assertEqual(invoice.due_date, date(2019, 4, 1)) self.assertEqual(invoice.debtor, self.debtor_1) def test_update_invoice(self): updated_invoice = self.invoice_service.update_invoice(instance=self .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo') self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID) self.assertFalse(hasattr(updated_invoice, 'random_attr')) def 
test_delete_invoice(self): self.invoice_service.delete_invoice(instance=self.invoice_1) self.assertFalse(Invoice.objects.all().exists())
flexible
{ "blob_id": "5f77e93d63c696363c30f019019acd22c694308b", "index": 4529, "step-1": "<mask token>\n\n\nclass InvoiceServiceTestCase(TestCase):\n <mask token>\n\n def test_create_invoice(self):\n invoice = self.invoice_service.create_invoice(amount=12.1, status=\n InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor\n =self.debtor_1)\n self.assertEqual(invoice.amount, 12.1)\n self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)\n self.assertEqual(invoice.due_date, date(2019, 4, 1))\n self.assertEqual(invoice.debtor, self.debtor_1)\n\n def test_update_invoice(self):\n updated_invoice = self.invoice_service.update_invoice(instance=self\n .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo')\n self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)\n self.assertFalse(hasattr(updated_invoice, 'random_attr'))\n <mask token>\n", "step-2": "<mask token>\n\n\nclass InvoiceServiceTestCase(TestCase):\n\n def setUp(self) ->None:\n self.invoice_service = InvoiceService()\n self.debtor_1 = mommy.make(Debtor)\n self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1)\n\n def test_create_invoice(self):\n invoice = self.invoice_service.create_invoice(amount=12.1, status=\n InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor\n =self.debtor_1)\n self.assertEqual(invoice.amount, 12.1)\n self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)\n self.assertEqual(invoice.due_date, date(2019, 4, 1))\n self.assertEqual(invoice.debtor, self.debtor_1)\n\n def test_update_invoice(self):\n updated_invoice = self.invoice_service.update_invoice(instance=self\n .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo')\n self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)\n self.assertFalse(hasattr(updated_invoice, 'random_attr'))\n <mask token>\n", "step-3": "<mask token>\n\n\nclass InvoiceServiceTestCase(TestCase):\n\n def setUp(self) ->None:\n self.invoice_service = InvoiceService()\n self.debtor_1 = 
mommy.make(Debtor)\n self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1)\n\n def test_create_invoice(self):\n invoice = self.invoice_service.create_invoice(amount=12.1, status=\n InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor\n =self.debtor_1)\n self.assertEqual(invoice.amount, 12.1)\n self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)\n self.assertEqual(invoice.due_date, date(2019, 4, 1))\n self.assertEqual(invoice.debtor, self.debtor_1)\n\n def test_update_invoice(self):\n updated_invoice = self.invoice_service.update_invoice(instance=self\n .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo')\n self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)\n self.assertFalse(hasattr(updated_invoice, 'random_attr'))\n\n def test_delete_invoice(self):\n self.invoice_service.delete_invoice(instance=self.invoice_1)\n self.assertFalse(Invoice.objects.all().exists())\n", "step-4": "from datetime import date\nfrom django.test import TestCase\nfrom model_mommy import mommy\nfrom apps.debtors.models import Debtor\nfrom apps.invoices.models import Invoice, InvoiceStatusChoices\nfrom apps.invoices.services import InvoiceService\n\n\nclass InvoiceServiceTestCase(TestCase):\n\n def setUp(self) ->None:\n self.invoice_service = InvoiceService()\n self.debtor_1 = mommy.make(Debtor)\n self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1)\n\n def test_create_invoice(self):\n invoice = self.invoice_service.create_invoice(amount=12.1, status=\n InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor\n =self.debtor_1)\n self.assertEqual(invoice.amount, 12.1)\n self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)\n self.assertEqual(invoice.due_date, date(2019, 4, 1))\n self.assertEqual(invoice.debtor, self.debtor_1)\n\n def test_update_invoice(self):\n updated_invoice = self.invoice_service.update_invoice(instance=self\n .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo')\n 
self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)\n self.assertFalse(hasattr(updated_invoice, 'random_attr'))\n\n def test_delete_invoice(self):\n self.invoice_service.delete_invoice(instance=self.invoice_1)\n self.assertFalse(Invoice.objects.all().exists())\n", "step-5": null, "step-ids": [ 3, 4, 5, 6 ] }
[ 3, 4, 5, 6 ]
#!/usr/bin/env python __author__ = "Maxime Beauchamp" __version__ = "0.1" __date__ = "2020-12-10" __email__ = "maxime.beauchamp@imt-atantique.fr" from graphics_OSSE import * # function to create recursive paths def mk_dir_recursive(dir_path): if os.path.isdir(dir_path): return h, t = os.path.split(dir_path) # head/tail if not os.path.isdir(h): mk_dir_recursive(h) new_path = join_paths(h, t) if not os.path.isdir(new_path): os.mkdir(new_path) type_obs = sys.argv[1] domain = sys.argv[2] workpath = "/users/local/m19beauc/4DVARNN-DinAE_xp/"+domain+"/OSSE/scores_allmethods_nadlag_"+type_obs scratchpath = "/users/local/m19beauc/4DVARNN-DinAE_xp/"+domain+"/OSSE" if not os.path.exists(workpath): mk_dir_recursive(workpath) #else: # shutil.rmtree(workpath) # mk_dir_recursive(workpath) ## parameters if domain=="OSMOSIS": extent = [-19.5,-11.5,45.,55.] indLat = 200 indLon = 160 elif domain=='GULFSTREAM': extent = [-65.,-55.,33.,43.] indLat = 200 indLon = 200 else: extent=[-65.,-55.,30.,40.] indLat = 200 indLon = 200 #lon = lon[:indLon] #lat = lat[:indLat] ## store all data in a list AnDA_nadir_lag_0_file = scratchpath+'/resAnDA_nadir_nadlag_0_'+type_obs+'/saved_path.pickle' FP_GENN_nadir_lag_0_file = scratchpath+'/resIA_nadir_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle' AnDA_nadir_lag_5_file = scratchpath+'/resAnDA_nadir_nadlag_5_'+type_obs+'/saved_path.pickle' FP_GENN_nadir_lag_5_file = scratchpath+'/resIA_nadir_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle' AnDA_nadirswot_lag_0_file = scratchpath+'/resAnDA_nadirswot_nadlag_0'+type_obs+'/saved_path.pickle' FP_GENN_nadirswot_lag_0_file = scratchpath+'/resIA_nadirswot_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle' AnDA_nadirswot_lag_5_file = scratchpath+'/resAnDA_nadirswot_nadlag_5'+type_obs+'/saved_path.pickle' FP_GENN_nadirswot_lag_5_file = 
scratchpath+'/resIA_nadirswot_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle' # Reload saved AnDA result with open(AnDA_nadir_lag_0_file, 'rb') as handle: AnDA_ssh_1, itrp_dineof = pickle.load(handle) AnDA_ssh_1_nadir_0 = AnDA_ssh_1 itrp_dineof_nadir_0 = itrp_dineof with open(AnDA_nadirswot_lag_0_file, 'rb') as handle: AnDA_ssh_1, itrp_dineof = pickle.load(handle) AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1 itrp_dineof_nadirswot_0 = itrp_dineof with open(AnDA_nadir_lag_5_file, 'rb') as handle: AnDA_ssh_1, itrp_dineof = pickle.load(handle) AnDA_ssh_1_nadir_5 = AnDA_ssh_1 itrp_dineof_nadir_5 = itrp_dineof with open(AnDA_nadirswot_lag_5_file, 'rb') as handle: AnDA_ssh_1, itrp_dineof = pickle.load(handle) AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1 itrp_dineof_nadirswot_5 = itrp_dineof # Reload saved ConvAE and GE-NN results with open(FP_GENN_nadir_lag_0_file, 'rb') as handle: itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9] with open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle: itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9] with open(FP_GENN_nadir_lag_5_file, 'rb') as handle: itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9] with open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle: itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9] ## list of dates lday1 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\ + timedelta(days=60+i),"%Y-%m-%d") for i in range(20) ] lday2 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\ + timedelta(days=140+i),"%Y-%m-%d") for i in range(20) ] lday3 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\ + timedelta(days=220+i),"%Y-%m-%d") for i in range(20) ] lday4 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\ + timedelta(days=300+i),"%Y-%m-%d") for i in range(20) ] lday = np.concatenate([lday1,lday2,lday3,lday4]) lday2 = [ datetime.strptime(lday[i],'%Y-%m-%d') for i in 
range(len(lday)) ] GT = AnDA_ssh_1_nadir.GT[:,:indLat,:indLon] # list_data (AnDA nadir) list_data = [] list_data.append(GT) list_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:,:indLat,:indLon]) list_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:,:indLat,:indLon]) # arguments for plots (nadir) labels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)']) colors = np.array(['k','','red','blue']) symbols = np.array(['k','','o','o']) lstyle = np.array(['solid','','solid','solid']) lwidth = np.array([2,2,1,1]) # compare shapes and do appropriate downscaling with minimal resolution min_res=1e9 for i in range(len(list_data)): min_res=min(min_res,list_data[i].shape[1]) for i in range(len(list_data)): if list_data[i].shape[1]>min_res: dwscale = int(list_data[i].shape[1]/min_res) list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean) print(list_data[i].shape) dwscale = int(200/min_res) indLon = int(indLon/dwscale) indLat = int(indLat/dwscale) lon = np.arange(extent[0],extent[1],1/(20/dwscale)) lat = np.arange(extent[2],extent[3],1/(20/dwscale)) ## nRMSE time series resfile=workpath+"/TS_AnDA_nadir_nadlag.png" plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False) # list_data (AnDA nadirswot) list_data = [] list_data.append(GT) list_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:,:indLat,:indLon]) list_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:,:indLat,:indLon]) # arguments for plots (nadirswot) labels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)']) colors = np.array(['k','','red','blue']) symbols = np.array(['k','','o','o']) lstyle = np.array(['solid','','solid','solid']) lwidth = np.array([2,2,1,1]) # compare shapes and do appropriate downscaling with minimal resolution min_res=1e9 for i in range(len(list_data)): min_res=min(min_res,list_data[i].shape[1]) for i in range(len(list_data)): if list_data[i].shape[1]>min_res: 
dwscale = int(list_data[i].shape[1]/min_res) list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean) print(list_data[i].shape) dwscale = int(200/min_res) indLon = int(indLon/dwscale) indLat = int(indLat/dwscale) lon = np.arange(extent[0],extent[1],1/(20/dwscale)) lat = np.arange(extent[2],extent[3],1/(20/dwscale)) ## nRMSE time series resfile=workpath+"/TS_AnDA_nadirswot_nadlag.png" plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False) # list_data (GENN nadir) list_data = [] list_data.append(GT) list_data.append(itrp_FP_GENN_nadir_0[:,:indLat,:indLon]) list_data.append(itrp_FP_GENN_nadir_5[:,:indLat,:indLon]) # arguments for plots (nadir) labels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)']) colors = np.array(['k','','red','blue']) symbols = np.array(['k','','o','o']) lstyle = np.array(['solid','','solid','solid']) lwidth = np.array([2,2,1,1]) # compare shapes and do appropriate downscaling with minimal resolution min_res=1e9 for i in range(len(list_data)): min_res=min(min_res,list_data[i].shape[1]) for i in range(len(list_data)): if list_data[i].shape[1]>min_res: dwscale = int(list_data[i].shape[1]/min_res) list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean) print(list_data[i].shape) dwscale = int(200/min_res) indLon = int(indLon/dwscale) indLat = int(indLat/dwscale) lon = np.arange(extent[0],extent[1],1/(20/dwscale)) lat = np.arange(extent[2],extent[3],1/(20/dwscale)) ## nRMSE time series resfile=workpath+"/TS_GENN_nadir_nadlag.png" plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False) # list_data (GENN nadirswot) list_data = [] list_data.append(GT) list_data.append(itrp_FP_GENN_nadirswot_0[:,:indLat,:indLon]) list_data.append(itrp_FP_GENN_nadirswot_5[:,:indLat,:indLon]) # arguments for plots (nadirswot) labels_data = 
np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)']) colors = np.array(['k','','red','blue']) symbols = np.array(['k','','o','o']) lstyle = np.array(['solid','','solid','solid']) lwidth = np.array([2,2,1,1]) # compare shapes and do appropriate downscaling with minimal resolution min_res=1e9 for i in range(len(list_data)): min_res=min(min_res,list_data[i].shape[1]) for i in range(len(list_data)): if list_data[i].shape[1]>min_res: dwscale = int(list_data[i].shape[1]/min_res) list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean) print(list_data[i].shape) dwscale = int(200/min_res) indLon = int(indLon/dwscale) indLat = int(indLat/dwscale) lon = np.arange(extent[0],extent[1],1/(20/dwscale)) lat = np.arange(extent[2],extent[3],1/(20/dwscale)) ## nRMSE time series resfile=workpath+"/TS_GENN_nadirswot_nadlag.png" plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
normal
{ "blob_id": "9f4cd9ed8aea03f5908aef4a154d964f0810619b", "index": 9820, "step-1": "<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\n<mask token>\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\n<mask token>\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as 
handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\n<mask token>\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n 
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n", "step-3": "__author__ = 'Maxime Beauchamp'\n__version__ = '0.1'\n__date__ = '2020-12-10'\n__email__ = 'maxime.beauchamp@imt-atantique.fr'\n<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2]\nworkpath = ('/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain +\n '/OSSE/scores_allmethods_nadlag_' + type_obs)\nscratchpath = '/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain + '/OSSE'\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\nAnDA_nadir_lag_0_file = (scratchpath + '/resAnDA_nadir_nadlag_0_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_0_file = 
(scratchpath + '/resIA_nadir_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadir_lag_5_file = (scratchpath + '/resAnDA_nadir_nadlag_5_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_5_file = (scratchpath + '/resIA_nadir_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_0_file = (scratchpath + '/resAnDA_nadirswot_nadlag_0' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_0_file = (scratchpath + '/resIA_nadirswot_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_5_file = (scratchpath + '/resAnDA_nadirswot_nadlag_5' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_5_file = (scratchpath + '/resIA_nadirswot_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith 
open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\nlday1 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=60 + i), '%Y-%m-%d') for i in range(20)]\nlday2 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=140 + i), '%Y-%m-%d') for i in range(20)]\nlday3 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=220 + i), '%Y-%m-%d') for i in range(20)]\nlday4 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=300 + i), '%Y-%m-%d') for i in range(20)]\nlday = np.concatenate([lday1, lday2, lday3, lday4])\nlday2 = [datetime.strptime(lday[i], '%Y-%m-%d') for i in range(len(lday))]\nGT = AnDA_ssh_1_nadir.GT[:, :indLat, :indLon]\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, 
lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n 
reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n", "step-4": "__author__ = 'Maxime Beauchamp'\n__version__ = '0.1'\n__date__ = '2020-12-10'\n__email__ = 'maxime.beauchamp@imt-atantique.fr'\nfrom graphics_OSSE import *\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n 
os.mkdir(new_path)\n\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2]\nworkpath = ('/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain +\n '/OSSE/scores_allmethods_nadlag_' + type_obs)\nscratchpath = '/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain + '/OSSE'\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\nAnDA_nadir_lag_0_file = (scratchpath + '/resAnDA_nadir_nadlag_0_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_0_file = (scratchpath + '/resIA_nadir_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadir_lag_5_file = (scratchpath + '/resAnDA_nadir_nadlag_5_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_5_file = (scratchpath + '/resIA_nadir_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_0_file = (scratchpath + '/resAnDA_nadirswot_nadlag_0' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_0_file = (scratchpath + '/resIA_nadirswot_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_5_file = (scratchpath + '/resAnDA_nadirswot_nadlag_5' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_5_file = (scratchpath + '/resIA_nadirswot_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as 
handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\nlday1 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=60 + i), '%Y-%m-%d') for i in range(20)]\nlday2 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=140 + i), '%Y-%m-%d') for i in range(20)]\nlday3 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=220 + i), '%Y-%m-%d') for i in range(20)]\nlday4 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=300 + i), '%Y-%m-%d') for i in range(20)]\nlday = np.concatenate([lday1, lday2, lday3, lday4])\nlday2 = [datetime.strptime(lday[i], '%Y-%m-%d') for i in range(len(lday))]\nGT = AnDA_ssh_1_nadir.GT[:, :indLat, :indLon]\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 
1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = 
[]\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon 
/ dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n", "step-5": "#!/usr/bin/env python\n\n__author__ = \"Maxime Beauchamp\"\n__version__ = \"0.1\"\n__date__ = \"2020-12-10\"\n__email__ = \"maxime.beauchamp@imt-atantique.fr\"\n\nfrom graphics_OSSE import *\n\n# function to create recursive paths\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path) # head/tail\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2] \n\nworkpath = \"/users/local/m19beauc/4DVARNN-DinAE_xp/\"+domain+\"/OSSE/scores_allmethods_nadlag_\"+type_obs\nscratchpath = \"/users/local/m19beauc/4DVARNN-DinAE_xp/\"+domain+\"/OSSE\"\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\n#else:\n# shutil.rmtree(workpath)\n# mk_dir_recursive(workpath) \n\n## parameters\nif domain==\"OSMOSIS\":\n extent = [-19.5,-11.5,45.,55.]\n indLat = 200\n indLon = 160\nelif domain=='GULFSTREAM':\n extent = [-65.,-55.,33.,43.]\n indLat = 200\n indLon = 200\nelse:\n extent=[-65.,-55.,30.,40.]\n indLat = 200\n indLon = 200\n#lon = lon[:indLon]\n#lat = lat[:indLat]\n\n## store all data in a list\nAnDA_nadir_lag_0_file = scratchpath+'/resAnDA_nadir_nadlag_0_'+type_obs+'/saved_path.pickle'\nFP_GENN_nadir_lag_0_file = scratchpath+'/resIA_nadir_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadir_lag_5_file = scratchpath+'/resAnDA_nadir_nadlag_5_'+type_obs+'/saved_path.pickle'\nFP_GENN_nadir_lag_5_file = scratchpath+'/resIA_nadir_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadirswot_lag_0_file = 
scratchpath+'/resAnDA_nadirswot_nadlag_0'+type_obs+'/saved_path.pickle'\nFP_GENN_nadirswot_lag_0_file = scratchpath+'/resIA_nadirswot_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadirswot_lag_5_file = scratchpath+'/resAnDA_nadirswot_nadlag_5'+type_obs+'/saved_path.pickle'\nFP_GENN_nadirswot_lag_5_file = scratchpath+'/resIA_nadirswot_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\n\n# Reload saved AnDA result\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1 \n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1 \n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\n# Reload saved ConvAE and GE-NN results\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9]\n\n\n## list of dates\nlday1 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=60+i),\"%Y-%m-%d\") for i in range(20) ]\nlday2 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + 
timedelta(days=140+i),\"%Y-%m-%d\") for i in range(20) ]\nlday3 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=220+i),\"%Y-%m-%d\") for i in range(20) ]\nlday4 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=300+i),\"%Y-%m-%d\") for i in range(20) ]\nlday = np.concatenate([lday1,lday2,lday3,lday4])\nlday2 = [ datetime.strptime(lday[i],'%Y-%m-%d') for i in range(len(lday)) ]\n\nGT = AnDA_ssh_1_nadir.GT[:,:indLat,:indLon]\n# list_data (AnDA nadir)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:,:indLat,:indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:,:indLat,:indLon])\n# arguments for plots (nadir)\nlabels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_AnDA_nadir_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (AnDA nadirswot)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:,:indLat,:indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:,:indLat,:indLon])\n# 
arguments for plots (nadirswot)\nlabels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_AnDA_nadirswot_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (GENN nadir)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:,:indLat,:indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:,:indLat,:indLon])\n# arguments for plots (nadir)\nlabels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = 
int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_GENN_nadir_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (GENN nadirswot)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:,:indLat,:indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:,:indLat,:indLon])\n# arguments for plots (nadirswot)\nlabels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_GENN_nadirswot_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.http import HttpResponse from polls.models import Pregunta from django.template import loader def index(request): preguntas = Pregunta.objects.order_by('-fecha')[:5] template = loader.get_template('polls/index.html') context = { 'listado': preguntas,} return HttpResponse(template.render(context, request)) def detalle(request, id_pregunta): pregunta = Pregunta.objects.get(id=id_pregunta) template = loader.get_template('polls/detalle.html') context = { 'pregunta': pregunta } return HttpResponse(template.render(context, request)) def resultados(request, total): latest_question_list = Pregunta.objects.order_by('fecha')[:total] output = ', '.join([q.descripcion for q in latest_question_list]) return HttpResponse(output) """ -Construir una vista que retorne todas las opciones asociadas a una pregunta *FILTRAR POR ID DE PREGUNTA """
normal
{ "blob_id": "07dc058ecef323ffd41299245e4fcafdc9e41506", "index": 2131, "step-1": "<mask token>\n\n\ndef resultados(request, total):\n latest_question_list = Pregunta.objects.order_by('fecha')[:total]\n output = ', '.join([q.descripcion for q in latest_question_list])\n return HttpResponse(output)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef detalle(request, id_pregunta):\n pregunta = Pregunta.objects.get(id=id_pregunta)\n template = loader.get_template('polls/detalle.html')\n context = {'pregunta': pregunta}\n return HttpResponse(template.render(context, request))\n\n\ndef resultados(request, total):\n latest_question_list = Pregunta.objects.order_by('fecha')[:total]\n output = ', '.join([q.descripcion for q in latest_question_list])\n return HttpResponse(output)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef index(request):\n preguntas = Pregunta.objects.order_by('-fecha')[:5]\n template = loader.get_template('polls/index.html')\n context = {'listado': preguntas}\n return HttpResponse(template.render(context, request))\n\n\ndef detalle(request, id_pregunta):\n pregunta = Pregunta.objects.get(id=id_pregunta)\n template = loader.get_template('polls/detalle.html')\n context = {'pregunta': pregunta}\n return HttpResponse(template.render(context, request))\n\n\ndef resultados(request, total):\n latest_question_list = Pregunta.objects.order_by('fecha')[:total]\n output = ', '.join([q.descripcion for q in latest_question_list])\n return HttpResponse(output)\n\n\n<mask token>\n", "step-4": "from django.http import HttpResponse\nfrom polls.models import Pregunta\nfrom django.template import loader\n\n\ndef index(request):\n preguntas = Pregunta.objects.order_by('-fecha')[:5]\n template = loader.get_template('polls/index.html')\n context = {'listado': preguntas}\n return HttpResponse(template.render(context, request))\n\n\ndef detalle(request, id_pregunta):\n pregunta = Pregunta.objects.get(id=id_pregunta)\n template = 
loader.get_template('polls/detalle.html')\n context = {'pregunta': pregunta}\n return HttpResponse(template.render(context, request))\n\n\ndef resultados(request, total):\n latest_question_list = Pregunta.objects.order_by('fecha')[:total]\n output = ', '.join([q.descripcion for q in latest_question_list])\n return HttpResponse(output)\n\n\n<mask token>\n", "step-5": "from django.http import HttpResponse\r\n\r\nfrom polls.models import Pregunta\r\n\r\nfrom django.template import loader\r\n\r\n\r\ndef index(request):\r\n\tpreguntas = Pregunta.objects.order_by('-fecha')[:5]\r\n\ttemplate = loader.get_template('polls/index.html')\r\n\tcontext = { 'listado': preguntas,}\r\n\treturn HttpResponse(template.render(context, request))\r\n\r\ndef detalle(request, id_pregunta):\r\n\tpregunta = Pregunta.objects.get(id=id_pregunta)\r\n\ttemplate = loader.get_template('polls/detalle.html')\r\n\tcontext = { 'pregunta': pregunta }\r\n\treturn HttpResponse(template.render(context, request))\r\n\r\ndef resultados(request, total):\r\n latest_question_list = Pregunta.objects.order_by('fecha')[:total]\r\n output = ', '.join([q.descripcion for q in latest_question_list])\r\n return HttpResponse(output)\r\n\r\n\"\"\"\r\n\t-Construir una vista que retorne todas las opciones asociadas a una pregunta\r\n\t*FILTRAR POR ID DE PREGUNTA\r\n\"\"\"", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> def f(x, y): return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def f(x, y): return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2) <|reserved_special_token_0|> axes([0.025, 0.025, 0.95, 0.95]) contourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot) <|reserved_special_token_0|> clabel(C, inline=1, fontsize=10) xticks([]), yticks([]) savefig('../figures/contour_ex.png', dpi=48) show() <|reserved_special_token_1|> <|reserved_special_token_0|> def f(x, y): return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2) n = 256 x = np.linspace(-3, 3, n) y = np.linspace(-3, 3, n) X, Y = np.meshgrid(x, y) axes([0.025, 0.025, 0.95, 0.95]) contourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot) C = contour(X, Y, f(X, Y), 8, colors='black', linewidth=0.5) clabel(C, inline=1, fontsize=10) xticks([]), yticks([]) savefig('../figures/contour_ex.png', dpi=48) show() <|reserved_special_token_1|> from pylab import * def f(x, y): return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2) n = 256 x = np.linspace(-3, 3, n) y = np.linspace(-3, 3, n) X, Y = np.meshgrid(x, y) axes([0.025, 0.025, 0.95, 0.95]) contourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot) C = contour(X, Y, f(X, Y), 8, colors='black', linewidth=0.5) clabel(C, inline=1, fontsize=10) xticks([]), yticks([]) savefig('../figures/contour_ex.png', dpi=48) show() <|reserved_special_token_1|> from pylab import * def f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2) n = 256 x = np.linspace(-3,3,n) y = np.linspace(-3,3,n) X,Y = np.meshgrid(x,y) axes([0.025,0.025,0.95,0.95]) contourf(X, Y, f(X,Y), 8, alpha=.75, cmap=cm.hot) C = contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5) clabel(C, inline=1, fontsize=10) xticks([]), yticks([]) savefig('../figures/contour_ex.png',dpi=48) show()
flexible
{ "blob_id": "e9c439eafac8fd689980ffcb562f3b5ee903dd56", "index": 2604, "step-1": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\n<mask token>\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\n<mask token>\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n", "step-3": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\nn = 256\nx = np.linspace(-3, 3, n)\ny = np.linspace(-3, 3, n)\nX, Y = np.meshgrid(x, y)\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\nC = contour(X, Y, f(X, Y), 8, colors='black', linewidth=0.5)\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n", "step-4": "from pylab import *\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\nn = 256\nx = np.linspace(-3, 3, n)\ny = np.linspace(-3, 3, n)\nX, Y = np.meshgrid(x, y)\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\nC = contour(X, Y, f(X, Y), 8, colors='black', linewidth=0.5)\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n", "step-5": "from pylab import *\n\ndef f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)\n\nn = 256\nx = np.linspace(-3,3,n)\ny = np.linspace(-3,3,n)\nX,Y = np.meshgrid(x,y)\n\naxes([0.025,0.025,0.95,0.95])\n\ncontourf(X, Y, f(X,Y), 8, alpha=.75, cmap=cm.hot)\nC = contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)\nclabel(C, inline=1, fontsize=10)\n\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png',dpi=48)\nshow()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class Actor: def __init__(self): self.x = random.random() * sizex self.y = random.random() * sizey self.xn = self.x self.yn = self.y def step(self): t = getnoise(self.x, self.y) * 5 * math.pi self.x = self.xn self.y = self.yn self.xn += steplenght * math.cos(t) self.yn += steplenght * math.sin(t) if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey: return None return self.xn, self.yn, self.x, self.y <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def getnoise(x, y): return noisemap[math.floor(x)][math.floor(y)] class Actor: def __init__(self): self.x = random.random() * sizex self.y = random.random() * sizey self.xn = self.x self.yn = self.y def step(self): t = getnoise(self.x, self.y) * 5 * math.pi self.x = self.xn self.y = self.yn self.xn += steplenght * math.cos(t) self.yn += steplenght * math.sin(t) if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey: return None return self.xn, self.yn, self.x, self.y <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> sizex = 950 sizey = 500 noisescale = 400 persistence = 0.5 lacunarity = 2 seed = random.randint(0, 100) actorsnum = 1000 stepsnum = 50 steplenght = 2 noisemap = np.zeros((sizex, sizey)) for i in range(sizex): for j in range(sizey): noisemap[i][j] = noise.pnoise2(i / noisescale, j / noisescale, octaves=2, persistence=persistence, lacunarity=lacunarity, repeatx=1024, repeaty=1024, base=seed) map_max = np.max(noisemap) map_min = np.min(noisemap) map_range = map_max - map_min for i in range(sizex): for j in range(sizey): k = noisemap[i][j] k = (k - map_min) / map_range noisemap[i][j] = k map_max = np.max(noisemap) map_min = np.min(noisemap) def getnoise(x, y): return noisemap[math.floor(x)][math.floor(y)] class Actor: def __init__(self): self.x = random.random() * sizex self.y = random.random() * sizey self.xn = self.x self.yn = self.y def step(self): t = getnoise(self.x, self.y) * 5 
* math.pi self.x = self.xn self.y = self.yn self.xn += steplenght * math.cos(t) self.yn += steplenght * math.sin(t) if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey: return None return self.xn, self.yn, self.x, self.y canvas = drawSvg.Drawing(sizex, sizey, displayInline='False') actors = [] for a in range(actorsnum): n = Actor() actors.append(n) for s in range(stepsnum): for a in actors: p = a.step() if p: canvas.append(drawSvg.Line(p[2], p[3], p[0], p[1], stroke= 'black', stroke_width=1)) else: actors.remove(a) canvas.saveSvg('test.svg') <|reserved_special_token_1|> import drawSvg import noise import random import math import numpy as np sizex = 950 sizey = 500 noisescale = 400 persistence = 0.5 lacunarity = 2 seed = random.randint(0, 100) actorsnum = 1000 stepsnum = 50 steplenght = 2 noisemap = np.zeros((sizex, sizey)) for i in range(sizex): for j in range(sizey): noisemap[i][j] = noise.pnoise2(i / noisescale, j / noisescale, octaves=2, persistence=persistence, lacunarity=lacunarity, repeatx=1024, repeaty=1024, base=seed) map_max = np.max(noisemap) map_min = np.min(noisemap) map_range = map_max - map_min for i in range(sizex): for j in range(sizey): k = noisemap[i][j] k = (k - map_min) / map_range noisemap[i][j] = k map_max = np.max(noisemap) map_min = np.min(noisemap) def getnoise(x, y): return noisemap[math.floor(x)][math.floor(y)] class Actor: def __init__(self): self.x = random.random() * sizex self.y = random.random() * sizey self.xn = self.x self.yn = self.y def step(self): t = getnoise(self.x, self.y) * 5 * math.pi self.x = self.xn self.y = self.yn self.xn += steplenght * math.cos(t) self.yn += steplenght * math.sin(t) if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey: return None return self.xn, self.yn, self.x, self.y canvas = drawSvg.Drawing(sizex, sizey, displayInline='False') actors = [] for a in range(actorsnum): n = Actor() actors.append(n) for s in range(stepsnum): for a in actors: p = a.step() if p: 
canvas.append(drawSvg.Line(p[2], p[3], p[0], p[1], stroke= 'black', stroke_width=1)) else: actors.remove(a) canvas.saveSvg('test.svg')
flexible
{ "blob_id": "68c9944c788b9976660384e5d1cd0a736c4cd0e6", "index": 3826, "step-1": "<mask token>\n\n\nclass Actor:\n\n def __init__(self):\n self.x = random.random() * sizex\n self.y = random.random() * sizey\n self.xn = self.x\n self.yn = self.y\n\n def step(self):\n t = getnoise(self.x, self.y) * 5 * math.pi\n self.x = self.xn\n self.y = self.yn\n self.xn += steplenght * math.cos(t)\n self.yn += steplenght * math.sin(t)\n if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:\n return None\n return self.xn, self.yn, self.x, self.y\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef getnoise(x, y):\n return noisemap[math.floor(x)][math.floor(y)]\n\n\nclass Actor:\n\n def __init__(self):\n self.x = random.random() * sizex\n self.y = random.random() * sizey\n self.xn = self.x\n self.yn = self.y\n\n def step(self):\n t = getnoise(self.x, self.y) * 5 * math.pi\n self.x = self.xn\n self.y = self.yn\n self.xn += steplenght * math.cos(t)\n self.yn += steplenght * math.sin(t)\n if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:\n return None\n return self.xn, self.yn, self.x, self.y\n\n\n<mask token>\n", "step-3": "<mask token>\nsizex = 950\nsizey = 500\nnoisescale = 400\npersistence = 0.5\nlacunarity = 2\nseed = random.randint(0, 100)\nactorsnum = 1000\nstepsnum = 50\nsteplenght = 2\nnoisemap = np.zeros((sizex, sizey))\nfor i in range(sizex):\n for j in range(sizey):\n noisemap[i][j] = noise.pnoise2(i / noisescale, j / noisescale,\n octaves=2, persistence=persistence, lacunarity=lacunarity,\n repeatx=1024, repeaty=1024, base=seed)\nmap_max = np.max(noisemap)\nmap_min = np.min(noisemap)\nmap_range = map_max - map_min\nfor i in range(sizex):\n for j in range(sizey):\n k = noisemap[i][j]\n k = (k - map_min) / map_range\n noisemap[i][j] = k\nmap_max = np.max(noisemap)\nmap_min = np.min(noisemap)\n\n\ndef getnoise(x, y):\n return noisemap[math.floor(x)][math.floor(y)]\n\n\nclass Actor:\n\n def __init__(self):\n self.x = random.random() * 
sizex\n self.y = random.random() * sizey\n self.xn = self.x\n self.yn = self.y\n\n def step(self):\n t = getnoise(self.x, self.y) * 5 * math.pi\n self.x = self.xn\n self.y = self.yn\n self.xn += steplenght * math.cos(t)\n self.yn += steplenght * math.sin(t)\n if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:\n return None\n return self.xn, self.yn, self.x, self.y\n\n\ncanvas = drawSvg.Drawing(sizex, sizey, displayInline='False')\nactors = []\nfor a in range(actorsnum):\n n = Actor()\n actors.append(n)\nfor s in range(stepsnum):\n for a in actors:\n p = a.step()\n if p:\n canvas.append(drawSvg.Line(p[2], p[3], p[0], p[1], stroke=\n 'black', stroke_width=1))\n else:\n actors.remove(a)\ncanvas.saveSvg('test.svg')\n", "step-4": "import drawSvg\nimport noise\nimport random\nimport math\nimport numpy as np\nsizex = 950\nsizey = 500\nnoisescale = 400\npersistence = 0.5\nlacunarity = 2\nseed = random.randint(0, 100)\nactorsnum = 1000\nstepsnum = 50\nsteplenght = 2\nnoisemap = np.zeros((sizex, sizey))\nfor i in range(sizex):\n for j in range(sizey):\n noisemap[i][j] = noise.pnoise2(i / noisescale, j / noisescale,\n octaves=2, persistence=persistence, lacunarity=lacunarity,\n repeatx=1024, repeaty=1024, base=seed)\nmap_max = np.max(noisemap)\nmap_min = np.min(noisemap)\nmap_range = map_max - map_min\nfor i in range(sizex):\n for j in range(sizey):\n k = noisemap[i][j]\n k = (k - map_min) / map_range\n noisemap[i][j] = k\nmap_max = np.max(noisemap)\nmap_min = np.min(noisemap)\n\n\ndef getnoise(x, y):\n return noisemap[math.floor(x)][math.floor(y)]\n\n\nclass Actor:\n\n def __init__(self):\n self.x = random.random() * sizex\n self.y = random.random() * sizey\n self.xn = self.x\n self.yn = self.y\n\n def step(self):\n t = getnoise(self.x, self.y) * 5 * math.pi\n self.x = self.xn\n self.y = self.yn\n self.xn += steplenght * math.cos(t)\n self.yn += steplenght * math.sin(t)\n if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:\n return None\n 
return self.xn, self.yn, self.x, self.y\n\n\ncanvas = drawSvg.Drawing(sizex, sizey, displayInline='False')\nactors = []\nfor a in range(actorsnum):\n n = Actor()\n actors.append(n)\nfor s in range(stepsnum):\n for a in actors:\n p = a.step()\n if p:\n canvas.append(drawSvg.Line(p[2], p[3], p[0], p[1], stroke=\n 'black', stroke_width=1))\n else:\n actors.remove(a)\ncanvas.saveSvg('test.svg')\n", "step-5": null, "step-ids": [ 3, 4, 6, 7 ] }
[ 3, 4, 6, 7 ]
<|reserved_special_token_0|> class TestComputeReverseDependencies(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) self.repos = [repo_0, repo_1] <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class TestComputeLeafPackages(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2)) self.repos = [repo_0, repo_1, repo_2] def test_simple(self): expected_leaf_packages = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) C 0.0.0-1; depends (E >= 1.0.0) E 0.0.0-1 """ ) leaf_packages = compute_leaf_packages(self.repos) self.assertEqual(leaf_packages, set(expected_leaf_packages)) <|reserved_special_token_1|> <|reserved_special_token_0|> class TestComputeDependencies(unittest.TestCase): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def test_chained_requirements(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) """ ) deps = compute_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_requirements_transitive(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) D 0.0.0-2 """ ) deps = compute_dependencies(self.repos, requirement, transitive=True) self.assertEqual(deps, set(expected_deps)) class TestComputeReverseDependencies(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) self.repos = [repo_0, 
repo_1] def test_no_dependency(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set()) def test_simple_dependency(self): requirement = InstallRequirement._from_string('E *') expected_deps = packages_from_definition( 'C 0.0.0-1; depends (E >= 1.0.0)') deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_dependencies(self): requirement = InstallRequirement._from_string('D ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0)""" ) deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_dependencies_transitive(self): requirement = InstallRequirement._from_string('D ^= 0.0.0') expected_deps = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0)""" ) deps = compute_reverse_dependencies(self.repos, requirement, transitive=True) self.assertEqual(deps, set(expected_deps)) class TestComputeLeafPackages(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2)) self.repos = [repo_0, repo_1, repo_2] def test_simple(self): expected_leaf_packages = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) C 0.0.0-1; depends (E >= 1.0.0) E 0.0.0-1 """ ) leaf_packages = compute_leaf_packages(self.repos) self.assertEqual(leaf_packages, set(expected_leaf_packages)) <|reserved_special_token_1|> <|reserved_special_token_0|> class TestComputeDependencies(unittest.TestCase): <|reserved_special_token_0|> def test_no_dependency(self): requirement = InstallRequirement._from_string('D == 0.0.0-2') expected_deps = set() deps = compute_dependencies(self.repos, 
requirement) self.assertEqual(deps, expected_deps) <|reserved_special_token_0|> def test_chained_requirements(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) """ ) deps = compute_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_requirements_transitive(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) D 0.0.0-2 """ ) deps = compute_dependencies(self.repos, requirement, transitive=True) self.assertEqual(deps, set(expected_deps)) class TestComputeReverseDependencies(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) self.repos = [repo_0, repo_1] def test_no_dependency(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set()) def test_simple_dependency(self): requirement = InstallRequirement._from_string('E *') expected_deps = packages_from_definition( 'C 0.0.0-1; depends (E >= 1.0.0)') deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_dependencies(self): requirement = InstallRequirement._from_string('D ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0)""" ) deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_dependencies_transitive(self): requirement = InstallRequirement._from_string('D ^= 0.0.0') expected_deps = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0)""" ) deps = 
compute_reverse_dependencies(self.repos, requirement, transitive=True) self.assertEqual(deps, set(expected_deps)) class TestComputeLeafPackages(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2)) self.repos = [repo_0, repo_1, repo_2] def test_simple(self): expected_leaf_packages = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) C 0.0.0-1; depends (E >= 1.0.0) E 0.0.0-1 """ ) leaf_packages = compute_leaf_packages(self.repos) self.assertEqual(leaf_packages, set(expected_leaf_packages)) <|reserved_special_token_1|> <|reserved_special_token_0|> class TestComputeDependencies(unittest.TestCase): <|reserved_special_token_0|> def test_no_dependency(self): requirement = InstallRequirement._from_string('D == 0.0.0-2') expected_deps = set() deps = compute_dependencies(self.repos, requirement) self.assertEqual(deps, expected_deps) def test_simple_dependency(self): requirement = InstallRequirement._from_string('C *') expected_deps = packages_from_definition( """E 1.0.0-1 E 1.0.1-1""") deps = compute_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_requirements(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) """ ) deps = compute_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_requirements_transitive(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) D 0.0.0-2 """ ) deps = compute_dependencies(self.repos, requirement, transitive=True) self.assertEqual(deps, set(expected_deps)) class TestComputeReverseDependencies(unittest.TestCase): def setUp(self): 
repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) self.repos = [repo_0, repo_1] def test_no_dependency(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set()) def test_simple_dependency(self): requirement = InstallRequirement._from_string('E *') expected_deps = packages_from_definition( 'C 0.0.0-1; depends (E >= 1.0.0)') deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_dependencies(self): requirement = InstallRequirement._from_string('D ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0)""" ) deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_dependencies_transitive(self): requirement = InstallRequirement._from_string('D ^= 0.0.0') expected_deps = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0)""" ) deps = compute_reverse_dependencies(self.repos, requirement, transitive=True) self.assertEqual(deps, set(expected_deps)) class TestComputeLeafPackages(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2)) self.repos = [repo_0, repo_1, repo_2] def test_simple(self): expected_leaf_packages = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) C 0.0.0-1; depends (E >= 1.0.0) E 0.0.0-1 """ ) leaf_packages = compute_leaf_packages(self.repos) self.assertEqual(leaf_packages, set(expected_leaf_packages)) <|reserved_special_token_1|> import unittest from textwrap import dedent from simplesat import InstallRequirement, Repository from 
simplesat.test_utils import packages_from_definition from ..compute_dependencies import (compute_dependencies, compute_leaf_packages, compute_reverse_dependencies) PACKAGE_DEF_0 = dedent("""\ A 0.0.0-1; depends (B ^= 0.0.0) B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) C 0.0.0-1; depends (E >= 1.0.0) """) PACKAGE_DEF_1 = dedent("""\ D 0.0.0-2 E 0.0.0-1 E 1.0.0-1 E 1.0.1-1 """) PACKAGE_DEF_2 = dedent("""\ B 0.0.0-1; depends (D == 0.0.0-2) C 0.0.0-1; depends (E >= 1.0.0) """) class TestComputeDependencies(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) self.repos = [repo_0, repo_1] def test_no_dependency(self): requirement = InstallRequirement._from_string('D == 0.0.0-2') expected_deps = set() deps = compute_dependencies(self.repos, requirement) self.assertEqual(deps, expected_deps) def test_simple_dependency(self): requirement = InstallRequirement._from_string('C *') expected_deps = packages_from_definition( """E 1.0.0-1 E 1.0.1-1""") deps = compute_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_requirements(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) """ ) deps = compute_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_requirements_transitive(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0) D 0.0.0-2 """ ) deps = compute_dependencies(self.repos, requirement, transitive=True) self.assertEqual(deps, set(expected_deps)) class TestComputeReverseDependencies(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = 
Repository(packages_from_definition(PACKAGE_DEF_1)) self.repos = [repo_0, repo_1] def test_no_dependency(self): requirement = InstallRequirement._from_string('A ^= 0.0.0') deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set()) def test_simple_dependency(self): requirement = InstallRequirement._from_string('E *') expected_deps = packages_from_definition( 'C 0.0.0-1; depends (E >= 1.0.0)' ) deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_dependencies(self): requirement = InstallRequirement._from_string('D ^= 0.0.0') expected_deps = packages_from_definition( """B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0)""" ) deps = compute_reverse_dependencies(self.repos, requirement) self.assertEqual(deps, set(expected_deps)) def test_chained_dependencies_transitive(self): requirement = InstallRequirement._from_string('D ^= 0.0.0') expected_deps = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) B 0.0.0-1; depends (D == 0.0.0-2) B 0.0.0-2; depends (D ^= 0.0.0)""" ) deps = compute_reverse_dependencies(self.repos, requirement, transitive=True) self.assertEqual(deps, set(expected_deps)) class TestComputeLeafPackages(unittest.TestCase): def setUp(self): repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0)) repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1)) repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2)) self.repos = [repo_0, repo_1, repo_2] def test_simple(self): expected_leaf_packages = packages_from_definition( """A 0.0.0-1; depends (B ^= 0.0.0) C 0.0.0-1; depends (E >= 1.0.0) E 0.0.0-1 """ ) leaf_packages = compute_leaf_packages(self.repos) self.assertEqual(leaf_packages, set(expected_leaf_packages))
flexible
{ "blob_id": "fcf19c49bb161305eaa5ba8bc26e276a8e8db8ea", "index": 3925, "step-1": "<mask token>\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n", "step-2": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = 
Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n", "step-3": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask 
token>\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n <mask token>\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, 
set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n", "step-4": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask token>\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('C *')\n expected_deps = packages_from_definition(\n \"\"\"E 1.0.0-1\n E 1.0.1-1\"\"\")\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n 
requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = 
Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n", "step-5": "import unittest\nfrom textwrap import dedent\n\nfrom simplesat import InstallRequirement, Repository\nfrom simplesat.test_utils import packages_from_definition\n\nfrom ..compute_dependencies import (compute_dependencies,\n compute_leaf_packages,\n compute_reverse_dependencies)\n\n\nPACKAGE_DEF_0 = dedent(\"\"\"\\\n A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n\"\"\")\n\n\nPACKAGE_DEF_1 = dedent(\"\"\"\\\n D 0.0.0-2\n E 0.0.0-1\n E 1.0.0-1\n E 1.0.1-1\n\"\"\")\n\nPACKAGE_DEF_2 = dedent(\"\"\"\\\n B 0.0.0-1; depends (D == 0.0.0-2)\n C 0.0.0-1; depends (E >= 1.0.0)\n\"\"\")\n\n\nclass TestComputeDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('C *')\n expected_deps = packages_from_definition(\n \"\"\"E 1.0.0-1\n E 1.0.1-1\"\"\")\n\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; 
depends (D ^= 0.0.0) \"\"\"\n )\n\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)'\n )\n\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass 
TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n", "step-ids": [ 5, 12, 13, 14, 18 ] }
[ 5, 12, 13, 14, 18 ]
# 1.- Crear una grafica que muestre la desviacion tipica de los datos cada dia para todos los pacientes # 2.- Crear una grafica que muestre a la vez la inflamacion maxima, media y minima para cada dia import numpy as np data = np.loadtxt(fname='inflammation-01.csv', delimiter=',') import matplotlib.pyplot as plt plt.plot(data.std(axis=0)) # Desviacion tipica por dia plt.show() plt.plot(data.max(axis=0)) # Inflamacion maxima, media y minima para cada dia plt.plot(data.mean(axis=0)) plt.plot(data.min(axis=0))
normal
{ "blob_id": "52064b518ad067c9906e7de8542d9a399076a0b5", "index": 4214, "step-1": "<mask token>\n", "step-2": "<mask token>\nplt.plot(data.std(axis=0))\nplt.show()\nplt.plot(data.max(axis=0))\nplt.plot(data.mean(axis=0))\nplt.plot(data.min(axis=0))\n", "step-3": "<mask token>\ndata = np.loadtxt(fname='inflammation-01.csv', delimiter=',')\n<mask token>\nplt.plot(data.std(axis=0))\nplt.show()\nplt.plot(data.max(axis=0))\nplt.plot(data.mean(axis=0))\nplt.plot(data.min(axis=0))\n", "step-4": "import numpy as np\ndata = np.loadtxt(fname='inflammation-01.csv', delimiter=',')\nimport matplotlib.pyplot as plt\nplt.plot(data.std(axis=0))\nplt.show()\nplt.plot(data.max(axis=0))\nplt.plot(data.mean(axis=0))\nplt.plot(data.min(axis=0))\n", "step-5": "# 1.- Crear una grafica que muestre la desviacion tipica de los datos cada dia para todos los pacientes\r\n# 2.- Crear una grafica que muestre a la vez la inflamacion maxima, media y minima para cada dia\r\n\r\nimport numpy as np\r\ndata = np.loadtxt(fname='inflammation-01.csv', delimiter=',')\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.plot(data.std(axis=0)) # Desviacion tipica por dia\r\nplt.show()\r\n\r\nplt.plot(data.max(axis=0)) # Inflamacion maxima, media y minima para cada dia\r\nplt.plot(data.mean(axis=0))\r\nplt.plot(data.min(axis=0))\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class BaseCard(object): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __getattr__(self, item): """ 添加魔术方法 :param item: :return: """ operation = item[0:3] field = item[4:] if operation == 'set' and field and field.lower( ) in self.support_set_field: def function(*args): self.data[field.lower()] = args[0] return function else: def function(*args): logging.info('不支持 %s_%s' % (operation, field)) print('不支持', operation, field) return function <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseCard(object): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def get_data(self): return self.data def __getattr__(self, item): """ 添加魔术方法 :param item: :return: """ operation = item[0:3] field = item[4:] if operation == 'set' and field and field.lower( ) in self.support_set_field: def function(*args): self.data[field.lower()] = args[0] return function else: def function(*args): logging.info('不支持 %s_%s' % (operation, field)) print('不支持', operation, field) return function <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseCard(object): def __init__(self, field=[]): self.data = {} self.support_set_field = field <|reserved_special_token_0|> <|reserved_special_token_0|> def get_data(self): return self.data def __getattr__(self, item): """ 添加魔术方法 :param item: :return: """ operation = item[0:3] field = item[4:] if operation == 'set' and field and field.lower( ) in self.support_set_field: def function(*args): self.data[field.lower()] = args[0] return function else: def function(*args): logging.info('不支持 %s_%s' % (operation, field)) print('不支持', operation, field) return function <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseCard(object): def __init__(self, field=[]): self.data = {} self.support_set_field = 
field def add_cue_words(self, arr): """ 为卡片添加cue words 提示用户输入 :param arr: :return: """ if arr: if isinstance(arr, str): arr = [arr] if 'cueWords' in self.data: self.data['cueWords'] = self.data['cueWords'] else: self.data['cueWords'] = [] self.data['cueWords'].extend(arr) return self def set_anchor(self, url, anchor_text): """ 设置卡片链接 :param url: 比如:http(s)://.... :param anchor_text: 链接显示的文字 :return: """ if url: self.data['url'] = url if anchor_text: self.data['anchorText'] = anchor_text return self def get_data(self): return self.data def __getattr__(self, item): """ 添加魔术方法 :param item: :return: """ operation = item[0:3] field = item[4:] if operation == 'set' and field and field.lower( ) in self.support_set_field: def function(*args): self.data[field.lower()] = args[0] return function else: def function(*args): logging.info('不支持 %s_%s' % (operation, field)) print('不支持', operation, field) return function if __name__ == '__main__': pass <|reserved_special_token_1|> #!/usr/bin/env python3 # -*- coding=utf-8 -*- # description: # author:jack # create_time: 2017/12/30 """ 卡片基类 """ import logging class BaseCard(object): def __init__(self, field=[]): self.data = {} self.support_set_field = field def add_cue_words(self, arr): """ 为卡片添加cue words 提示用户输入 :param arr: :return: """ if arr: if isinstance(arr, str): arr = [arr] if 'cueWords' in self.data: self.data['cueWords'] = self.data['cueWords'] else: self.data['cueWords'] = [] self.data['cueWords'].extend(arr) return self def set_anchor(self, url, anchor_text): """ 设置卡片链接 :param url: 比如:http(s)://.... 
:param anchor_text: 链接显示的文字 :return: """ if url: self.data['url'] = url if anchor_text: self.data['anchorText'] = anchor_text return self def get_data(self): return self.data def __getattr__(self, item): """ 添加魔术方法 :param item: :return: """ # 获取操作类型 set operation = item[0:3] # 获取被操作的属性 set_xxxx 获取xxxx field = item[4:] if operation == 'set' and field and (field.lower() in self.support_set_field): def function(*args): self.data[field.lower()] = args[0] return function else: def function(*args): logging.info("不支持 %s_%s" % (operation, field)) print('不支持', operation, field) return function if __name__ == '__main__': pass
flexible
{ "blob_id": "93e5852df00733c024a59d37699bae58bd893030", "index": 112, "step-1": "<mask token>\n\n\nclass BaseCard(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BaseCard(object):\n <mask token>\n <mask token>\n <mask token>\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n self.support_set_field = field\n <mask token>\n <mask token>\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n 
self.support_set_field = field\n\n def add_cue_words(self, arr):\n \"\"\"\n 为卡片添加cue words 提示用户输入\n :param arr:\n :return:\n \"\"\"\n if arr:\n if isinstance(arr, str):\n arr = [arr]\n if 'cueWords' in self.data:\n self.data['cueWords'] = self.data['cueWords']\n else:\n self.data['cueWords'] = []\n self.data['cueWords'].extend(arr)\n return self\n\n def set_anchor(self, url, anchor_text):\n \"\"\"\n 设置卡片链接\n :param url: 比如:http(s)://....\n :param anchor_text: 链接显示的文字\n :return:\n \"\"\"\n if url:\n self.data['url'] = url\n if anchor_text:\n self.data['anchorText'] = anchor_text\n return self\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\nif __name__ == '__main__':\n pass\n", "step-5": "#!/usr/bin/env python3\n# -*- coding=utf-8 -*-\n\n# description:\n# author:jack\n# create_time: 2017/12/30\n\"\"\"\n卡片基类\n\"\"\"\nimport logging\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n self.support_set_field = field\n\n def add_cue_words(self, arr):\n \"\"\"\n 为卡片添加cue words 提示用户输入\n :param arr:\n :return:\n \"\"\"\n\n if arr:\n if isinstance(arr, str):\n arr = [arr]\n\n if 'cueWords' in self.data:\n self.data['cueWords'] = self.data['cueWords']\n else:\n self.data['cueWords'] = []\n\n self.data['cueWords'].extend(arr)\n return self\n\n def set_anchor(self, url, anchor_text):\n \"\"\"\n 设置卡片链接\n :param url: 比如:http(s)://....\n :param anchor_text: 链接显示的文字\n :return:\n \"\"\"\n\n if url:\n self.data['url'] = url\n if anchor_text:\n self.data['anchorText'] = anchor_text\n return self\n\n def get_data(self):\n return 
self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n # 获取操作类型 set\n operation = item[0:3]\n # 获取被操作的属性 set_xxxx 获取xxxx\n field = item[4:]\n if operation == 'set' and field and (field.lower() in self.support_set_field):\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n def function(*args):\n logging.info(\"不支持 %s_%s\" % (operation, field))\n print('不支持', operation, field)\n\n return function\n\n\nif __name__ == '__main__':\n pass\n", "step-ids": [ 2, 3, 4, 7, 9 ] }
[ 2, 3, 4, 7, 9 ]
class Solution: def minimumDeletions(self, nums: List[int]) ->int: n = len(nums) a = nums.index(min(nums)) b = nums.index(max(nums)) if a > b: a, b = b, a return min(a + 1 + n - b, b + 1, n - a)
normal
{ "blob_id": "14f3c941856ddf6bd7b3e046f21072f0b5f7b036", "index": 5009, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n", "step-3": "class Solution:\n\n def minimumDeletions(self, nums: List[int]) ->int:\n n = len(nums)\n a = nums.index(min(nums))\n b = nums.index(max(nums))\n if a > b:\n a, b = b, a\n return min(a + 1 + n - b, b + 1, n - a)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# Copyright (c) 2023 Intel Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from os import path as osp from typing import Any, Callable, Dict, List, Optional, Tuple import torch from torch.distributed import barrier from torch.nn import Module from nncf.api.compression import CompressionAlgorithmController from nncf.common.compression import BaseCompressionAlgorithmController as BaseController from nncf.common.deprecation import warning_deprecated from nncf.common.logging import nncf_logger from nncf.common.utils.api_marker import api from nncf.common.utils.debug import set_debug_log_dir from nncf.config import NNCFConfig from nncf.config.extractors import extract_algorithm_names from nncf.config.telemetry_extractors import CompressionStartedFromConfig from nncf.telemetry import tracked_function from nncf.telemetry.events import NNCF_PT_CATEGORY from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS from nncf.torch.algo_selector import NoCompressionAlgorithmBuilder from nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder from nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder from nncf.torch.dynamic_graph.graph_tracer import create_input_infos from nncf.torch.nncf_network import NNCFNetwork # pylint:disable=too-many-branches from nncf.torch.utils import is_dist_avail_and_initialized from nncf.torch.utils import is_main_process from nncf.torch.utils import maybe_convert_legacy_names_in_compress_state from nncf.torch.utils 
import training_mode_switcher @api(canonical_alias="nncf.torch.create_compressed_model") @tracked_function( NNCF_PT_CATEGORY, [ CompressionStartedFromConfig(argname="config"), ], ) def create_compressed_model( model: Module, config: NNCFConfig, compression_state: Optional[Dict[str, Any]] = None, dummy_forward_fn: Callable[[Module], Any] = None, wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None, wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None, dump_graphs=True, ) -> Tuple[CompressionAlgorithmController, NNCFNetwork]: """ The main function used to produce a model ready for compression fine-tuning from an original PyTorch model and a configuration object. dummy_forward_fn :param model: The original model. Should have its parameters already loaded from a checkpoint or another source. :param config: A configuration object used to determine the exact compression modifications to be applied to the model :type config: nncf.NNCFConfig :param compression_state: representation of the entire compression state to unambiguously restore the compressed model. Includes builder and controller states. :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build the internal graph representation via tracing. Specifying this is useful when the original training pipeline has special formats of data loader output or has additional *forward* arguments other than input tensors. Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified. 
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy forward call before passing the inputs to the underlying compressed model. This is required if the model's input tensors that are important for compression are not supplied as arguments to the model's forward call directly, but instead are located in a container (such as list), and the model receives the container as an argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified if dummy_forward_fn is specified. :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs :param dump_graphs: Whether to dump the internal graph representation of the original and compressed models in the .dot format into the log directory. :return: A controller for the compression algorithm (or algorithms, in which case the controller is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped as an object of NNCFNetwork. 
""" if isinstance(model, NNCFNetwork): raise RuntimeError( "The model object has already been compressed.\n" "NNCF for PyTorch modifies the model object in-place, and repeat calls to " "`nncf.torch.create_compressed_model` with the same model object passed as argument " "will lead to an incorrect attempt to compress the model twice.\n" "Make sure that the model object you are passing has not already been compressed (for " "instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\n" "If you are encountering this in a Jupyter notebook context - make sure that when " "re-running cells involving `nncf.torch.create_compressed_model` the original model object " "is also re-created (via constructor call)." ) if config.get("target_device") == "VPU": warning_deprecated("VPU device is deprecated and will no longer be supported in the future.") set_debug_log_dir(config.get("log_dir", ".")) is_legacy_model_state_dict = ( compression_state is not None and BaseController.BUILDER_STATE not in compression_state and BaseController.CONTROLLER_STATE not in compression_state ) maybe_convert_legacy_names_in_compress_state(compression_state) should_init = compression_state is None nncf_network = create_nncf_network(model, config, dummy_forward_fn, wrap_inputs_fn, wrap_outputs_fn) if dump_graphs and is_main_process(): nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get("log_dir", "."), "original_graph.dot")) builder = create_compression_algorithm_builder(config, should_init) is_state_loadable = not is_legacy_model_state_dict and compression_state is not None if is_state_loadable: builder.load_state(compression_state[BaseController.BUILDER_STATE]) compressed_model = builder.apply_to(nncf_network) compression_ctrl = builder.build_controller(compressed_model) if is_state_loadable: compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE]) compressed_model.nncf.set_compression_controller(compression_ctrl) # Required to ensure that 
the model leaving create_compressed_model has correct compressed graph. # In particular, this is currently required for correct functioning of RNNs. compressed_model.nncf.rebuild_graph() try: if is_legacy_model_state_dict: from nncf.torch import load_state # pylint: disable=cyclic-import state_dict_to_load = compression_state.get("state_dict", compression_state) load_state(compressed_model, state_dict_to_load, is_resume=True) finally: if dump_graphs and is_main_process(): compressed_model_graph = compressed_model.nncf.get_graph() compressed_model_graph.visualize_graph(osp.join(config.get("log_dir", "."), "compressed_graph.dot")) synchronize_all_processes_in_distributed_mode() return compression_ctrl, compressed_model def create_nncf_network( model: torch.nn.Module, config: NNCFConfig, dummy_forward_fn: Callable[[Module], Any] = None, wrap_inputs_fn: Callable = None, wrap_outputs_fn: Callable = None, ) -> NNCFNetwork: """ The main function used to produce a model ready for adding compression from an original PyTorch model and a configuration object. :param model: The original model. Should have its parameters already loaded from a checkpoint or another source. :param config: A configuration object used to determine the exact compression modifications to be applied to the model :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build the internal graph representation via tracing. Specifying this is useful when the original training pipeline has special formats of data loader output or has additional *forward* arguments other than input tensors. Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according to the shape specified in the config object. 
The dummy_forward_fn code MUST contain calls to nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified. :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy forward call before passing the inputs to the underlying compressed model. This is required if the model's input tensors that are important for compression are not supplied as arguments to the model's forward call directly, but instead are located in a container (such as list), and the model receives the container as an argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among the supplied model's args and kwargs that is important for compression (e.g. quantization) with an nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified if dummy_forward_fn is specified. :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call. :return: A model wrapped by NNCFNetwork, which is ready for adding compression.""" if dummy_forward_fn is not None and wrap_inputs_fn is None: raise ValueError( "A custom dummy forward function was specified, but the corresponding input wrapping function " "was not. 
In case a custom dummy forward function is specified for purposes of NNCF graph " "building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with " "the input wrapping done in dummy_forward_fn." ) # Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode with training_mode_switcher(model, is_training=False): # Compress model that will be deployed for the inference on target device. No need to compress parts of the # model that are used on training stage only (e.g. AuxLogits of Inception-v3 model) or unused modules with # weights. As a consequence, no need to care about spoiling BN statistics, as they're disabled in eval mode. input_info_list = create_input_infos(config) scopes_without_shape_matching = config.get("scopes_without_shape_matching", []) ignored_scopes = config.get("ignored_scopes") target_scopes = config.get("target_scopes") nncf_network = NNCFNetwork( model, input_infos=input_info_list, dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes=ignored_scopes, target_scopes=target_scopes, scopes_without_shape_matching=scopes_without_shape_matching, ) nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph() synchronize_all_processes_in_distributed_mode() return nncf_network def synchronize_all_processes_in_distributed_mode(): if is_dist_avail_and_initialized(): try: barrier() # Exception can be raised during running barrier # if the backend not in the supported list https://pytorch.org/docs/stable/distributed.html except RuntimeError as err: nncf_logger.warning( "Training pipeline spawned an error while synchronizing distributed training processes:" ) nncf_logger.warning(err) nncf_logger.warning("Desynchronization of distributed processes may occur.") def create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder: """ Create compression algorithm builders by a given list 
of algorithm names. :param config: A configuration object used to determine the exact compression modifications to be applied to the model :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False) the training parameters of the model during model building. :return: compression algorithm builder """ algo_names = extract_algorithm_names(config) return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init) def create_compression_algorithm_builder_from_algo_names( algo_names: List[str], config: NNCFConfig, should_init: bool ) -> PTCompressionAlgorithmBuilder: """ Create compression algorithm builders by a given list of algorithm names. :param algo_names: list of algorithm names :param config: A configuration object used to determine the exact compression modifications to be applied to the model :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False) the training parameters of the model during model building. :return: compression algorithm builder """ if not algo_names: algo_builder_classes = [NoCompressionAlgorithmBuilder] else: algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for algo_name in algo_names] if len(algo_builder_classes) == 1: builder = next(iter(algo_builder_classes))(config, should_init=should_init) else: builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init) return builder
normal
{ "blob_id": "cd1ada2d7979fffc17f707ed113efde7aa134954", "index": 3036, "step-1": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. 
The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. 
Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 
'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\n<mask token>\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n 
if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n", "step-2": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. 
The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. 
Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 
'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\n<mask token>\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) 
->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n", "step-3": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. 
Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. 
The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n 
maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(model: torch.nn.Module, config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any]=None, wrap_inputs_fn:\n Callable=None, wrap_outputs_fn: Callable=None) ->NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. 
Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. 
Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n 'A custom dummy forward function was specified, but the corresponding input wrapping function was not. In case a custom dummy forward function is specified for purposes of NNCF graph building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with the input wrapping done in dummy_forward_fn.'\n )\n with training_mode_switcher(model, is_training=False):\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\n 'scopes_without_shape_matching', [])\n ignored_scopes = config.get('ignored_scopes')\n target_scopes = config.get('target_scopes')\n nncf_network = NNCFNetwork(model, input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=\n wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes\n =ignored_scopes, target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching)\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n except RuntimeError as err:\n nncf_logger.warning(\n 'Training pipeline spawned an error while synchronizing distributed training processes:'\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\n 'Desynchronization of distributed processes may occur.')\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) 
->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n", "step-4": "from os import path as osp\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\nimport torch\nfrom torch.distributed import barrier\nfrom torch.nn import Module\nfrom nncf.api.compression import CompressionAlgorithmController\nfrom nncf.common.compression import 
BaseCompressionAlgorithmController as BaseController\nfrom nncf.common.deprecation import warning_deprecated\nfrom nncf.common.logging import nncf_logger\nfrom nncf.common.utils.api_marker import api\nfrom nncf.common.utils.debug import set_debug_log_dir\nfrom nncf.config import NNCFConfig\nfrom nncf.config.extractors import extract_algorithm_names\nfrom nncf.config.telemetry_extractors import CompressionStartedFromConfig\nfrom nncf.telemetry import tracked_function\nfrom nncf.telemetry.events import NNCF_PT_CATEGORY\nfrom nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS\nfrom nncf.torch.algo_selector import NoCompressionAlgorithmBuilder\nfrom nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder\nfrom nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder\nfrom nncf.torch.dynamic_graph.graph_tracer import create_input_infos\nfrom nncf.torch.nncf_network import NNCFNetwork\nfrom nncf.torch.utils import is_dist_avail_and_initialized\nfrom nncf.torch.utils import is_main_process\nfrom nncf.torch.utils import maybe_convert_legacy_names_in_compress_state\nfrom nncf.torch.utils import training_mode_switcher\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. 
Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. 
The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n 
maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(model: torch.nn.Module, config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any]=None, wrap_inputs_fn:\n Callable=None, wrap_outputs_fn: Callable=None) ->NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. 
Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. 
Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n 'A custom dummy forward function was specified, but the corresponding input wrapping function was not. In case a custom dummy forward function is specified for purposes of NNCF graph building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with the input wrapping done in dummy_forward_fn.'\n )\n with training_mode_switcher(model, is_training=False):\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\n 'scopes_without_shape_matching', [])\n ignored_scopes = config.get('ignored_scopes')\n target_scopes = config.get('target_scopes')\n nncf_network = NNCFNetwork(model, input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=\n wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes\n =ignored_scopes, target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching)\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n except RuntimeError as err:\n nncf_logger.warning(\n 'Training pipeline spawned an error while synchronizing distributed training processes:'\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\n 'Desynchronization of distributed processes may occur.')\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) 
->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n", "step-5": "# Copyright (c) 2023 Intel Corporation\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or 
agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import path as osp\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\n\nimport torch\nfrom torch.distributed import barrier\nfrom torch.nn import Module\n\nfrom nncf.api.compression import CompressionAlgorithmController\nfrom nncf.common.compression import BaseCompressionAlgorithmController as BaseController\nfrom nncf.common.deprecation import warning_deprecated\nfrom nncf.common.logging import nncf_logger\nfrom nncf.common.utils.api_marker import api\nfrom nncf.common.utils.debug import set_debug_log_dir\nfrom nncf.config import NNCFConfig\nfrom nncf.config.extractors import extract_algorithm_names\nfrom nncf.config.telemetry_extractors import CompressionStartedFromConfig\nfrom nncf.telemetry import tracked_function\nfrom nncf.telemetry.events import NNCF_PT_CATEGORY\nfrom nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS\nfrom nncf.torch.algo_selector import NoCompressionAlgorithmBuilder\nfrom nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder\nfrom nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder\nfrom nncf.torch.dynamic_graph.graph_tracer import create_input_infos\nfrom nncf.torch.nncf_network import NNCFNetwork\n\n# pylint:disable=too-many-branches\nfrom nncf.torch.utils import is_dist_avail_and_initialized\nfrom nncf.torch.utils import is_main_process\nfrom nncf.torch.utils import maybe_convert_legacy_names_in_compress_state\nfrom nncf.torch.utils import training_mode_switcher\n\n\n@api(canonical_alias=\"nncf.torch.create_compressed_model\")\n@tracked_function(\n NNCF_PT_CATEGORY,\n [\n CompressionStartedFromConfig(argname=\"config\"),\n ],\n)\ndef create_compressed_model(\n model: Module,\n config: 
NNCFConfig,\n compression_state: Optional[Dict[str, Any]] = None,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n dump_graphs=True,\n) -> Tuple[CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. 
This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"The model object has already been compressed.\\n\"\n \"NNCF for PyTorch modifies the model object in-place, and repeat calls to \"\n \"`nncf.torch.create_compressed_model` with the same model object passed as argument \"\n \"will lead to an incorrect attempt to compress the model twice.\\n\"\n \"Make sure that the model object you are passing has not already been compressed (for \"\n \"instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\\n\"\n \"If you are encountering this in a Jupyter notebook context - make sure that when \"\n \"re-running cells involving 
`nncf.torch.create_compressed_model` the original model object \"\n \"is also re-created (via constructor call).\"\n )\n\n if config.get(\"target_device\") == \"VPU\":\n warning_deprecated(\"VPU device is deprecated and will no longer be supported in the future.\")\n\n set_debug_log_dir(config.get(\"log_dir\", \".\"))\n\n is_legacy_model_state_dict = (\n compression_state is not None\n and BaseController.BUILDER_STATE not in compression_state\n and BaseController.CONTROLLER_STATE not in compression_state\n )\n maybe_convert_legacy_names_in_compress_state(compression_state)\n\n should_init = compression_state is None\n\n nncf_network = create_nncf_network(model, config, dummy_forward_fn, wrap_inputs_fn, wrap_outputs_fn)\n\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"original_graph.dot\"))\n builder = create_compression_algorithm_builder(config, should_init)\n\n is_state_loadable = not is_legacy_model_state_dict and compression_state is not None\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE])\n\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n\n # Required to ensure that the model leaving create_compressed_model has correct compressed graph.\n # In particular, this is currently required for correct functioning of RNNs.\n compressed_model.nncf.rebuild_graph()\n\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state # pylint: disable=cyclic-import\n\n state_dict_to_load = compression_state.get(\"state_dict\", compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = 
compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"compressed_graph.dot\"))\n\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(\n model: torch.nn.Module,\n config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable = None,\n wrap_outputs_fn: Callable = None,\n) -> NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. 
This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n \"A custom dummy forward function was specified, but the corresponding input wrapping function \"\n \"was not. In case a custom dummy forward function is specified for purposes of NNCF graph \"\n \"building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with \"\n \"the input wrapping done in dummy_forward_fn.\"\n )\n\n # Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode\n with training_mode_switcher(model, is_training=False):\n # Compress model that will be deployed for the inference on target device. No need to compress parts of the\n # model that are used on training stage only (e.g. AuxLogits of Inception-v3 model) or unused modules with\n # weights. 
As a consequence, no need to care about spoiling BN statistics, as they're disabled in eval mode.\n\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\"scopes_without_shape_matching\", [])\n ignored_scopes = config.get(\"ignored_scopes\")\n target_scopes = config.get(\"target_scopes\")\n\n nncf_network = NNCFNetwork(\n model,\n input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn,\n wrap_inputs_fn=wrap_inputs_fn,\n wrap_outputs_fn=wrap_outputs_fn,\n ignored_scopes=ignored_scopes,\n target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching,\n )\n\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n # Exception can be raised during running barrier\n # if the backend not in the supported list https://pytorch.org/docs/stable/distributed.html\n except RuntimeError as err:\n nncf_logger.warning(\n \"Training pipeline spawned an error while synchronizing distributed training processes:\"\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\"Desynchronization of distributed processes may occur.\")\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names, config, 
should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(\n algo_names: List[str], config: NNCFConfig, should_init: bool\n) -> PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init)\n return builder\n", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
n = int(input()) p = [220000] + list(map(int, input().split())) cnt = 0 m = 220000 for i in range(1, n + 1): now = p[i] m = min(m, now) if now == m: cnt += 1 print(cnt)
normal
{ "blob_id": "2a500968cf6786440c0d4240430433db90d1fc2f", "index": 5941, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(1, n + 1):\n now = p[i]\n m = min(m, now)\n if now == m:\n cnt += 1\nprint(cnt)\n", "step-3": "n = int(input())\np = [220000] + list(map(int, input().split()))\ncnt = 0\nm = 220000\nfor i in range(1, n + 1):\n now = p[i]\n m = min(m, now)\n if now == m:\n cnt += 1\nprint(cnt)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from itertools import groupby def solve(tribes): attacks = [] for t in tribes: D, N, W, E, S, DD, DP, DS = t for i in range(N): d = D + DD * i w = W + DP * i e = E + DP * i s = S + DS * i attacks.append((d, w, e, s)) attacks = sorted(attacks) ret = 0 days = [] for k, g in groupby(attacks, key=lambda x: x[0]): days.append(list(g)) wall = {} for day in days: for a in day: _d, w, e, s = a for i in range(w, e): h = wall.get(i, 0) if h < s: ret += 1 break for a in day: _d, w, e, s = a for i in range(w, e): wall[i] = max(wall.get(i, 0), s) return ret def line(f): return map(int, f.readline().split()) def main(f): (T,) = line(f) for i in range(T): (N,) = line(f) tribes = [] for j in range(N): rec = line(f) tribes.append(rec) assert len(tribes) == N print('Case #{}: {}'.format(i + 1, solve(tribes))) if __name__ == '__main__': import sys main(sys.stdin) #with open('sample.in') as f: #main(f)
normal
{ "blob_id": "362bfc5a35b09817ce071e71a72e574a28ea287d", "index": 3365, "step-1": "<mask token>\n\n\ndef line(f):\n return map(int, f.readline().split())\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef solve(tribes):\n attacks = []\n for t in tribes:\n D, N, W, E, S, DD, DP, DS = t\n for i in range(N):\n d = D + DD * i\n w = W + DP * i\n e = E + DP * i\n s = S + DS * i\n attacks.append((d, w, e, s))\n attacks = sorted(attacks)\n ret = 0\n days = []\n for k, g in groupby(attacks, key=lambda x: x[0]):\n days.append(list(g))\n wall = {}\n for day in days:\n for a in day:\n _d, w, e, s = a\n for i in range(w, e):\n h = wall.get(i, 0)\n if h < s:\n ret += 1\n break\n for a in day:\n _d, w, e, s = a\n for i in range(w, e):\n wall[i] = max(wall.get(i, 0), s)\n return ret\n\n\ndef line(f):\n return map(int, f.readline().split())\n\n\ndef main(f):\n T, = line(f)\n for i in range(T):\n N, = line(f)\n tribes = []\n for j in range(N):\n rec = line(f)\n tribes.append(rec)\n assert len(tribes) == N\n print('Case #{}: {}'.format(i + 1, solve(tribes)))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef solve(tribes):\n attacks = []\n for t in tribes:\n D, N, W, E, S, DD, DP, DS = t\n for i in range(N):\n d = D + DD * i\n w = W + DP * i\n e = E + DP * i\n s = S + DS * i\n attacks.append((d, w, e, s))\n attacks = sorted(attacks)\n ret = 0\n days = []\n for k, g in groupby(attacks, key=lambda x: x[0]):\n days.append(list(g))\n wall = {}\n for day in days:\n for a in day:\n _d, w, e, s = a\n for i in range(w, e):\n h = wall.get(i, 0)\n if h < s:\n ret += 1\n break\n for a in day:\n _d, w, e, s = a\n for i in range(w, e):\n wall[i] = max(wall.get(i, 0), s)\n return ret\n\n\ndef line(f):\n return map(int, f.readline().split())\n\n\ndef main(f):\n T, = line(f)\n for i in range(T):\n N, = line(f)\n tribes = []\n for j in range(N):\n rec = line(f)\n tribes.append(rec)\n assert len(tribes) == N\n print('Case #{}: {}'.format(i + 1, solve(tribes)))\n\n\nif __name__ == '__main__':\n 
import sys\n main(sys.stdin)\n", "step-4": "from itertools import groupby\n\n\ndef solve(tribes):\n attacks = []\n for t in tribes:\n D, N, W, E, S, DD, DP, DS = t\n for i in range(N):\n d = D + DD * i\n w = W + DP * i\n e = E + DP * i\n s = S + DS * i\n attacks.append((d, w, e, s))\n attacks = sorted(attacks)\n ret = 0\n days = []\n for k, g in groupby(attacks, key=lambda x: x[0]):\n days.append(list(g))\n wall = {}\n for day in days:\n for a in day:\n _d, w, e, s = a\n for i in range(w, e):\n h = wall.get(i, 0)\n if h < s:\n ret += 1\n break\n for a in day:\n _d, w, e, s = a\n for i in range(w, e):\n wall[i] = max(wall.get(i, 0), s)\n return ret\n\n\ndef line(f):\n return map(int, f.readline().split())\n\n\ndef main(f):\n T, = line(f)\n for i in range(T):\n N, = line(f)\n tribes = []\n for j in range(N):\n rec = line(f)\n tribes.append(rec)\n assert len(tribes) == N\n print('Case #{}: {}'.format(i + 1, solve(tribes)))\n\n\nif __name__ == '__main__':\n import sys\n main(sys.stdin)\n", "step-5": "from itertools import groupby\n\ndef solve(tribes):\n attacks = []\n for t in tribes:\n D, N, W, E, S, DD, DP, DS = t\n for i in range(N):\n d = D + DD * i\n w = W + DP * i\n e = E + DP * i\n s = S + DS * i\n attacks.append((d, w, e, s))\n attacks = sorted(attacks)\n ret = 0\n days = []\n for k, g in groupby(attacks, key=lambda x: x[0]):\n days.append(list(g))\n wall = {}\n for day in days:\n for a in day:\n _d, w, e, s = a\n for i in range(w, e):\n h = wall.get(i, 0)\n if h < s:\n ret += 1\n break\n for a in day:\n _d, w, e, s = a\n for i in range(w, e):\n wall[i] = max(wall.get(i, 0), s)\n return ret\n\ndef line(f):\n return map(int, f.readline().split())\n\ndef main(f):\n (T,) = line(f)\n for i in range(T):\n (N,) = line(f)\n tribes = []\n for j in range(N):\n rec = line(f)\n tribes.append(rec)\n assert len(tribes) == N\n print('Case #{}: {}'.format(i + 1, solve(tribes)))\n\nif __name__ == '__main__':\n import sys\n main(sys.stdin)\n #with open('sample.in') as f:\n 
#main(f)\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
from . import resources from jsonschema import validate from jsonschema.exceptions import ValidationError import aiohttp_client import importlib.resources as pkg_resources import json import logging log = logging.getLogger("amplitude-client") API_URL = "https://api.amplitude.com/2/httpapi" class AmplitudeLogger: def __init__(self, api_key: str): self.api_key = api_key self.api_schema = json.loads(pkg_resources.read_text(resources, "schema.json")) async def log_event(self, event): # Amplitude API requires (user_id OR device_id) AND event_type event = {"api_key": self.api_key, "events": [event]} try: validate(instance=event, schema=self.api_schema) except ValidationError: log.error("Invalid payload", exc_info=True) return None async with aiohttp_client.post(API_URL, data=json.dumps(event)) as resp: if resp.status != 200: log.warn("Failed to log event", exc_info=True) return resp
normal
{ "blob_id": "d32f009f373249b7b602ac36f29982273a2ed192", "index": 2289, "step-1": "<mask token>\n\n\nclass AmplitudeLogger:\n <mask token>\n\n async def log_event(self, event):\n event = {'api_key': self.api_key, 'events': [event]}\n try:\n validate(instance=event, schema=self.api_schema)\n except ValidationError:\n log.error('Invalid payload', exc_info=True)\n return None\n async with aiohttp_client.post(API_URL, data=json.dumps(event)\n ) as resp:\n if resp.status != 200:\n log.warn('Failed to log event', exc_info=True)\n return resp\n", "step-2": "<mask token>\n\n\nclass AmplitudeLogger:\n\n def __init__(self, api_key: str):\n self.api_key = api_key\n self.api_schema = json.loads(pkg_resources.read_text(resources,\n 'schema.json'))\n\n async def log_event(self, event):\n event = {'api_key': self.api_key, 'events': [event]}\n try:\n validate(instance=event, schema=self.api_schema)\n except ValidationError:\n log.error('Invalid payload', exc_info=True)\n return None\n async with aiohttp_client.post(API_URL, data=json.dumps(event)\n ) as resp:\n if resp.status != 200:\n log.warn('Failed to log event', exc_info=True)\n return resp\n", "step-3": "<mask token>\nlog = logging.getLogger('amplitude-client')\nAPI_URL = 'https://api.amplitude.com/2/httpapi'\n\n\nclass AmplitudeLogger:\n\n def __init__(self, api_key: str):\n self.api_key = api_key\n self.api_schema = json.loads(pkg_resources.read_text(resources,\n 'schema.json'))\n\n async def log_event(self, event):\n event = {'api_key': self.api_key, 'events': [event]}\n try:\n validate(instance=event, schema=self.api_schema)\n except ValidationError:\n log.error('Invalid payload', exc_info=True)\n return None\n async with aiohttp_client.post(API_URL, data=json.dumps(event)\n ) as resp:\n if resp.status != 200:\n log.warn('Failed to log event', exc_info=True)\n return resp\n", "step-4": "from . 
import resources\nfrom jsonschema import validate\nfrom jsonschema.exceptions import ValidationError\nimport aiohttp_client\nimport importlib.resources as pkg_resources\nimport json\nimport logging\nlog = logging.getLogger('amplitude-client')\nAPI_URL = 'https://api.amplitude.com/2/httpapi'\n\n\nclass AmplitudeLogger:\n\n def __init__(self, api_key: str):\n self.api_key = api_key\n self.api_schema = json.loads(pkg_resources.read_text(resources,\n 'schema.json'))\n\n async def log_event(self, event):\n event = {'api_key': self.api_key, 'events': [event]}\n try:\n validate(instance=event, schema=self.api_schema)\n except ValidationError:\n log.error('Invalid payload', exc_info=True)\n return None\n async with aiohttp_client.post(API_URL, data=json.dumps(event)\n ) as resp:\n if resp.status != 200:\n log.warn('Failed to log event', exc_info=True)\n return resp\n", "step-5": "from . import resources\nfrom jsonschema import validate\nfrom jsonschema.exceptions import ValidationError\n\nimport aiohttp_client\nimport importlib.resources as pkg_resources\nimport json\nimport logging\n\nlog = logging.getLogger(\"amplitude-client\")\n\nAPI_URL = \"https://api.amplitude.com/2/httpapi\"\n\n\nclass AmplitudeLogger:\n def __init__(self, api_key: str):\n self.api_key = api_key\n\n self.api_schema = json.loads(pkg_resources.read_text(resources, \"schema.json\"))\n\n async def log_event(self, event):\n # Amplitude API requires (user_id OR device_id) AND event_type\n\n event = {\"api_key\": self.api_key, \"events\": [event]}\n\n try:\n validate(instance=event, schema=self.api_schema)\n except ValidationError:\n log.error(\"Invalid payload\", exc_info=True)\n return None\n\n async with aiohttp_client.post(API_URL, data=json.dumps(event)) as resp:\n if resp.status != 200:\n log.warn(\"Failed to log event\", exc_info=True)\n\n return resp\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import pandas as pd import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation import math from tkinter import * from tkinter.ttk import * from facedetectandtrack import * x_vals = [] root = Tk() counter=0 #def graph(): plt.style.use('seaborn') def animate(i): data = pd.read_csv('data.csv') global x_vals global counter x_vals.append(counter) try: x = data.iloc[x_vals,0] y = data.iloc[x_vals,1] if counter>10: x_vals.pop(0) plt.cla() axes=plt.gca() axes.set_ylim([0,30]) #plt.plot(x, y) counter=counter+1 height = root.winfo_screenheight() width = root.winfo_screenwidth() screen_x1 = width/2 screen_y1 = height/2 X = screen_x1 - face_x2 Y = screen_y1 - face_y2 d_x = (X*X) d_y = (Y*Y) D = d_x + d_y distance = math.sqrt(D) #print(distance) plt.scatter(counter ,distance, s= 50,linewidth=1) plt.xlabel("Time") plt.ylabel("Movement of student from the center of screen") plt.tight_layout() except IndexError as e: print('Graph ended') exit(0) ani = FuncAnimation(plt.gcf(), animate, interval=1000) plt.savefig("Scatter_Graph.png") plt.tight_layout() plt.show()
normal
{ "blob_id": "239f055fd76a3ecb5f384c256ad850ea42739b8f", "index": 9710, "step-1": "<mask token>\n", "step-2": "<mask token>\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\n<mask token>\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n", "step-3": "<mask token>\nx_vals = []\nroot = Tk()\ncounter = 0\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n", "step-4": "import pandas as pd\nimport 
matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport math\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom facedetectandtrack import *\nx_vals = []\nroot = Tk()\ncounter = 0\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n", "step-5": "\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport math\nfrom tkinter import * \nfrom tkinter.ttk import *\nfrom facedetectandtrack import *\n \nx_vals = []\nroot = Tk()\n\n\ncounter=0\n#def graph():\nplt.style.use('seaborn')\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals,0]\n y = data.iloc[x_vals,1] \n if counter>10:\n x_vals.pop(0)\n\n plt.cla()\n axes=plt.gca()\n axes.set_ylim([0,30])\n #plt.plot(x, y)\n counter=counter+1\n\n height = root.winfo_screenheight() \n width = root.winfo_screenwidth() \n screen_x1 = width/2\n screen_y1 = height/2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = (X*X)\n d_y = (Y*Y)\n D = d_x + d_y\n distance = math.sqrt(D)\n #print(distance)\n plt.scatter(counter ,distance, 
s= 50,linewidth=1)\n\n plt.xlabel(\"Time\")\n plt.ylabel(\"Movement of student from the center of screen\")\n\n\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig(\"Scatter_Graph.png\")\n\nplt.tight_layout()\nplt.show()", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
a=10 b=20 c=400 d=100 e=500 f=30 z=a+b+c+d+e+f print "The total sum is",z print "variable d added" print "Variable e added" print "Variable f is equal to 30" print "You are coming from test branch" print "Your are very new in this branch"
normal
{ "blob_id": "700d876dd45548b74b563ed86f8124fa666e1739", "index": 2588, "step-1": "a=10\nb=20\nc=400\nd=100\ne=500\nf=30\nz=a+b+c+d+e+f\nprint \"The total sum is\",z\nprint \"variable d added\"\nprint \"Variable e added\"\nprint \"Variable f is equal to 30\"\nprint \"You are coming from test branch\"\nprint \"Your are very new in this branch\"\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from QnA_processor.question_analysis.google_question_classifier import GoogleQuestionClassifier def classify_question(query): try: """ Get answer-type from google autoML classifier (by making POST requests with authorization key) """ question_classifier = GoogleQuestionClassifier() answer_type = question_classifier.classify_by_api_call(query) except KeyError : """ Get answer-type from google autoML classifier (without authorization key by using google package) """ answer_type = question_classifier.classify_by_package(query) except: """ Get answer-type from custom question classifier """ from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier question_classifier = CustomQuestionClassifier() answer_type = question_classifier.classify_question(query)[0] return answer_type # print (classify_question("How many seasons are there in a year"))
normal
{ "blob_id": "db231ea92319414dd10ca8dfbc14e5a70ed2fe44", "index": 7343, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef classify_question(query):\n try:\n \"\"\"\n Get answer-type from google autoML classifier \n (by making POST requests with authorization key)\n \"\"\"\n question_classifier = GoogleQuestionClassifier()\n answer_type = question_classifier.classify_by_api_call(query)\n except KeyError:\n \"\"\"\n Get answer-type from google autoML classifier \n (without authorization key by using google package)\n \"\"\"\n answer_type = question_classifier.classify_by_package(query)\n except:\n \"\"\"\n Get answer-type from custom question classifier\n \"\"\"\n from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier\n question_classifier = CustomQuestionClassifier()\n answer_type = question_classifier.classify_question(query)[0]\n return answer_type\n", "step-3": "from QnA_processor.question_analysis.google_question_classifier import GoogleQuestionClassifier\n\n\ndef classify_question(query):\n try:\n \"\"\"\n Get answer-type from google autoML classifier \n (by making POST requests with authorization key)\n \"\"\"\n question_classifier = GoogleQuestionClassifier()\n answer_type = question_classifier.classify_by_api_call(query)\n except KeyError:\n \"\"\"\n Get answer-type from google autoML classifier \n (without authorization key by using google package)\n \"\"\"\n answer_type = question_classifier.classify_by_package(query)\n except:\n \"\"\"\n Get answer-type from custom question classifier\n \"\"\"\n from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier\n question_classifier = CustomQuestionClassifier()\n answer_type = question_classifier.classify_question(query)[0]\n return answer_type\n", "step-4": " \r\n \r\nfrom QnA_processor.question_analysis.google_question_classifier import GoogleQuestionClassifier\r\n \r\ndef classify_question(query):\r\n \r\n try:\r\n 
\"\"\"\r\n Get answer-type from google autoML classifier \r\n (by making POST requests with authorization key)\r\n \"\"\"\r\n question_classifier = GoogleQuestionClassifier()\r\n answer_type = question_classifier.classify_by_api_call(query)\r\n except KeyError :\r\n \"\"\"\r\n Get answer-type from google autoML classifier \r\n (without authorization key by using google package)\r\n \"\"\"\r\n answer_type = question_classifier.classify_by_package(query)\r\n \r\n except:\r\n \"\"\"\r\n Get answer-type from custom question classifier\r\n \"\"\"\r\n from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier\r\n question_classifier = CustomQuestionClassifier()\r\n answer_type = question_classifier.classify_question(query)[0]\r\n \r\n return answer_type\r\n\r\n# print (classify_question(\"How many seasons are there in a year\"))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import ssl import urllib from urllib import request, response, error, parse, robotparser context = ssl._create_unverified_context() url = 'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner=' headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36' } data = { 'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==', 'password_encrypt': 'hiqxe1qVXCoVuCrSwYM+eg==' } data = bytes(parse.urlencode(data), 'utf-8') req = request.Request(url, data=data, headers=headers, method='POST') res = request.urlopen(req, context=context) print(res.read().decode('utf-8'))
normal
{ "blob_id": "2a37d02c7a0840e855a80adced4794fd757e353a", "index": 2917, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(res.read().decode('utf-8'))\n", "step-3": "<mask token>\ncontext = ssl._create_unverified_context()\nurl = (\n 'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='\n )\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\ndata = {'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==', 'password_encrypt':\n 'hiqxe1qVXCoVuCrSwYM+eg=='}\ndata = bytes(parse.urlencode(data), 'utf-8')\nreq = request.Request(url, data=data, headers=headers, method='POST')\nres = request.urlopen(req, context=context)\nprint(res.read().decode('utf-8'))\n", "step-4": "import ssl\nimport urllib\nfrom urllib import request, response, error, parse, robotparser\ncontext = ssl._create_unverified_context()\nurl = (\n 'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='\n )\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\ndata = {'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==', 'password_encrypt':\n 'hiqxe1qVXCoVuCrSwYM+eg=='}\ndata = bytes(parse.urlencode(data), 'utf-8')\nreq = request.Request(url, data=data, headers=headers, method='POST')\nres = request.urlopen(req, context=context)\nprint(res.read().decode('utf-8'))\n", "step-5": "import ssl\nimport urllib\nfrom urllib import request, response, error, parse, robotparser\ncontext = ssl._create_unverified_context()\nurl = 
'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n}\ndata = {\n 'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==',\n 'password_encrypt': 'hiqxe1qVXCoVuCrSwYM+eg=='\n}\ndata = bytes(parse.urlencode(data), 'utf-8')\nreq = request.Request(url, data=data, headers=headers, method='POST')\nres = request.urlopen(req, context=context)\nprint(res.read().decode('utf-8'))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# coding=utf-8 # Copyright 2022 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tensorflow_datasets.core.community.register_path.""" from etils import epath from tensorflow_datasets.core.community import register_path def test_data_dir_register(): register = register_path.DataDirRegister( namespace_to_data_dirs={'ns1': [epath.Path('/path/ns1')]}) assert {'ns1'} == register.namespaces
normal
{ "blob_id": "ed65d7e0de3fc792753e34b77254bccc8cee6d66", "index": 3657, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(namespace_to_data_dirs={'ns1':\n [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n", "step-3": "<mask token>\nfrom etils import epath\nfrom tensorflow_datasets.core.community import register_path\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(namespace_to_data_dirs={'ns1':\n [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n", "step-4": "# coding=utf-8\n# Copyright 2022 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.community.register_path.\"\"\"\n\nfrom etils import epath\nfrom tensorflow_datasets.core.community import register_path\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(\n namespace_to_data_dirs={'ns1': [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.apps import AppConfig class WebApiAppConfig(AppConfig): name = 'WebApiApp'
normal
{ "blob_id": "cc97f70b9d41357f020ea9c59d8b149392a336cc", "index": 9656, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass WebApiAppConfig(AppConfig):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass WebApiAppConfig(AppConfig):\n name = 'WebApiApp'\n", "step-4": "from django.apps import AppConfig\n\n\nclass WebApiAppConfig(AppConfig):\n name = 'WebApiApp'\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import os, sys sys.path.append('./Pytorch-UNet/') import torch from torch import optim import torchvision.transforms as transforms import torchvision.datasets as dset import wandb from datasets import parse_dataset_args, create_dataset from wt_utils import wt, create_filters, load_checkpoint, load_weights from arguments import parse_args from unet.unet_model import UNet_NTail_128_Mod from train import train_unet256 from logger import Logger if __name__ == "__main__": # Set up logger logger = Logger() # Accelerate training with benchmark true torch.backends.cudnn.benchmark = True # Parse arguments & log args = parse_args() logger.update_args(args) # Create output directory if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) else: print('WARNING: Output directory already exists and will be overwriting (if not resuming)') # Initialize wandb wandb.init(project=args.project_name) # Create filters for dataloader filters_cpu = create_filters(device='cpu') # Create transforms default_transform = transforms.Compose([ transforms.CenterCrop(args.image_size), transforms.Resize(args.image_size), transforms.ToTensor() ]) # Parsing dataset arguments ds_name, classes = parse_dataset_args(args.dataset) # Create train dataset train_dataset = create_dataset(ds_name, args.train_dir, transform=default_transform, classes=classes[0] if classes else None) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) # Create validation dataset valid_dataset = create_dataset(ds_name, args.valid_dir, transform=default_transform, classes=classes[1] if classes else None) valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) # Load 128 model print('Loading model 128 weights') model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12, bilinear=True).to(args.device) 
model_128 = load_weights(model_128, args.model_128_weights, args) # Model and optimizer model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48, bilinear=True).to(args.device) optimizer = optim.Adam(model.parameters(), lr=args.lr) state_dict = {'itr': 0} if args.resume: print('Loading weights & resuming from iteration {}'.format(args.checkpoint)) model, optimizer, logger = load_checkpoint(model, optimizer, '256', args) state_dict['itr'] = args.checkpoint for epoch in range(args.num_epochs): train_unet256(epoch, state_dict, model, model_128, optimizer, train_loader, valid_loader, args, logger)
normal
{ "blob_id": "fbd5c7fa335d6bde112e41a55d15aee31e3ebaf7", "index": 2759, "step-1": "<mask token>\n", "step-2": "<mask token>\nsys.path.append('./Pytorch-UNet/')\n<mask token>\nif __name__ == '__main__':\n logger = Logger()\n torch.backends.cudnn.benchmark = True\n args = parse_args()\n logger.update_args(args)\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n else:\n print(\n 'WARNING: Output directory already exists and will be overwriting (if not resuming)'\n )\n wandb.init(project=args.project_name)\n filters_cpu = create_filters(device='cpu')\n default_transform = transforms.Compose([transforms.CenterCrop(args.\n image_size), transforms.Resize(args.image_size), transforms.ToTensor()]\n )\n ds_name, classes = parse_dataset_args(args.dataset)\n train_dataset = create_dataset(ds_name, args.train_dir, transform=\n default_transform, classes=classes[0] if classes else None)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers, pin_memory\n =True, drop_last=True)\n valid_dataset = create_dataset(ds_name, args.valid_dir, transform=\n default_transform, classes=classes[1] if classes else None)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers, pin_memory\n =True, drop_last=True)\n print('Loading model 128 weights')\n model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12,\n bilinear=True).to(args.device)\n model_128 = load_weights(model_128, args.model_128_weights, args)\n model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48,\n bilinear=True).to(args.device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n state_dict = {'itr': 0}\n if args.resume:\n print('Loading weights & resuming from iteration {}'.format(args.\n checkpoint))\n model, optimizer, logger = load_checkpoint(model, optimizer, '256',\n args)\n state_dict['itr'] = args.checkpoint\n for epoch in 
range(args.num_epochs):\n train_unet256(epoch, state_dict, model, model_128, optimizer,\n train_loader, valid_loader, args, logger)\n", "step-3": "import os, sys\nsys.path.append('./Pytorch-UNet/')\nimport torch\nfrom torch import optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dset\nimport wandb\nfrom datasets import parse_dataset_args, create_dataset\nfrom wt_utils import wt, create_filters, load_checkpoint, load_weights\nfrom arguments import parse_args\nfrom unet.unet_model import UNet_NTail_128_Mod\nfrom train import train_unet256\nfrom logger import Logger\nif __name__ == '__main__':\n logger = Logger()\n torch.backends.cudnn.benchmark = True\n args = parse_args()\n logger.update_args(args)\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n else:\n print(\n 'WARNING: Output directory already exists and will be overwriting (if not resuming)'\n )\n wandb.init(project=args.project_name)\n filters_cpu = create_filters(device='cpu')\n default_transform = transforms.Compose([transforms.CenterCrop(args.\n image_size), transforms.Resize(args.image_size), transforms.ToTensor()]\n )\n ds_name, classes = parse_dataset_args(args.dataset)\n train_dataset = create_dataset(ds_name, args.train_dir, transform=\n default_transform, classes=classes[0] if classes else None)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers, pin_memory\n =True, drop_last=True)\n valid_dataset = create_dataset(ds_name, args.valid_dir, transform=\n default_transform, classes=classes[1] if classes else None)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers, pin_memory\n =True, drop_last=True)\n print('Loading model 128 weights')\n model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12,\n bilinear=True).to(args.device)\n model_128 = load_weights(model_128, 
args.model_128_weights, args)\n model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48,\n bilinear=True).to(args.device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n state_dict = {'itr': 0}\n if args.resume:\n print('Loading weights & resuming from iteration {}'.format(args.\n checkpoint))\n model, optimizer, logger = load_checkpoint(model, optimizer, '256',\n args)\n state_dict['itr'] = args.checkpoint\n for epoch in range(args.num_epochs):\n train_unet256(epoch, state_dict, model, model_128, optimizer,\n train_loader, valid_loader, args, logger)\n", "step-4": "import os, sys\nsys.path.append('./Pytorch-UNet/')\nimport torch\nfrom torch import optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dset\nimport wandb\n\nfrom datasets import parse_dataset_args, create_dataset\nfrom wt_utils import wt, create_filters, load_checkpoint, load_weights\nfrom arguments import parse_args\nfrom unet.unet_model import UNet_NTail_128_Mod\nfrom train import train_unet256\nfrom logger import Logger\n\nif __name__ == \"__main__\":\n # Set up logger\n logger = Logger()\n \n # Accelerate training with benchmark true\n torch.backends.cudnn.benchmark = True\n\n # Parse arguments & log\n args = parse_args()\n logger.update_args(args)\n\n # Create output directory\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n else:\n print('WARNING: Output directory already exists and will be overwriting (if not resuming)')\n\n # Initialize wandb\n wandb.init(project=args.project_name)\n \n # Create filters for dataloader\n filters_cpu = create_filters(device='cpu')\n\n # Create transforms\n default_transform = transforms.Compose([\n transforms.CenterCrop(args.image_size),\n transforms.Resize(args.image_size),\n transforms.ToTensor()\n ])\n \n # Parsing dataset arguments\n ds_name, classes = parse_dataset_args(args.dataset)\n\n # Create train dataset\n train_dataset = create_dataset(ds_name, args.train_dir, 
transform=default_transform, classes=classes[0] if classes else None)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers,\n pin_memory=True, drop_last=True)\n\n # Create validation dataset\n valid_dataset = create_dataset(ds_name, args.valid_dir, transform=default_transform, classes=classes[1] if classes else None)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers,\n pin_memory=True, drop_last=True)\n\n # Load 128 model\n print('Loading model 128 weights')\n model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12, bilinear=True).to(args.device)\n model_128 = load_weights(model_128, args.model_128_weights, args)\n\n # Model and optimizer\n model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48, bilinear=True).to(args.device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n state_dict = {'itr': 0}\n\n if args.resume:\n print('Loading weights & resuming from iteration {}'.format(args.checkpoint))\n model, optimizer, logger = load_checkpoint(model, optimizer, '256', args)\n state_dict['itr'] = args.checkpoint\n\n for epoch in range(args.num_epochs):\n train_unet256(epoch, state_dict, model, model_128, optimizer, train_loader, valid_loader, args, logger)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import cv2 import sys import online as API def demo(myAPI): myAPI.setAttr() video_capture = cv2.VideoCapture(0) print("Press q to quit: ") while True: # Capture frame-by-frame ret, frame = video_capture.read() #np.array frame = cv2.resize(frame, (320, 240)) key = cv2.waitKey(100) & 0xFF if key == ord('q'): break elif key == ord('r'): pass frame = myAPI.simple_demo(frame) # Display the resulting frame cv2.imshow('Video', frame) # When everything is done, release the capture video_capture.release() cv2.destroyAllWindows() demo(API.FacePlusPlus())
normal
{ "blob_id": "778ef68b5270657f75185b27dc8219b35847afa1", "index": 5829, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef demo(myAPI):\n myAPI.setAttr()\n video_capture = cv2.VideoCapture(0)\n print('Press q to quit: ')\n while True:\n ret, frame = video_capture.read()\n frame = cv2.resize(frame, (320, 240))\n key = cv2.waitKey(100) & 255\n if key == ord('q'):\n break\n elif key == ord('r'):\n pass\n frame = myAPI.simple_demo(frame)\n cv2.imshow('Video', frame)\n video_capture.release()\n cv2.destroyAllWindows()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef demo(myAPI):\n myAPI.setAttr()\n video_capture = cv2.VideoCapture(0)\n print('Press q to quit: ')\n while True:\n ret, frame = video_capture.read()\n frame = cv2.resize(frame, (320, 240))\n key = cv2.waitKey(100) & 255\n if key == ord('q'):\n break\n elif key == ord('r'):\n pass\n frame = myAPI.simple_demo(frame)\n cv2.imshow('Video', frame)\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndemo(API.FacePlusPlus())\n", "step-4": "import cv2\nimport sys\nimport online as API\n\n\ndef demo(myAPI):\n myAPI.setAttr()\n video_capture = cv2.VideoCapture(0)\n print('Press q to quit: ')\n while True:\n ret, frame = video_capture.read()\n frame = cv2.resize(frame, (320, 240))\n key = cv2.waitKey(100) & 255\n if key == ord('q'):\n break\n elif key == ord('r'):\n pass\n frame = myAPI.simple_demo(frame)\n cv2.imshow('Video', frame)\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndemo(API.FacePlusPlus())\n", "step-5": "import cv2\nimport sys\nimport online as API\n\ndef demo(myAPI):\n myAPI.setAttr()\n video_capture = cv2.VideoCapture(0)\n print(\"Press q to quit: \")\n while True:\n # Capture frame-by-frame\n ret, frame = video_capture.read() #np.array\n\n frame = cv2.resize(frame, (320, 240))\n\n key = cv2.waitKey(100) & 0xFF\n if key == ord('q'):\n break\n elif key == ord('r'):\n pass\n frame = myAPI.simple_demo(frame)\n\n # Display the resulting frame\n cv2.imshow('Video', frame)\n\n # 
When everything is done, release the capture\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndemo(API.FacePlusPlus())\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def f1(phi, phi_o, d): """sinusoidally growing function between (phi_o-d) to phi_o""" return 1 - sigmoid_decay(phi, phi_o, d) def f2(phi, sigma): """normal distribution""" return math.exp(-phi ** 2 / sigma ** 2) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def f1(phi, phi_o, d): """sinusoidally growing function between (phi_o-d) to phi_o""" return 1 - sigmoid_decay(phi, phi_o, d) def f2(phi, sigma): """normal distribution""" return math.exp(-phi ** 2 / sigma ** 2) <|reserved_special_token_0|> def optofitness(op_array, n_obj=1): """apply respective transfer functions to an array of order parameters **order of elements matters """ d = 5 f_speed = f1(op_array[1], df.v_flock, df.v_tol) f_coll = f3(op_array[3], df.a_tol) f_disc = f3(op_array[4], df.num_agents / 5) f_wall = f2(op_array[0], df.r_tol) f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5) if op_array[2] > 0: f_corr = op_array[2] else: f_corr = 0 time_fit = 1 if n_obj == 2: F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster F1 = -time_fit * f_wall * f_speed return round(F1, d), round(F2, d) elif n_obj == 3: F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster F2 = -time_fit * f_wall F3 = -time_fit * f_coll return round(F1, d), round(F2, d), round(F3, d) elif n_obj == 'all': return round(f_wall, d), round(f_speed, d), round(f_corr, d), round( f_coll, d), round(f_disc, d), round(f_cluster, d) F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster return round(F1, d) <|reserved_special_token_1|> <|reserved_special_token_0|> def f1(phi, phi_o, d): """sinusoidally growing function between (phi_o-d) to phi_o""" return 1 - sigmoid_decay(phi, phi_o, d) def f2(phi, sigma): """normal distribution""" return math.exp(-phi ** 2 / sigma ** 2) def f3(phi, a): """sharp peak""" return a ** 2 / (phi + a) ** 2 def optofitness(op_array, n_obj=1): """apply respective transfer functions to an array of order parameters **order 
of elements matters """ d = 5 f_speed = f1(op_array[1], df.v_flock, df.v_tol) f_coll = f3(op_array[3], df.a_tol) f_disc = f3(op_array[4], df.num_agents / 5) f_wall = f2(op_array[0], df.r_tol) f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5) if op_array[2] > 0: f_corr = op_array[2] else: f_corr = 0 time_fit = 1 if n_obj == 2: F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster F1 = -time_fit * f_wall * f_speed return round(F1, d), round(F2, d) elif n_obj == 3: F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster F2 = -time_fit * f_wall F3 = -time_fit * f_coll return round(F1, d), round(F2, d), round(F3, d) elif n_obj == 'all': return round(f_wall, d), round(f_speed, d), round(f_corr, d), round( f_coll, d), round(f_disc, d), round(f_cluster, d) F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster return round(F1, d) <|reserved_special_token_1|> import math from Config import defaults as df from Utils.controls import sigmoid_decay def f1(phi, phi_o, d): """sinusoidally growing function between (phi_o-d) to phi_o""" return 1 - sigmoid_decay(phi, phi_o, d) def f2(phi, sigma): """normal distribution""" return math.exp(-phi ** 2 / sigma ** 2) def f3(phi, a): """sharp peak""" return a ** 2 / (phi + a) ** 2 def optofitness(op_array, n_obj=1): """apply respective transfer functions to an array of order parameters **order of elements matters """ d = 5 f_speed = f1(op_array[1], df.v_flock, df.v_tol) f_coll = f3(op_array[3], df.a_tol) f_disc = f3(op_array[4], df.num_agents / 5) f_wall = f2(op_array[0], df.r_tol) f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5) if op_array[2] > 0: f_corr = op_array[2] else: f_corr = 0 time_fit = 1 if n_obj == 2: F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster F1 = -time_fit * f_wall * f_speed return round(F1, d), round(F2, d) elif n_obj == 3: F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster F2 = -time_fit * f_wall F3 = -time_fit * f_coll return round(F1, d), round(F2, d), 
round(F3, d) elif n_obj == 'all': return round(f_wall, d), round(f_speed, d), round(f_corr, d), round( f_coll, d), round(f_disc, d), round(f_cluster, d) F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster return round(F1, d) <|reserved_special_token_1|> import math from Config import defaults as df from Utils.controls import sigmoid_decay def f1(phi, phi_o, d): """sinusoidally growing function between (phi_o-d) to phi_o""" return 1 - sigmoid_decay(phi, phi_o, d) def f2(phi, sigma): """normal distribution""" return math.exp(-phi ** 2 / sigma ** 2) def f3(phi, a): """sharp peak""" return a ** 2 / (phi + a) ** 2 def optofitness(op_array, n_obj=1): """apply respective transfer functions to an array of order parameters **order of elements matters """ d = 5 f_speed = f1(op_array[1], df.v_flock, df.v_tol) f_coll = f3(op_array[3], df.a_tol) f_disc = f3(op_array[4], df.num_agents / 5) f_wall = f2(op_array[0], df.r_tol) f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5) if op_array[2] > 0: f_corr = op_array[2] else: f_corr = 0 time_fit = 1 # (1-sigmoid_decay(op_array[6], df.max_sim_time-df.wait_time, 200)) if n_obj == 2: # F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster # F2 = -time_fit * f_wall * f_coll F2 = -time_fit *f_coll * f_corr * f_disc * f_cluster F1 = -time_fit * f_wall * f_speed return round(F1, d), round(F2, d) elif n_obj == 3: F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster F2 = -time_fit * f_wall F3 = -time_fit * f_coll return round(F1, d), round(F2, d), round(F3, d) elif n_obj == 'all': return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(f_coll, d), round(f_disc, d), round(f_cluster, d) F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster return round(F1, d)
flexible
{ "blob_id": "19bb3cd0c7862f39a78479d9a9703ebef198fc73", "index": 3677, "step-1": "<mask token>\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\n<mask token>\n\n\ndef optofitness(op_array, n_obj=1):\n \"\"\"apply respective transfer functions to an array of order parameters\n **order of elements matters\n \"\"\"\n d = 5\n f_speed = f1(op_array[1], df.v_flock, df.v_tol)\n f_coll = f3(op_array[3], df.a_tol)\n f_disc = f3(op_array[4], df.num_agents / 5)\n f_wall = f2(op_array[0], df.r_tol)\n f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)\n if op_array[2] > 0:\n f_corr = op_array[2]\n else:\n f_corr = 0\n time_fit = 1\n if n_obj == 2:\n F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster\n F1 = -time_fit * f_wall * f_speed\n return round(F1, d), round(F2, d)\n elif n_obj == 3:\n F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n F2 = -time_fit * f_wall\n F3 = -time_fit * f_coll\n return round(F1, d), round(F2, d), round(F3, d)\n elif n_obj == 'all':\n return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(\n f_coll, d), round(f_disc, d), round(f_cluster, d)\n F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster\n return round(F1, d)\n", "step-3": "<mask token>\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\ndef f3(phi, a):\n \"\"\"sharp 
peak\"\"\"\n return a ** 2 / (phi + a) ** 2\n\n\ndef optofitness(op_array, n_obj=1):\n \"\"\"apply respective transfer functions to an array of order parameters\n **order of elements matters\n \"\"\"\n d = 5\n f_speed = f1(op_array[1], df.v_flock, df.v_tol)\n f_coll = f3(op_array[3], df.a_tol)\n f_disc = f3(op_array[4], df.num_agents / 5)\n f_wall = f2(op_array[0], df.r_tol)\n f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)\n if op_array[2] > 0:\n f_corr = op_array[2]\n else:\n f_corr = 0\n time_fit = 1\n if n_obj == 2:\n F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster\n F1 = -time_fit * f_wall * f_speed\n return round(F1, d), round(F2, d)\n elif n_obj == 3:\n F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n F2 = -time_fit * f_wall\n F3 = -time_fit * f_coll\n return round(F1, d), round(F2, d), round(F3, d)\n elif n_obj == 'all':\n return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(\n f_coll, d), round(f_disc, d), round(f_cluster, d)\n F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster\n return round(F1, d)\n", "step-4": "import math\nfrom Config import defaults as df\nfrom Utils.controls import sigmoid_decay\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\ndef f3(phi, a):\n \"\"\"sharp peak\"\"\"\n return a ** 2 / (phi + a) ** 2\n\n\ndef optofitness(op_array, n_obj=1):\n \"\"\"apply respective transfer functions to an array of order parameters\n **order of elements matters\n \"\"\"\n d = 5\n f_speed = f1(op_array[1], df.v_flock, df.v_tol)\n f_coll = f3(op_array[3], df.a_tol)\n f_disc = f3(op_array[4], df.num_agents / 5)\n f_wall = f2(op_array[0], df.r_tol)\n f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)\n if op_array[2] > 0:\n f_corr = op_array[2]\n else:\n f_corr = 0\n time_fit = 
1\n if n_obj == 2:\n F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster\n F1 = -time_fit * f_wall * f_speed\n return round(F1, d), round(F2, d)\n elif n_obj == 3:\n F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n F2 = -time_fit * f_wall\n F3 = -time_fit * f_coll\n return round(F1, d), round(F2, d), round(F3, d)\n elif n_obj == 'all':\n return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(\n f_coll, d), round(f_disc, d), round(f_cluster, d)\n F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster\n return round(F1, d)\n", "step-5": "import math\nfrom Config import defaults as df\nfrom Utils.controls import sigmoid_decay\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\ndef f3(phi, a):\n \"\"\"sharp peak\"\"\"\n return a ** 2 / (phi + a) ** 2\n\n\ndef optofitness(op_array, n_obj=1):\n \"\"\"apply respective transfer functions to an array of order parameters\n **order of elements matters\n \"\"\"\n d = 5\n f_speed = f1(op_array[1], df.v_flock, df.v_tol)\n f_coll = f3(op_array[3], df.a_tol)\n f_disc = f3(op_array[4], df.num_agents / 5)\n f_wall = f2(op_array[0], df.r_tol)\n f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)\n if op_array[2] > 0:\n f_corr = op_array[2]\n else:\n f_corr = 0\n time_fit = 1 # (1-sigmoid_decay(op_array[6], df.max_sim_time-df.wait_time, 200))\n if n_obj == 2:\n # F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n # F2 = -time_fit * f_wall * f_coll\n F2 = -time_fit *f_coll * f_corr * f_disc * f_cluster\n F1 = -time_fit * f_wall * f_speed\n return round(F1, d), round(F2, d)\n\n elif n_obj == 3:\n F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n F2 = -time_fit * f_wall\n F3 = -time_fit * f_coll\n return round(F1, d), round(F2, d), round(F3, d)\n elif n_obj == 'all':\n return 
round(f_wall, d), round(f_speed, d), round(f_corr, d), round(f_coll, d), round(f_disc, d), round(f_cluster, d)\n F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster\n return round(F1, d)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from django.contrib import admin from django.urls import path from . import views urlpatterns = [ path('', views.artifact, name="artifacts"), path('<int:artifact_id>', views.detail, name="detail"), path('register/', views.register, name="register") ]
normal
{ "blob_id": "9b73037e8af7d4f91261cebf895b68650182fcd5", "index": 2780, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('', views.artifact, name='artifacts'), path(\n '<int:artifact_id>', views.detail, name='detail'), path('register/',\n views.register, name='register')]\n", "step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nurlpatterns = [path('', views.artifact, name='artifacts'), path(\n '<int:artifact_id>', views.detail, name='detail'), path('register/',\n views.register, name='register')]\n", "step-4": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.artifact, name=\"artifacts\"),\n path('<int:artifact_id>', views.detail, name=\"detail\"),\n path('register/', views.register, name=\"register\")\n]", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from collections import defaultdict, namedtuple from color import RGB, clamp import math import controls_model as controls from eyes import Eye, MutableEye from geom import ALL #from icicles.ice_geom import ALL def load_geometry(mapfile): """ Load sheep neighbor geometry Returns a map { panel: [(edge-neighbors), (vertex-neighbors)], ... } """ with open(mapfile, 'r') as f: def blank_or_comment(l): return l.startswith('#') or len(l) == 0 lines = [l.strip() for l in f.readlines()] lines = [l for l in lines if not blank_or_comment(l)] def to_ints(seq): return [int(x) for x in seq] def p(raw): "returns a tuple containing ([a,a,a], [b,b,b]) given a raw string" raw = raw.strip() if ' ' not in raw: return (to_ints(raw.split(',')), None) else: # print ">>%s<<" % raw a,b = raw.split() return (to_ints(a.split(',')), to_ints(b.split(','))) dat = {} # defaultdict(list) for line in lines: # print line (num, rest) = line.split(' ', 1) dat[int(num)] = p(rest.strip()) return dat _neighbor_map = load_geometry('data/geom.txt') def edge_neighbors(panel): "Return the list of panel ids that share an edge with a given panel" try: panel = int(panel) out = _neighbor_map[panel][0] if out is None: return [] return out except Exception, e: return [] def vertex_neighbors(panel): "Return the list of panel ids that share a vertex (but not an edge) with a given panel" try: panel = int(panel) out = _neighbor_map[panel][1] if out is None: return [] return out except Exception, e: return [] ## ## Convenience wrapper to pass around three separate sheep objects ## SheepSides = namedtuple('SheepSides', ['both', 'party', 'business', 'party_eye', 'business_eye']) def make_sheep(model): return SheepSides(both=Sheep(model, 'a'), party=Sheep(model, 'p'), business=Sheep(model, 'b'), party_eye=Eye(model, 'p'), business_eye=Eye(model, 'b')) def make_eyes_only_sheep(sides): null = NullSheep() return SheepSides(both=null, party=null, business=null, party_eye = sides.party_eye, business_eye = sides.business_eye) 
def make_mutable_sheep(sides): return SheepSides( both=MutableSheep(sides.both), party=MutableSheep(sides.party), business=MutableSheep(sides.business), party_eye=MutableEye(sides.party_eye), business_eye=MutableEye(sides.business_eye) ) ## ## Sheep class to represent one or both sides of the sheep ## VALID_SIDES=set(['a', 'b', 'p']) TEST_COLORS = [ RGB(141,211,199),RGB(255,255,179),RGB(190,186,218),RGB(251,128,114),RGB(128,177,211),RGB(253,180,98),RGB(179,222,105),RGB(252,205,229),RGB(217,217,217),RGB(188,128,189),RGB(204,235,197),RGB(255,237,111) ] class Sheep(object): def __init__(self, model, side): self.model = model if side not in VALID_SIDES: raise Exception("%s is not a valid side. use one of a,b,p") self.side = side self.cells = set(ALL) self.cm = None self.handle_colorized = False self._brightness = 1.0 def __repr__(self): return "Sheep(%s, side='%s')" % (self.model, self.side) def set_brightness(self, val): self._brightness = val def all_cells(self): "Return the list of valid cell IDs" return ALL # handle setting both sides here to keep the commands sent # to the simulator as close as possible to the actual hardware def _resolve(self, cell): """ Translate an integer cell id into a model cell identifier 'a' will be translated into two cells """ if cell in self.cells: if self.side == 'a': return [str(cell)+'b', str(cell)+'p'] else: return [str(cell) + self.side] else: return [] def set_cell(self, cell, color): if isinstance(cell, list): return self.set_cells(cell, color) # a single set_cell call may result in two panels being set c = self._resolve(cell) if not c: return if self.handle_colorized and self.cm: color = color.colorize(self.cm.colorized) if self._brightness < 1.0: color = color.copy() color.v = color.v * self._brightness # print "setting", c self.model.set_cells(c, color) def set_cells(self, cells, color): if cells is None: return resolved = [] for c in cells: if isinstance(c, list): for cb in c: resolved.extend(self._resolve(cb)) else: 
resolved.extend(self._resolve(c)) if self.handle_colorized and self.cm: color = color.colorize(self.cm.colorized) if self._brightness < 1.0: color = color.copy() color.v = color.v * self._brightness # print "setting", resolved self.model.set_cells(resolved, color) def set_all_cells(self, color): self.set_cells(ALL, color) def clear(self): "" self.set_all_cells(RGB(0,0,0)) # AAck! Never call go like this. Let the main loop # handle the timing!!! :( # self.go() def go(self): self.model.go() # convenience methods in case you only have a sheep object def edge_neighbors(self, cell): return edge_neighbors(cell) def vertex_neighbors(self, cell): return vertex_neighbors(cell) def set_test_colors(self): ix = 0 for p in ALL: self.set_cell(p, TEST_COLORS[ix]) ix += 1 if ix == len(TEST_COLORS): ix = 0 class NullSheep(object): """ An implementation of the Sheep side interface that does nothing. This can be handed to a show which might try to modify it, and thus can run without crashing, while only the eye modifications are used. 
""" def all_cells(self): return ALL def set_cell(self, cell, color): pass def set_cells(self, cells, color): pass def set_all_cells(self, color): pass def clear(self): pass def go(self): pass def edge_neighbors(self, cell): return edge_neighbors(cell) def vertex_neighbors(self, cell): return vertex_neighbors(cell) def set_test_colors(self): pass class MutableSheep(object): """ An implementation of the Sheep side interface which can be muted - that is, when muted, this sheep will act like the NullSheep, but when unmuted it will pass things to it's parent """ def __init__(self, parent): self.parent = parent self.muted = False def set_cell(self, cell, color): if self.muted: return self.parent.set_cell(cell, color) def set_cells(self, cells, color): if self.muted: return self.parent.set_cells(cells, color) def set_all_cells(self, color): if self.muted: return self.parent.set_all_cells(color) def clear(self): if self.muted: return self.parent.clear() def go(self): if self.muted: return self.parent.go() def set_test_colors(self): self.parent.set_test_colors() def all_cells(self): return self.parent.all_cells() def edge_neighbors(self, cell): return self.parent.edge_neighbors(cell) def vertex_neighbors(self, cell): return self.parent.vertex_neighbors(cell)
normal
{ "blob_id": "fe01b78d29dc456f7a537dd5639bc658fc184e36", "index": 5035, "step-1": "from collections import defaultdict, namedtuple\nfrom color import RGB, clamp\n\nimport math\n\nimport controls_model as controls\nfrom eyes import Eye, MutableEye\n\nfrom geom import ALL\n#from icicles.ice_geom import ALL\n\ndef load_geometry(mapfile):\n \"\"\"\n Load sheep neighbor geometry\n Returns a map { panel: [(edge-neighbors), (vertex-neighbors)], ... }\n \"\"\"\n with open(mapfile, 'r') as f:\n def blank_or_comment(l):\n return l.startswith('#') or len(l) == 0\n lines = [l.strip() for l in f.readlines()]\n lines = [l for l in lines if not blank_or_comment(l)]\n\n def to_ints(seq):\n return [int(x) for x in seq]\n\n def p(raw):\n \"returns a tuple containing ([a,a,a], [b,b,b]) given a raw string\"\n raw = raw.strip()\n if ' ' not in raw:\n return (to_ints(raw.split(',')), None)\n else:\n # print \">>%s<<\" % raw\n a,b = raw.split()\n return (to_ints(a.split(',')), to_ints(b.split(',')))\n\n dat = {} # defaultdict(list)\n for line in lines:\n # print line\n (num, rest) = line.split(' ', 1)\n dat[int(num)] = p(rest.strip())\n\n return dat\n\n_neighbor_map = load_geometry('data/geom.txt')\n\ndef edge_neighbors(panel):\n \"Return the list of panel ids that share an edge with a given panel\"\n try:\n panel = int(panel)\n out = _neighbor_map[panel][0]\n if out is None:\n return []\n\n return out\n except Exception, e:\n return []\n\ndef vertex_neighbors(panel):\n \"Return the list of panel ids that share a vertex (but not an edge) with a given panel\"\n try:\n panel = int(panel)\n out = _neighbor_map[panel][1]\n if out is None:\n return []\n\n return out\n except Exception, e:\n return []\n\n##\n## Convenience wrapper to pass around three separate sheep objects\n##\nSheepSides = namedtuple('SheepSides', ['both', 'party', 'business', 'party_eye', 'business_eye'])\n\ndef make_sheep(model):\n return SheepSides(both=Sheep(model, 'a'),\n party=Sheep(model, 'p'),\n business=Sheep(model, 
'b'),\n party_eye=Eye(model, 'p'),\n business_eye=Eye(model, 'b'))\n\ndef make_eyes_only_sheep(sides):\n null = NullSheep()\n return SheepSides(both=null, party=null, business=null, party_eye = sides.party_eye, business_eye = sides.business_eye)\n\ndef make_mutable_sheep(sides):\n return SheepSides(\n both=MutableSheep(sides.both),\n party=MutableSheep(sides.party),\n business=MutableSheep(sides.business),\n party_eye=MutableEye(sides.party_eye),\n business_eye=MutableEye(sides.business_eye)\n )\n##\n## Sheep class to represent one or both sides of the sheep\n##\nVALID_SIDES=set(['a', 'b', 'p'])\nTEST_COLORS = [\nRGB(141,211,199),RGB(255,255,179),RGB(190,186,218),RGB(251,128,114),RGB(128,177,211),RGB(253,180,98),RGB(179,222,105),RGB(252,205,229),RGB(217,217,217),RGB(188,128,189),RGB(204,235,197),RGB(255,237,111)\n]\n\nclass Sheep(object):\n def __init__(self, model, side):\n self.model = model\n if side not in VALID_SIDES:\n raise Exception(\"%s is not a valid side. use one of a,b,p\")\n self.side = side\n self.cells = set(ALL)\n self.cm = None\n self.handle_colorized = False\n\n self._brightness = 1.0\n\n def __repr__(self):\n return \"Sheep(%s, side='%s')\" % (self.model, self.side)\n\n def set_brightness(self, val):\n self._brightness = val\n\n def all_cells(self):\n \"Return the list of valid cell IDs\"\n return ALL\n\n # handle setting both sides here to keep the commands sent\n # to the simulator as close as possible to the actual hardware\n def _resolve(self, cell):\n \"\"\"\n Translate an integer cell id into a model cell identifier\n 'a' will be translated into two cells\n \"\"\"\n if cell in self.cells:\n if self.side == 'a':\n return [str(cell)+'b', str(cell)+'p']\n else:\n return [str(cell) + self.side]\n else:\n return []\n\n def set_cell(self, cell, color):\n if isinstance(cell, list):\n return self.set_cells(cell, color)\n\n # a single set_cell call may result in two panels being set\n c = self._resolve(cell)\n if not c:\n return\n\n if 
self.handle_colorized and self.cm:\n color = color.colorize(self.cm.colorized)\n\n if self._brightness < 1.0:\n color = color.copy()\n color.v = color.v * self._brightness\n\n # print \"setting\", c\n self.model.set_cells(c, color)\n\n def set_cells(self, cells, color):\n if cells is None:\n return\n\n resolved = []\n for c in cells:\n if isinstance(c, list):\n for cb in c:\n resolved.extend(self._resolve(cb))\n else:\n resolved.extend(self._resolve(c))\n\n if self.handle_colorized and self.cm:\n color = color.colorize(self.cm.colorized)\n\n if self._brightness < 1.0:\n color = color.copy()\n color.v = color.v * self._brightness\n\n # print \"setting\", resolved\n self.model.set_cells(resolved, color)\n\n def set_all_cells(self, color):\n self.set_cells(ALL, color)\n\n def clear(self):\n \"\"\n self.set_all_cells(RGB(0,0,0))\n # AAck! Never call go like this. Let the main loop\n # handle the timing!!! :(\n # self.go()\n\n def go(self):\n self.model.go()\n\n # convenience methods in case you only have a sheep object\n def edge_neighbors(self, cell):\n return edge_neighbors(cell)\n\n def vertex_neighbors(self, cell):\n return vertex_neighbors(cell)\n\n def set_test_colors(self):\n ix = 0\n for p in ALL:\n self.set_cell(p, TEST_COLORS[ix])\n ix += 1\n if ix == len(TEST_COLORS):\n ix = 0\n\n\nclass NullSheep(object):\n \"\"\"\n An implementation of the Sheep side interface that does nothing. 
This\n can be handed to a show which might try to modify it, and thus can run\n without crashing, while only the eye modifications are used.\n \"\"\"\n def all_cells(self):\n return ALL\n\n def set_cell(self, cell, color):\n pass\n\n def set_cells(self, cells, color):\n pass\n\n def set_all_cells(self, color):\n pass\n\n def clear(self):\n pass\n\n def go(self):\n pass\n\n def edge_neighbors(self, cell):\n return edge_neighbors(cell)\n\n def vertex_neighbors(self, cell):\n return vertex_neighbors(cell)\n\n def set_test_colors(self):\n pass\n\n\nclass MutableSheep(object):\n \"\"\"\n An implementation of the Sheep side interface which can be muted -\n that is, when muted, this sheep will act like the NullSheep, but when\n unmuted it will pass things to it's parent\n \"\"\"\n\n def __init__(self, parent):\n self.parent = parent\n self.muted = False\n\n def set_cell(self, cell, color):\n if self.muted:\n return\n\n self.parent.set_cell(cell, color)\n\n def set_cells(self, cells, color):\n if self.muted:\n return\n self.parent.set_cells(cells, color)\n\n def set_all_cells(self, color):\n if self.muted:\n return\n self.parent.set_all_cells(color)\n\n def clear(self):\n if self.muted:\n return\n self.parent.clear()\n\n def go(self):\n if self.muted:\n return\n\n self.parent.go()\n\n def set_test_colors(self):\n self.parent.set_test_colors()\n\n def all_cells(self):\n return self.parent.all_cells()\n\n def edge_neighbors(self, cell):\n return self.parent.edge_neighbors(cell)\n\n def vertex_neighbors(self, cell):\n return self.parent.vertex_neighbors(cell)\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import os from subprocess import Popen, PIPE from Bio import SeqIO from Bio.Align.Applications import ClustalOmegaCommandline from Bio import Phylo from io import StringIO # from ete3 import Tree, TreeStyle import pylab class TreeDrawer: def __init__(self, sequences=None): self.sequences = sequences def make_alignment(self, method): ### Mulltiple Sequence Alignment ### path = os.getcwd() in_file = "example.fasta" out_file = "alignment.aln" if os.path.isfile("alignment.aln"): os.remove("alignment.aln") clustalomega_cline = ClustalOmegaCommandline(infile=in_file, outfile=out_file, verbose=True, iterations=1, max_guidetree_iterations=1, max_hmm_iterations=1, dealign=True, outfmt="clu") print(clustalomega_cline) stdout, stderr = clustalomega_cline() ### Convert to phylip format ### SeqIO.convert("alignment.aln", "clustal", "alignment.phy", "phylip") ### Phylogentetic analysis ### # Choose method proml, dnaml # Maximum likelihood analysis # # Run Phylip Proml program instructions = bytes("alignment.phy\ny\n", 'utf-8') proml = Popen("phylip " + method, stdin=PIPE, shell=True) (out, err) = proml.communicate(instructions) # Change output files names files = Popen("mv outfile " + method + ".out", stdin=PIPE, shell=True) (out, err) = files.communicate() files = Popen("mv outtree " + method + ".tree", stdin=PIPE, shell=True) (out, err) = files.communicate() def draw_tree(self, filename): # instructions = bytes("dnaml.tree\nl\na\ny\n", 'utf-8') # dnaml = Popen("phylip drawtree", stdin=PIPE, shell=True) # (out, err) = dnaml.communicate(instructions) tree_file = open('dnaml.tree') x = tree_file.read() # t = Tree() # ts = TreeStyle() # ts.show_leaf_name = True # ts.branch_vertical_margin = 10 # 10 pixels between adjacent branches # t.show(tree_style=ts) tree = Phylo.read(StringIO(x[:-2]), "newick") Phylo.draw(tree, do_show=False) pylab.savefig('biohackProject/static/images/'+filename+'.png', dpi=300)
normal
{ "blob_id": "5adb16c654a4e747f803590c42328fa6ba642e95", "index": 7599, "step-1": "<mask token>\n\n\nclass TreeDrawer:\n <mask token>\n <mask token>\n\n def draw_tree(self, filename):\n tree_file = open('dnaml.tree')\n x = tree_file.read()\n tree = Phylo.read(StringIO(x[:-2]), 'newick')\n Phylo.draw(tree, do_show=False)\n pylab.savefig('biohackProject/static/images/' + filename + '.png',\n dpi=300)\n", "step-2": "<mask token>\n\n\nclass TreeDrawer:\n\n def __init__(self, sequences=None):\n self.sequences = sequences\n <mask token>\n\n def draw_tree(self, filename):\n tree_file = open('dnaml.tree')\n x = tree_file.read()\n tree = Phylo.read(StringIO(x[:-2]), 'newick')\n Phylo.draw(tree, do_show=False)\n pylab.savefig('biohackProject/static/images/' + filename + '.png',\n dpi=300)\n", "step-3": "<mask token>\n\n\nclass TreeDrawer:\n\n def __init__(self, sequences=None):\n self.sequences = sequences\n\n def make_alignment(self, method):\n path = os.getcwd()\n in_file = 'example.fasta'\n out_file = 'alignment.aln'\n if os.path.isfile('alignment.aln'):\n os.remove('alignment.aln')\n clustalomega_cline = ClustalOmegaCommandline(infile=in_file,\n outfile=out_file, verbose=True, iterations=1,\n max_guidetree_iterations=1, max_hmm_iterations=1, dealign=True,\n outfmt='clu')\n print(clustalomega_cline)\n stdout, stderr = clustalomega_cline()\n SeqIO.convert('alignment.aln', 'clustal', 'alignment.phy', 'phylip')\n instructions = bytes('alignment.phy\\ny\\n', 'utf-8')\n proml = Popen('phylip ' + method, stdin=PIPE, shell=True)\n out, err = proml.communicate(instructions)\n files = Popen('mv outfile ' + method + '.out', stdin=PIPE, shell=True)\n out, err = files.communicate()\n files = Popen('mv outtree ' + method + '.tree', stdin=PIPE, shell=True)\n out, err = files.communicate()\n\n def draw_tree(self, filename):\n tree_file = open('dnaml.tree')\n x = tree_file.read()\n tree = Phylo.read(StringIO(x[:-2]), 'newick')\n Phylo.draw(tree, do_show=False)\n 
pylab.savefig('biohackProject/static/images/' + filename + '.png',\n dpi=300)\n", "step-4": "import os\nfrom subprocess import Popen, PIPE\nfrom Bio import SeqIO\nfrom Bio.Align.Applications import ClustalOmegaCommandline\nfrom Bio import Phylo\nfrom io import StringIO\nimport pylab\n\n\nclass TreeDrawer:\n\n def __init__(self, sequences=None):\n self.sequences = sequences\n\n def make_alignment(self, method):\n path = os.getcwd()\n in_file = 'example.fasta'\n out_file = 'alignment.aln'\n if os.path.isfile('alignment.aln'):\n os.remove('alignment.aln')\n clustalomega_cline = ClustalOmegaCommandline(infile=in_file,\n outfile=out_file, verbose=True, iterations=1,\n max_guidetree_iterations=1, max_hmm_iterations=1, dealign=True,\n outfmt='clu')\n print(clustalomega_cline)\n stdout, stderr = clustalomega_cline()\n SeqIO.convert('alignment.aln', 'clustal', 'alignment.phy', 'phylip')\n instructions = bytes('alignment.phy\\ny\\n', 'utf-8')\n proml = Popen('phylip ' + method, stdin=PIPE, shell=True)\n out, err = proml.communicate(instructions)\n files = Popen('mv outfile ' + method + '.out', stdin=PIPE, shell=True)\n out, err = files.communicate()\n files = Popen('mv outtree ' + method + '.tree', stdin=PIPE, shell=True)\n out, err = files.communicate()\n\n def draw_tree(self, filename):\n tree_file = open('dnaml.tree')\n x = tree_file.read()\n tree = Phylo.read(StringIO(x[:-2]), 'newick')\n Phylo.draw(tree, do_show=False)\n pylab.savefig('biohackProject/static/images/' + filename + '.png',\n dpi=300)\n", "step-5": "import os\nfrom subprocess import Popen, PIPE\nfrom Bio import SeqIO\nfrom Bio.Align.Applications import ClustalOmegaCommandline\nfrom Bio import Phylo\nfrom io import StringIO\n# from ete3 import Tree, TreeStyle\nimport pylab\n\n\nclass TreeDrawer:\n\n def __init__(self, sequences=None):\n self.sequences = sequences\n\n def make_alignment(self, method):\n ### Mulltiple Sequence Alignment ###\n path = os.getcwd()\n in_file = \"example.fasta\"\n out_file = 
\"alignment.aln\"\n\n if os.path.isfile(\"alignment.aln\"):\n os.remove(\"alignment.aln\")\n clustalomega_cline = ClustalOmegaCommandline(infile=in_file, outfile=out_file, verbose=True, iterations=1,\n max_guidetree_iterations=1, max_hmm_iterations=1, dealign=True,\n outfmt=\"clu\")\n\n print(clustalomega_cline)\n stdout, stderr = clustalomega_cline()\n ### Convert to phylip format ###\n SeqIO.convert(\"alignment.aln\", \"clustal\", \"alignment.phy\", \"phylip\")\n ### Phylogentetic analysis ###\n # Choose method proml, dnaml\n # Maximum likelihood analysis #\n # Run Phylip Proml program\n instructions = bytes(\"alignment.phy\\ny\\n\", 'utf-8')\n proml = Popen(\"phylip \" + method, stdin=PIPE, shell=True)\n (out, err) = proml.communicate(instructions)\n # Change output files names\n files = Popen(\"mv outfile \" + method + \".out\", stdin=PIPE, shell=True)\n (out, err) = files.communicate()\n files = Popen(\"mv outtree \" + method + \".tree\", stdin=PIPE, shell=True)\n (out, err) = files.communicate()\n\n def draw_tree(self, filename):\n # instructions = bytes(\"dnaml.tree\\nl\\na\\ny\\n\", 'utf-8')\n # dnaml = Popen(\"phylip drawtree\", stdin=PIPE, shell=True)\n # (out, err) = dnaml.communicate(instructions)\n tree_file = open('dnaml.tree')\n x = tree_file.read()\n # t = Tree()\n # ts = TreeStyle()\n # ts.show_leaf_name = True\n # ts.branch_vertical_margin = 10 # 10 pixels between adjacent branches\n # t.show(tree_style=ts)\n tree = Phylo.read(StringIO(x[:-2]), \"newick\")\n Phylo.draw(tree, do_show=False)\n pylab.savefig('biohackProject/static/images/'+filename+'.png', dpi=300)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# MINISTを読み込んでレイヤーAPIでCNNを構築するファイル import tensorflow as tf import numpy as np import os import tensorflow as tf import glob import numpy as np import config as cf from data_loader import DataLoader from PIL import Image from matplotlib import pylab as plt dl = DataLoader(phase='Train', shuffle=True) X_data , y_data = dl.shuffle_and_get() # dl_test = DataLoader(phase='Test', shuffle=True) X_data = np.reshape(X_data,[-1,cf.Height, cf.Width]) # plt.imshow(X_data[0]) # test_imgs, test_gts = dl_test.get_minibatch(shuffle=True) config = tf.ConfigProto() config.gpu_options.allow_growth = True config.gpu_options.visible_device_list="0" # def load_img(): # import cv2 # img = cv2.imread("test.jpg").astype(np.float32) # img = cv2.resize(img, (cf.Width, cf.Height,1)) # img = img[:,:,(2,1,0)] # img = img[np.newaxis, :] # img = img / 255. # return img # with tf.Session(config=config) as sess: # saver = tf.train.Saver() # saver.restore(sess, "out.ckpt") # img = load_img() # pred = logits.eval(feed_dict={X: img, keep_prob: 1.0})[0] # pred_label = np.argmax(pred) # print(pred_label) # X_data = dataset['train_img'] # y_data = dataset['train_label'] # print('Rows: %d, Columns: %d' % (X_data.shape[0], X_data.shape[1])) # X_test =dataset['test_img'] # y_test =dataset['test_label'] # print('Rows: %d, Columns: %d' % (X_test.shape[0], X_test.shape[1])) # X_train, y_train = X_data[:50000,:], y_data[:50000] # X_valid, y_valid = X_data[50000:,:], y_data[50000:] # print('Training: ', X_train.shape, y_train.shape) # print('Validation: ', X_valid.shape, y_valid.shape) # print('Test Set: ', X_test.shape, y_test.shape)
normal
{ "blob_id": "a5559ff22776dee133f5398bae573f515efb8484", "index": 3820, "step-1": "<mask token>\n", "step-2": "<mask token>\ndl = DataLoader(phase='Train', shuffle=True)\nX_data, y_data = dl.shuffle_and_get()\nX_data = np.reshape(X_data, [-1, cf.Height, cf.Width])\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.visible_device_list = '0'\n", "step-3": "import tensorflow as tf\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport glob\nimport numpy as np\nimport config as cf\nfrom data_loader import DataLoader\nfrom PIL import Image\nfrom matplotlib import pylab as plt\ndl = DataLoader(phase='Train', shuffle=True)\nX_data, y_data = dl.shuffle_and_get()\nX_data = np.reshape(X_data, [-1, cf.Height, cf.Width])\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.visible_device_list = '0'\n", "step-4": "# MINISTを読み込んでレイヤーAPIでCNNを構築するファイル\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport os\r\n\r\nimport tensorflow as tf\r\nimport glob\r\nimport numpy as np\r\n\r\nimport config as cf\r\nfrom data_loader import DataLoader\r\nfrom PIL import Image\r\nfrom matplotlib import pylab as plt\r\n\r\ndl = DataLoader(phase='Train', shuffle=True)\r\nX_data , y_data = dl.shuffle_and_get()\r\n# dl_test = DataLoader(phase='Test', shuffle=True)\r\nX_data = np.reshape(X_data,[-1,cf.Height, cf.Width])\r\n\r\n\r\n# plt.imshow(X_data[0])\r\n# test_imgs, test_gts = dl_test.get_minibatch(shuffle=True)\r\n\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nconfig.gpu_options.visible_device_list=\"0\"\r\n\r\n\r\n\r\n# def load_img():\r\n# import cv2\r\n# img = cv2.imread(\"test.jpg\").astype(np.float32)\r\n# img = cv2.resize(img, (cf.Width, cf.Height,1))\r\n# img = img[:,:,(2,1,0)]\r\n# img = img[np.newaxis, :]\r\n# img = img / 255.\r\n# return img\r\n\r\n# with tf.Session(config=config) as sess:\r\n# saver = tf.train.Saver()\r\n# saver.restore(sess, \"out.ckpt\")\r\n\r\n# img = 
load_img()\r\n\r\n# pred = logits.eval(feed_dict={X: img, keep_prob: 1.0})[0]\r\n# pred_label = np.argmax(pred)\r\n# print(pred_label)\r\n\r\n# X_data = dataset['train_img']\r\n# y_data = dataset['train_label']\r\n# print('Rows: %d, Columns: %d' % (X_data.shape[0], X_data.shape[1]))\r\n# X_test =dataset['test_img']\r\n# y_test =dataset['test_label']\r\n# print('Rows: %d, Columns: %d' % (X_test.shape[0], X_test.shape[1]))\r\n\r\n# X_train, y_train = X_data[:50000,:], y_data[:50000]\r\n# X_valid, y_valid = X_data[50000:,:], y_data[50000:]\r\n\r\n# print('Training: ', X_train.shape, y_train.shape)\r\n# print('Validation: ', X_valid.shape, y_valid.shape)\r\n# print('Test Set: ', X_test.shape, y_test.shape)\r\n\r\n\r\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type= 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True): """2D plot of depth vs some other variable, stretching first 500m of depth. Parameters ---------- depth : xarray DataArray or numpy array depth variable x : xarray DataArray or numpy array variable for x-axis. Likely to be time, latitude, or longitude fld : xarray DataArray or numpy array 2D field with depth + 1 dim stretch_depth : scalar (int or float), optional stretch top depth to this limit """ if len(x) == fld.shape[0]: fld = fld.transpose() cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax) fig = plt.figure(figsize=(12, 6), dpi=dpi) ax1 = plt.subplot(2, 1, 1) if plot_type == 'pcolormesh': p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) elif plot_type == 'contourf': p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) plt.ylim([stretch_depth, 0]) ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100)) plt.ylabel('Depth [%s]' % depth.attrs['units']) ax1.xaxis.axes.set_xticklabels([]) ax2 = plt.subplot(2, 1, 2) if plot_type == 'pcolormesh': p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) elif plot_type == 'contourf': p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) plt.ylim([depth.min(), stretch_depth]) yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000)) ax2.yaxis.axes.set_yticks(yticks) plt.ylabel('Depth [%s]' % depth.attrs['units']) fig.subplots_adjust(hspace=0.05) if title is not None: fig.suptitle(title, verticalalignment='top', fontsize=24) if show_colorbar: fig.subplots_adjust(right=0.83) cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8]) fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar) plt.show() return fig, ax1, ax2 <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type= 
'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True): """2D plot of depth vs some other variable, stretching first 500m of depth. Parameters ---------- depth : xarray DataArray or numpy array depth variable x : xarray DataArray or numpy array variable for x-axis. Likely to be time, latitude, or longitude fld : xarray DataArray or numpy array 2D field with depth + 1 dim stretch_depth : scalar (int or float), optional stretch top depth to this limit """ if len(x) == fld.shape[0]: fld = fld.transpose() cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax) fig = plt.figure(figsize=(12, 6), dpi=dpi) ax1 = plt.subplot(2, 1, 1) if plot_type == 'pcolormesh': p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) elif plot_type == 'contourf': p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) plt.ylim([stretch_depth, 0]) ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100)) plt.ylabel('Depth [%s]' % depth.attrs['units']) ax1.xaxis.axes.set_xticklabels([]) ax2 = plt.subplot(2, 1, 2) if plot_type == 'pcolormesh': p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) elif plot_type == 'contourf': p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) plt.ylim([depth.min(), stretch_depth]) yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000)) ax2.yaxis.axes.set_yticks(yticks) plt.ylabel('Depth [%s]' % depth.attrs['units']) fig.subplots_adjust(hspace=0.05) if title is not None: fig.suptitle(title, verticalalignment='top', fontsize=24) if show_colorbar: fig.subplots_adjust(right=0.83) cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8]) fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar) plt.show() return fig, ax1, ax2 def set_colorbar_limits(fld, cmin, cmax): """If unset, compute colorbar limits based on field max/min values, sequential/divergent Determine if colorbar needs to be extended Parameters ---------- fld : xarray.DataArray 2D field to be plotted Output 
------ cmin : double colorbar min value cmax : double colorbar max value extend_cbar : string flag to colorbar extension """ if cmin is None and cmax is not None: raise RuntimeError('Only cmax given, must provide both cmin and cmax') elif cmin is not None and cmax is None: raise RuntimeError('Only cmin given, must provide both cmin and cmax') else: if type(cmin) is xr.DataArray: cmin = cmin.values() elif cmin is not None: raise TypeError('Unsure of cmin type: ', type(cmin)) if type(cmax) is xr.DataArray: cmax = cmax.values() elif cmax is not None: raise TypeError('Unsure of cmax type: ', type(cmax)) fld_min = fld.min(skipna=True).values fld_max = fld.max(skipna=True).values if cmin is None and cmax is None: cmin = fld_min cmax = fld_max if fld_max * fld_min < 0 and fld.name is not 'THETA': cmax = np.nanmax(np.abs(fld.values)) cmin = -cmax if cmin > fld_min and cmax < fld_max: extend_cbar = 'both' elif cmin > fld_min: extend_cbar = 'min' elif cmax < fld_max: extend_cbar = 'max' else: extend_cbar = 'neither' return cmin, cmax, extend_cbar <|reserved_special_token_1|> <|reserved_special_token_0|> def global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap= 'YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True): """Generate the Robinson and Arctic/Antarctic plot. Parameters ---------- lat : xarray.DataArray lon : xarray.DataArray fld : xarray.DataArray plot_type : string, optional plot type to use, 'pcolormesh', or 'contourf' cmap : string or colormap object (TBD) cmin : double, optional minimum value for colorbar cmax : double, optional maximum value for colorbar dpi : int, optiopnal plot resolution in dots (pixels) per inch title,show_colorbar figsize? 
Output ------ """ cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax) plt.figure(figsize=(12, 6), dpi=dpi) fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap= cmap, plot_type=plot_type, subplot_grid=[2, 1, 1], projection_type= 'robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0) fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap= cmap, plot_type=plot_type, subplot_grid=[2, 2, 3], projection_type= 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50, user_lon_0=0) fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap= cmap, plot_type=plot_type, subplot_grid=[2, 2, 4], projection_type= 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40, user_lon_0=180) ax1.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2) ax2.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2) ax3.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2) if title is not None: fig.suptitle(title, verticalalignment='top', fontsize=24) if show_colorbar: fig.subplots_adjust(right=0.9) cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8]) fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar) return fig, (ax1, ax2, ax3) def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type= 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True): """2D plot of depth vs some other variable, stretching first 500m of depth. Parameters ---------- depth : xarray DataArray or numpy array depth variable x : xarray DataArray or numpy array variable for x-axis. 
Likely to be time, latitude, or longitude fld : xarray DataArray or numpy array 2D field with depth + 1 dim stretch_depth : scalar (int or float), optional stretch top depth to this limit """ if len(x) == fld.shape[0]: fld = fld.transpose() cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax) fig = plt.figure(figsize=(12, 6), dpi=dpi) ax1 = plt.subplot(2, 1, 1) if plot_type == 'pcolormesh': p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) elif plot_type == 'contourf': p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) plt.ylim([stretch_depth, 0]) ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100)) plt.ylabel('Depth [%s]' % depth.attrs['units']) ax1.xaxis.axes.set_xticklabels([]) ax2 = plt.subplot(2, 1, 2) if plot_type == 'pcolormesh': p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) elif plot_type == 'contourf': p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) plt.ylim([depth.min(), stretch_depth]) yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000)) ax2.yaxis.axes.set_yticks(yticks) plt.ylabel('Depth [%s]' % depth.attrs['units']) fig.subplots_adjust(hspace=0.05) if title is not None: fig.suptitle(title, verticalalignment='top', fontsize=24) if show_colorbar: fig.subplots_adjust(right=0.83) cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8]) fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar) plt.show() return fig, ax1, ax2 def set_colorbar_limits(fld, cmin, cmax): """If unset, compute colorbar limits based on field max/min values, sequential/divergent Determine if colorbar needs to be extended Parameters ---------- fld : xarray.DataArray 2D field to be plotted Output ------ cmin : double colorbar min value cmax : double colorbar max value extend_cbar : string flag to colorbar extension """ if cmin is None and cmax is not None: raise RuntimeError('Only cmax given, must provide both cmin and cmax') elif cmin is not None and cmax is None: raise RuntimeError('Only cmin 
given, must provide both cmin and cmax') else: if type(cmin) is xr.DataArray: cmin = cmin.values() elif cmin is not None: raise TypeError('Unsure of cmin type: ', type(cmin)) if type(cmax) is xr.DataArray: cmax = cmax.values() elif cmax is not None: raise TypeError('Unsure of cmax type: ', type(cmax)) fld_min = fld.min(skipna=True).values fld_max = fld.max(skipna=True).values if cmin is None and cmax is None: cmin = fld_min cmax = fld_max if fld_max * fld_min < 0 and fld.name is not 'THETA': cmax = np.nanmax(np.abs(fld.values)) cmin = -cmax if cmin > fld_min and cmax < fld_max: extend_cbar = 'both' elif cmin > fld_min: extend_cbar = 'min' elif cmax < fld_max: extend_cbar = 'max' else: extend_cbar = 'neither' return cmin, cmax, extend_cbar <|reserved_special_token_1|> <|reserved_special_token_0|> import numpy as np import matplotlib.pyplot as plt import cartopy as cart import xarray as xr import ecco_v4_py as ecco def global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap= 'YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True): """Generate the Robinson and Arctic/Antarctic plot. Parameters ---------- lat : xarray.DataArray lon : xarray.DataArray fld : xarray.DataArray plot_type : string, optional plot type to use, 'pcolormesh', or 'contourf' cmap : string or colormap object (TBD) cmin : double, optional minimum value for colorbar cmax : double, optional maximum value for colorbar dpi : int, optiopnal plot resolution in dots (pixels) per inch title,show_colorbar figsize? 
Output ------ """ cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax) plt.figure(figsize=(12, 6), dpi=dpi) fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap= cmap, plot_type=plot_type, subplot_grid=[2, 1, 1], projection_type= 'robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0) fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap= cmap, plot_type=plot_type, subplot_grid=[2, 2, 3], projection_type= 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50, user_lon_0=0) fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap= cmap, plot_type=plot_type, subplot_grid=[2, 2, 4], projection_type= 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40, user_lon_0=180) ax1.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2) ax2.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2) ax3.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2) if title is not None: fig.suptitle(title, verticalalignment='top', fontsize=24) if show_colorbar: fig.subplots_adjust(right=0.9) cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8]) fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar) return fig, (ax1, ax2, ax3) def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type= 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True): """2D plot of depth vs some other variable, stretching first 500m of depth. Parameters ---------- depth : xarray DataArray or numpy array depth variable x : xarray DataArray or numpy array variable for x-axis. 
Likely to be time, latitude, or longitude fld : xarray DataArray or numpy array 2D field with depth + 1 dim stretch_depth : scalar (int or float), optional stretch top depth to this limit """ if len(x) == fld.shape[0]: fld = fld.transpose() cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax) fig = plt.figure(figsize=(12, 6), dpi=dpi) ax1 = plt.subplot(2, 1, 1) if plot_type == 'pcolormesh': p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) elif plot_type == 'contourf': p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) plt.ylim([stretch_depth, 0]) ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100)) plt.ylabel('Depth [%s]' % depth.attrs['units']) ax1.xaxis.axes.set_xticklabels([]) ax2 = plt.subplot(2, 1, 2) if plot_type == 'pcolormesh': p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) elif plot_type == 'contourf': p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap) plt.ylim([depth.min(), stretch_depth]) yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000)) ax2.yaxis.axes.set_yticks(yticks) plt.ylabel('Depth [%s]' % depth.attrs['units']) fig.subplots_adjust(hspace=0.05) if title is not None: fig.suptitle(title, verticalalignment='top', fontsize=24) if show_colorbar: fig.subplots_adjust(right=0.83) cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8]) fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar) plt.show() return fig, ax1, ax2 def set_colorbar_limits(fld, cmin, cmax): """If unset, compute colorbar limits based on field max/min values, sequential/divergent Determine if colorbar needs to be extended Parameters ---------- fld : xarray.DataArray 2D field to be plotted Output ------ cmin : double colorbar min value cmax : double colorbar max value extend_cbar : string flag to colorbar extension """ if cmin is None and cmax is not None: raise RuntimeError('Only cmax given, must provide both cmin and cmax') elif cmin is not None and cmax is None: raise RuntimeError('Only cmin 
given, must provide both cmin and cmax') else: if type(cmin) is xr.DataArray: cmin = cmin.values() elif cmin is not None: raise TypeError('Unsure of cmin type: ', type(cmin)) if type(cmax) is xr.DataArray: cmax = cmax.values() elif cmax is not None: raise TypeError('Unsure of cmax type: ', type(cmax)) fld_min = fld.min(skipna=True).values fld_max = fld.max(skipna=True).values if cmin is None and cmax is None: cmin = fld_min cmax = fld_max if fld_max * fld_min < 0 and fld.name is not 'THETA': cmax = np.nanmax(np.abs(fld.values)) cmin = -cmax if cmin > fld_min and cmax < fld_max: extend_cbar = 'both' elif cmin > fld_min: extend_cbar = 'min' elif cmax < fld_max: extend_cbar = 'max' else: extend_cbar = 'neither' return cmin, cmax, extend_cbar <|reserved_special_token_1|> """ Module for generic standard analysis plots. """ import numpy as np import matplotlib.pyplot as plt import cartopy as cart import xarray as xr import ecco_v4_py as ecco def global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True): """Generate the Robinson and Arctic/Antarctic plot. Parameters ---------- lat : xarray.DataArray lon : xarray.DataArray fld : xarray.DataArray plot_type : string, optional plot type to use, 'pcolormesh', or 'contourf' cmap : string or colormap object (TBD) cmin : double, optional minimum value for colorbar cmax : double, optional maximum value for colorbar dpi : int, optiopnal plot resolution in dots (pixels) per inch title,show_colorbar figsize? Output ------ """ # to do # -figsize option? # -cmin/cmax defaults handling with plot_proj ... 
# -colorbar defaults with diverging/sequential # -number of colors in plot # -suppress dask warnings # -get the subplot size "just right" no matter the figsize # -arrows for when colorbar is exceeded # handle colorbar limits cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax) # default figsize which seems to work for a laptop screen plt.figure(figsize=(12,6),dpi=dpi) # the big top global plot fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid( lat,lon,fld, cmap=cmap, plot_type=plot_type, subplot_grid=[2,1,1], projection_type='robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0 ) # Arctic: bottom left fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid( lat,lon,fld, cmap=cmap, plot_type=plot_type, subplot_grid=[2,2,3], projection_type='stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50, user_lon_0=0 ) # ACC: bottom right fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid( lat,lon,fld, cmap=cmap, plot_type=plot_type, subplot_grid=[2,2,4], projection_type='stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40, user_lon_0=180 ) # Set land color to gray ax1.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2) ax2.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2) ax3.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2) # Make a single title if title is not None: fig.suptitle(title,verticalalignment='top',fontsize=24) # Make an overyling colorbar if show_colorbar: fig.subplots_adjust(right=0.9) cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8]) fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar) return fig, (ax1,ax2,ax3) def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type='pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True): """2D plot of depth vs some other variable, stretching first 500m of depth. Parameters ---------- depth : xarray DataArray or numpy array depth variable x : xarray DataArray or numpy array variable for x-axis. 
Likely to be time, latitude, or longitude fld : xarray DataArray or numpy array 2D field with depth + 1 dim stretch_depth : scalar (int or float), optional stretch top depth to this limit """ # Ensure negative values #if (depth>0).any(): # depth = -depth #if stretch_depth > 0: # stretch_depth = -stretch_depth # Handle shape if len(x) == fld.shape[0]: fld = fld.transpose() # handle colorbar limits cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax) # default figsize which seems to work for a laptop screen fig = plt.figure(figsize=(12,6),dpi=dpi) # Could also use plt.subplots here ... # First top 500m ax1 = plt.subplot(2,1,1) if plot_type == 'pcolormesh': p1 = ax1.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap) elif plot_type == 'contourf': p1 = ax1.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap) # Handle y-axis plt.ylim([stretch_depth, 0]) ax1.yaxis.axes.set_yticks(np.arange(stretch_depth,1,100)) plt.ylabel('Depth [%s]' % depth.attrs['units']) # Remove top plot xtick label ax1.xaxis.axes.set_xticklabels([]) # Now the rest ... 
ax2 = plt.subplot(2,1,2) if plot_type == 'pcolormesh': p2 = ax2.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap) elif plot_type == 'contourf': p2 = ax2.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap) # Handle y-axis plt.ylim([depth.min(), stretch_depth]) yticks = np.flip(np.arange(2*stretch_depth,depth.min(),-1000)) ax2.yaxis.axes.set_yticks(yticks) plt.ylabel('Depth [%s]' % depth.attrs['units']) # Reduce space between subplots fig.subplots_adjust(hspace=0.05) # Make a single title if title is not None: fig.suptitle(title,verticalalignment='top',fontsize=24) # Make an overyling colorbar if show_colorbar: fig.subplots_adjust(right=0.83) cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8]) fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar) plt.show() return fig,ax1,ax2 def set_colorbar_limits(fld,cmin,cmax): """If unset, compute colorbar limits based on field max/min values, sequential/divergent Determine if colorbar needs to be extended Parameters ---------- fld : xarray.DataArray 2D field to be plotted Output ------ cmin : double colorbar min value cmax : double colorbar max value extend_cbar : string flag to colorbar extension """ # handle input if (cmin is None) and (cmax is not None): raise RuntimeError('Only cmax given, must provide both cmin and cmax') elif (cmin is not None) and (cmax is None): raise RuntimeError('Only cmin given, must provide both cmin and cmax') else: # handle colorbar limits accidentally passed as with xarray functions if type(cmin) is xr.DataArray: cmin = cmin.values() elif cmin is not None: raise TypeError('Unsure of cmin type: ',type(cmin)) if type(cmax) is xr.DataArray: cmax = cmax.values() elif cmax is not None: raise TypeError('Unsure of cmax type: ',type(cmax)) # compute fld limits fld_min = fld.min(skipna=True).values fld_max = fld.max(skipna=True).values # if cmin/cmax not set, compute if (cmin is None) and (cmax is None): cmin = fld_min cmax = fld_max # determine if divergent colorbar # Note: Not making divergent colorbar for 
temperature # in degC because still sequential even though +/- if (fld_max*fld_min < 0) and (fld.name is not 'THETA'): cmax = np.nanmax(np.abs(fld.values)) cmin = -cmax # determine if colorbar needs to be extended if (cmin > fld_min) and (cmax < fld_max): extend_cbar = "both" elif cmin > fld_min: extend_cbar = "min" elif cmax < fld_max: extend_cbar = "max" else: extend_cbar = "neither" return cmin, cmax, extend_cbar
flexible
{ "blob_id": "b039ed74e62f3a74e8506d4e14a3422499046c06", "index": 860, "step-1": "<mask token>\n\n\ndef plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=\n 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n fig = plt.figure(figsize=(12, 6), dpi=dpi)\n ax1 = plt.subplot(2, 1, 1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n ax1.xaxis.axes.set_xticklabels([])\n ax2 = plt.subplot(2, 1, 2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n fig.subplots_adjust(hspace=0.05)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n plt.show()\n return 
fig, ax1, ax2\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=\n 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n fig = plt.figure(figsize=(12, 6), dpi=dpi)\n ax1 = plt.subplot(2, 1, 1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n ax1.xaxis.axes.set_xticklabels([])\n ax2 = plt.subplot(2, 1, 2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n fig.subplots_adjust(hspace=0.05)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n plt.show()\n return fig, ax1, ax2\n\n\ndef 
set_colorbar_limits(fld, cmin, cmax):\n \"\"\"If unset, compute colorbar limits based on field max/min values, sequential/divergent\n Determine if colorbar needs to be extended\n\n Parameters\n ----------\n fld : xarray.DataArray\n 2D field to be plotted\n\n Output\n ------\n cmin : double \n colorbar min value\n cmax : double \n colorbar max value\n extend_cbar : string \n flag to colorbar extension\n\n \"\"\"\n if cmin is None and cmax is not None:\n raise RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif cmin is not None and cmax is None:\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ', type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ', type(cmax))\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n if cmin is None and cmax is None:\n cmin = fld_min\n cmax = fld_max\n if fld_max * fld_min < 0 and fld.name is not 'THETA':\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n if cmin > fld_min and cmax < fld_max:\n extend_cbar = 'both'\n elif cmin > fld_min:\n extend_cbar = 'min'\n elif cmax < fld_max:\n extend_cbar = 'max'\n else:\n extend_cbar = 'neither'\n return cmin, cmax, extend_cbar\n", "step-3": "<mask token>\n\n\ndef global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap=\n 'YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True):\n \"\"\"Generate the Robinson and Arctic/Antarctic plot.\n\n Parameters\n ----------\n lat : xarray.DataArray \n\n lon : xarray.DataArray\n\n fld : xarray.DataArray\n\n plot_type : string, optional\n plot type to use, 'pcolormesh', or 'contourf'\n\n cmap : string or colormap object (TBD)\n\n cmin : double, optional\n minimum value for colorbar\n\n cmax : double, optional\n maximum value for colorbar\n\n dpi : 
int, optiopnal\n plot resolution in dots (pixels) per inch\n\n title,show_colorbar\n \n figsize?\n\n Output\n ------\n\n \"\"\"\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n plt.figure(figsize=(12, 6), dpi=dpi)\n fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 1, 1], projection_type=\n 'robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0)\n fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 2, 3], projection_type=\n 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50,\n user_lon_0=0)\n fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 2, 4], projection_type=\n 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40,\n user_lon_0=180)\n ax1.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n ax2.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n ax3.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)\n return fig, (ax1, ax2, ax3)\n\n\ndef plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=\n 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. 
Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n fig = plt.figure(figsize=(12, 6), dpi=dpi)\n ax1 = plt.subplot(2, 1, 1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n ax1.xaxis.axes.set_xticklabels([])\n ax2 = plt.subplot(2, 1, 2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n fig.subplots_adjust(hspace=0.05)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n plt.show()\n return fig, ax1, ax2\n\n\ndef set_colorbar_limits(fld, cmin, cmax):\n \"\"\"If unset, compute colorbar limits based on field max/min values, sequential/divergent\n Determine if colorbar needs to be extended\n\n Parameters\n ----------\n fld : xarray.DataArray\n 2D field to be plotted\n\n Output\n ------\n cmin : double \n colorbar min value\n cmax : double \n colorbar max value\n extend_cbar : string \n flag to colorbar extension\n\n \"\"\"\n if cmin is None and cmax is not None:\n raise 
RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif cmin is not None and cmax is None:\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ', type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ', type(cmax))\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n if cmin is None and cmax is None:\n cmin = fld_min\n cmax = fld_max\n if fld_max * fld_min < 0 and fld.name is not 'THETA':\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n if cmin > fld_min and cmax < fld_max:\n extend_cbar = 'both'\n elif cmin > fld_min:\n extend_cbar = 'min'\n elif cmax < fld_max:\n extend_cbar = 'max'\n else:\n extend_cbar = 'neither'\n return cmin, cmax, extend_cbar\n", "step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy as cart\nimport xarray as xr\nimport ecco_v4_py as ecco\n\n\ndef global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap=\n 'YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True):\n \"\"\"Generate the Robinson and Arctic/Antarctic plot.\n\n Parameters\n ----------\n lat : xarray.DataArray \n\n lon : xarray.DataArray\n\n fld : xarray.DataArray\n\n plot_type : string, optional\n plot type to use, 'pcolormesh', or 'contourf'\n\n cmap : string or colormap object (TBD)\n\n cmin : double, optional\n minimum value for colorbar\n\n cmax : double, optional\n maximum value for colorbar\n\n dpi : int, optiopnal\n plot resolution in dots (pixels) per inch\n\n title,show_colorbar\n \n figsize?\n\n Output\n ------\n\n \"\"\"\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n plt.figure(figsize=(12, 6), dpi=dpi)\n fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, 
subplot_grid=[2, 1, 1], projection_type=\n 'robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0)\n fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 2, 3], projection_type=\n 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50,\n user_lon_0=0)\n fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 2, 4], projection_type=\n 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40,\n user_lon_0=180)\n ax1.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n ax2.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n ax3.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)\n return fig, (ax1, ax2, ax3)\n\n\ndef plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=\n 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. 
Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n fig = plt.figure(figsize=(12, 6), dpi=dpi)\n ax1 = plt.subplot(2, 1, 1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n ax1.xaxis.axes.set_xticklabels([])\n ax2 = plt.subplot(2, 1, 2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n fig.subplots_adjust(hspace=0.05)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n plt.show()\n return fig, ax1, ax2\n\n\ndef set_colorbar_limits(fld, cmin, cmax):\n \"\"\"If unset, compute colorbar limits based on field max/min values, sequential/divergent\n Determine if colorbar needs to be extended\n\n Parameters\n ----------\n fld : xarray.DataArray\n 2D field to be plotted\n\n Output\n ------\n cmin : double \n colorbar min value\n cmax : double \n colorbar max value\n extend_cbar : string \n flag to colorbar extension\n\n \"\"\"\n if cmin is None and cmax is not None:\n raise 
RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif cmin is not None and cmax is None:\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ', type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ', type(cmax))\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n if cmin is None and cmax is None:\n cmin = fld_min\n cmax = fld_max\n if fld_max * fld_min < 0 and fld.name is not 'THETA':\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n if cmin > fld_min and cmax < fld_max:\n extend_cbar = 'both'\n elif cmin > fld_min:\n extend_cbar = 'min'\n elif cmax < fld_max:\n extend_cbar = 'max'\n else:\n extend_cbar = 'neither'\n return cmin, cmax, extend_cbar\n", "step-5": "\"\"\"\nModule for generic standard analysis plots.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy as cart\nimport xarray as xr\nimport ecco_v4_py as ecco\n\n\ndef global_and_stereo_map(lat, lon, fld,\n plot_type='pcolormesh',\n cmap='YlOrRd',\n title=None,\n cmin=None,\n cmax=None,\n dpi=100,\n show_colorbar=True):\n\n \"\"\"Generate the Robinson and Arctic/Antarctic plot.\n\n Parameters\n ----------\n lat : xarray.DataArray \n\n lon : xarray.DataArray\n\n fld : xarray.DataArray\n\n plot_type : string, optional\n plot type to use, 'pcolormesh', or 'contourf'\n\n cmap : string or colormap object (TBD)\n\n cmin : double, optional\n minimum value for colorbar\n\n cmax : double, optional\n maximum value for colorbar\n\n dpi : int, optiopnal\n plot resolution in dots (pixels) per inch\n\n title,show_colorbar\n \n figsize?\n\n Output\n ------\n\n \"\"\"\n\n # to do\n # -figsize option?\n # -cmin/cmax defaults handling with plot_proj ... 
\n # -colorbar defaults with diverging/sequential\n # -number of colors in plot\n # -suppress dask warnings\n # -get the subplot size \"just right\" no matter the figsize\n # -arrows for when colorbar is exceeded\n\n # handle colorbar limits\n cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)\n\n # default figsize which seems to work for a laptop screen\n plt.figure(figsize=(12,6),dpi=dpi)\n\n # the big top global plot\n fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,1,1],\n projection_type='robin',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n user_lon_0=0\n )\n\n # Arctic: bottom left\n fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,2,3],\n projection_type='stereo',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n lat_lim=50,\n user_lon_0=0\n )\n\n\n # ACC: bottom right\n fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,2,4],\n projection_type='stereo',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n lat_lim=-40,\n user_lon_0=180\n )\n\n # Set land color to gray\n ax1.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n ax2.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n ax3.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n\n # Make a single title\n if title is not None:\n fig.suptitle(title,verticalalignment='top',fontsize=24)\n\n # Make an overyling colorbar\n if show_colorbar:\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)\n\n\n\n return fig, (ax1,ax2,ax3)\n\ndef plot_depth_slice(x, depth, fld, \n stretch_depth=-500,\n plot_type='pcolormesh',\n cmap='YlOrRd',\n title=None,\n cmin=None,\n cmax=None,\n dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n 
Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n\n # Ensure negative values \n #if (depth>0).any():\n # depth = -depth\n\n #if stretch_depth > 0:\n # stretch_depth = -stretch_depth\n\n # Handle shape\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n\n # handle colorbar limits\n cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)\n\n # default figsize which seems to work for a laptop screen\n fig = plt.figure(figsize=(12,6),dpi=dpi)\n\n # Could also use plt.subplots here ...\n\n # First top 500m\n ax1 = plt.subplot(2,1,1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)\n\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)\n\n # Handle y-axis\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth,1,100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n\n\n # Remove top plot xtick label\n ax1.xaxis.axes.set_xticklabels([])\n\n # Now the rest ...\n ax2 = plt.subplot(2,1,2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)\n\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)\n\n # Handle y-axis\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2*stretch_depth,depth.min(),-1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n\n # Reduce space between subplots\n fig.subplots_adjust(hspace=0.05)\n\n # Make a single title\n if title is not None:\n fig.suptitle(title,verticalalignment='top',fontsize=24)\n\n # Make an overyling colorbar\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n 
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n\n plt.show()\n\n return fig,ax1,ax2\n\n\ndef set_colorbar_limits(fld,cmin,cmax):\n \"\"\"If unset, compute colorbar limits based on field max/min values, sequential/divergent\n Determine if colorbar needs to be extended\n\n Parameters\n ----------\n fld : xarray.DataArray\n 2D field to be plotted\n\n Output\n ------\n cmin : double \n colorbar min value\n cmax : double \n colorbar max value\n extend_cbar : string \n flag to colorbar extension\n\n \"\"\"\n\n # handle input\n if (cmin is None) and (cmax is not None):\n raise RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif (cmin is not None) and (cmax is None):\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n # handle colorbar limits accidentally passed as with xarray functions\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ',type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ',type(cmax))\n\n # compute fld limits\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n\n # if cmin/cmax not set, compute\n if (cmin is None) and (cmax is None):\n\n cmin = fld_min\n cmax = fld_max\n\n # determine if divergent colorbar \n # Note: Not making divergent colorbar for temperature\n # in degC because still sequential even though +/-\n if (fld_max*fld_min < 0) and (fld.name is not 'THETA'):\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n\n # determine if colorbar needs to be extended\n if (cmin > fld_min) and (cmax < fld_max):\n extend_cbar = \"both\"\n elif cmin > fld_min:\n extend_cbar = \"min\"\n elif cmax < fld_max:\n extend_cbar = \"max\"\n else:\n extend_cbar = \"neither\"\n\n return cmin, cmax, extend_cbar\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import sys if sys.version_info.major == 2: from itertools import izip else: izip = zip
normal
{ "blob_id": "88445d8466d7acbf29d2525c7e322611d66494cd", "index": 8315, "step-1": "<mask token>\n", "step-2": "<mask token>\nif sys.version_info.major == 2:\n from itertools import izip\nelse:\n izip = zip\n", "step-3": "import sys\nif sys.version_info.major == 2:\n from itertools import izip\nelse:\n izip = zip\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from PyQt5.QtCore import * from PyQt5.QtWidgets import * from PyQt5.QtSql import * from DatabaseHandler import send_answer class PW(QWidget): def __init__(self, index, question, pid): super().__init__() self.question = question self.pid = pid self.maxim = len(self.question) self.index = index self.Pat = None print(self.maxim) self.setWindowTitle("Вопрос №" + str(question[self.index]['qid'])) self.setFixedSize(QSize(300, 400)) questionLayout = QHBoxLayout() answerLayout = QHBoxLayout() pageLayout = QVBoxLayout() self.questionLabel = QLabel(question[self.index]['question']) self.questionLabel.setAlignment(Qt.AlignCenter) buttonYes = QPushButton("Да") buttonNo = QPushButton("Нет") questionLayout.addWidget(self.questionLabel) answerLayout.addWidget(buttonYes) answerLayout.addWidget(buttonNo) pageLayout.addLayout(questionLayout) pageLayout.addLayout(answerLayout) self.setLayout(pageLayout) buttonYes.clicked.connect(self.ButtonYesAction) buttonNo.clicked.connect(self.ButtonNoAction) def ButtonYesAction(self): table = "patient_" + str(self.pid) send_answer(self.question[self.index]['qid'], 'Да', table) if (self.index<self.maxim-1): self.Pat = PW(self.index + 1, self.question, self.pid) self.Pat.show() self.close() def ButtonNoAction(self): table = "patient_" + str(self.pid) send_answer(self.question[self.index]['qid'], 'Нет', table) if (self.index<self.maxim-1): self.Pat = PW(self.index + 1, self.question, self.pid) self.Pat.show() self.close()
normal
{ "blob_id": "f35569e2d8d26f43d4b2395b5088902c6cd3b826", "index": 2232, "step-1": "<mask token>\n\n\nclass PW(QWidget):\n <mask token>\n <mask token>\n\n def ButtonNoAction(self):\n table = 'patient_' + str(self.pid)\n send_answer(self.question[self.index]['qid'], 'Нет', table)\n if self.index < self.maxim - 1:\n self.Pat = PW(self.index + 1, self.question, self.pid)\n self.Pat.show()\n self.close()\n", "step-2": "<mask token>\n\n\nclass PW(QWidget):\n\n def __init__(self, index, question, pid):\n super().__init__()\n self.question = question\n self.pid = pid\n self.maxim = len(self.question)\n self.index = index\n self.Pat = None\n print(self.maxim)\n self.setWindowTitle('Вопрос №' + str(question[self.index]['qid']))\n self.setFixedSize(QSize(300, 400))\n questionLayout = QHBoxLayout()\n answerLayout = QHBoxLayout()\n pageLayout = QVBoxLayout()\n self.questionLabel = QLabel(question[self.index]['question'])\n self.questionLabel.setAlignment(Qt.AlignCenter)\n buttonYes = QPushButton('Да')\n buttonNo = QPushButton('Нет')\n questionLayout.addWidget(self.questionLabel)\n answerLayout.addWidget(buttonYes)\n answerLayout.addWidget(buttonNo)\n pageLayout.addLayout(questionLayout)\n pageLayout.addLayout(answerLayout)\n self.setLayout(pageLayout)\n buttonYes.clicked.connect(self.ButtonYesAction)\n buttonNo.clicked.connect(self.ButtonNoAction)\n <mask token>\n\n def ButtonNoAction(self):\n table = 'patient_' + str(self.pid)\n send_answer(self.question[self.index]['qid'], 'Нет', table)\n if self.index < self.maxim - 1:\n self.Pat = PW(self.index + 1, self.question, self.pid)\n self.Pat.show()\n self.close()\n", "step-3": "<mask token>\n\n\nclass PW(QWidget):\n\n def __init__(self, index, question, pid):\n super().__init__()\n self.question = question\n self.pid = pid\n self.maxim = len(self.question)\n self.index = index\n self.Pat = None\n print(self.maxim)\n self.setWindowTitle('Вопрос №' + str(question[self.index]['qid']))\n self.setFixedSize(QSize(300, 400))\n 
questionLayout = QHBoxLayout()\n answerLayout = QHBoxLayout()\n pageLayout = QVBoxLayout()\n self.questionLabel = QLabel(question[self.index]['question'])\n self.questionLabel.setAlignment(Qt.AlignCenter)\n buttonYes = QPushButton('Да')\n buttonNo = QPushButton('Нет')\n questionLayout.addWidget(self.questionLabel)\n answerLayout.addWidget(buttonYes)\n answerLayout.addWidget(buttonNo)\n pageLayout.addLayout(questionLayout)\n pageLayout.addLayout(answerLayout)\n self.setLayout(pageLayout)\n buttonYes.clicked.connect(self.ButtonYesAction)\n buttonNo.clicked.connect(self.ButtonNoAction)\n\n def ButtonYesAction(self):\n table = 'patient_' + str(self.pid)\n send_answer(self.question[self.index]['qid'], 'Да', table)\n if self.index < self.maxim - 1:\n self.Pat = PW(self.index + 1, self.question, self.pid)\n self.Pat.show()\n self.close()\n\n def ButtonNoAction(self):\n table = 'patient_' + str(self.pid)\n send_answer(self.question[self.index]['qid'], 'Нет', table)\n if self.index < self.maxim - 1:\n self.Pat = PW(self.index + 1, self.question, self.pid)\n self.Pat.show()\n self.close()\n", "step-4": "from PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtSql import *\nfrom DatabaseHandler import send_answer\n\n\nclass PW(QWidget):\n\n def __init__(self, index, question, pid):\n super().__init__()\n self.question = question\n self.pid = pid\n self.maxim = len(self.question)\n self.index = index\n self.Pat = None\n print(self.maxim)\n self.setWindowTitle('Вопрос №' + str(question[self.index]['qid']))\n self.setFixedSize(QSize(300, 400))\n questionLayout = QHBoxLayout()\n answerLayout = QHBoxLayout()\n pageLayout = QVBoxLayout()\n self.questionLabel = QLabel(question[self.index]['question'])\n self.questionLabel.setAlignment(Qt.AlignCenter)\n buttonYes = QPushButton('Да')\n buttonNo = QPushButton('Нет')\n questionLayout.addWidget(self.questionLabel)\n answerLayout.addWidget(buttonYes)\n answerLayout.addWidget(buttonNo)\n 
pageLayout.addLayout(questionLayout)\n pageLayout.addLayout(answerLayout)\n self.setLayout(pageLayout)\n buttonYes.clicked.connect(self.ButtonYesAction)\n buttonNo.clicked.connect(self.ButtonNoAction)\n\n def ButtonYesAction(self):\n table = 'patient_' + str(self.pid)\n send_answer(self.question[self.index]['qid'], 'Да', table)\n if self.index < self.maxim - 1:\n self.Pat = PW(self.index + 1, self.question, self.pid)\n self.Pat.show()\n self.close()\n\n def ButtonNoAction(self):\n table = 'patient_' + str(self.pid)\n send_answer(self.question[self.index]['qid'], 'Нет', table)\n if self.index < self.maxim - 1:\n self.Pat = PW(self.index + 1, self.question, self.pid)\n self.Pat.show()\n self.close()\n", "step-5": "from PyQt5.QtCore import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtSql import *\r\nfrom DatabaseHandler import send_answer\r\n\r\nclass PW(QWidget):\r\n def __init__(self, index, question, pid):\r\n super().__init__()\r\n\r\n self.question = question\r\n self.pid = pid\r\n self.maxim = len(self.question)\r\n self.index = index\r\n self.Pat = None\r\n print(self.maxim)\r\n\r\n\r\n self.setWindowTitle(\"Вопрос №\" + str(question[self.index]['qid']))\r\n self.setFixedSize(QSize(300, 400))\r\n\r\n\r\n questionLayout = QHBoxLayout()\r\n answerLayout = QHBoxLayout()\r\n pageLayout = QVBoxLayout()\r\n\r\n self.questionLabel = QLabel(question[self.index]['question'])\r\n self.questionLabel.setAlignment(Qt.AlignCenter)\r\n buttonYes = QPushButton(\"Да\")\r\n buttonNo = QPushButton(\"Нет\")\r\n\r\n questionLayout.addWidget(self.questionLabel)\r\n answerLayout.addWidget(buttonYes)\r\n answerLayout.addWidget(buttonNo)\r\n\r\n pageLayout.addLayout(questionLayout)\r\n pageLayout.addLayout(answerLayout)\r\n\r\n self.setLayout(pageLayout)\r\n\r\n buttonYes.clicked.connect(self.ButtonYesAction)\r\n buttonNo.clicked.connect(self.ButtonNoAction)\r\n \r\n def ButtonYesAction(self):\r\n table = \"patient_\" + str(self.pid)\r\n 
send_answer(self.question[self.index]['qid'], 'Да', table)\r\n if (self.index<self.maxim-1):\r\n self.Pat = PW(self.index + 1, self.question, self.pid)\r\n self.Pat.show()\r\n self.close()\r\n \r\n def ButtonNoAction(self):\r\n table = \"patient_\" + str(self.pid)\r\n send_answer(self.question[self.index]['qid'], 'Нет', table)\r\n if (self.index<self.maxim-1):\r\n self.Pat = PW(self.index + 1, self.question, self.pid)\r\n self.Pat.show()\r\n self.close()\r\n \r\n\r\n \r\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> driver.maximize_window() driver.get('http://www.toolsqa.com/iframe-practice-page/') driver.switch_to.default_content() driver.find_element_by_xpath("//span[text()='VIDEOS']").click() <|reserved_special_token_1|> <|reserved_special_token_0|> path = ( '/Users/karimovrustam/PycharmProjects/01.23.2020_SeleniumAutomation/drivers/chromedriver' ) driver = Chrome(executable_path=path) driver.maximize_window() driver.get('http://www.toolsqa.com/iframe-practice-page/') driver.switch_to.default_content() driver.find_element_by_xpath("//span[text()='VIDEOS']").click() <|reserved_special_token_1|> from selenium.webdriver import Chrome path = ( '/Users/karimovrustam/PycharmProjects/01.23.2020_SeleniumAutomation/drivers/chromedriver' ) driver = Chrome(executable_path=path) driver.maximize_window() driver.get('http://www.toolsqa.com/iframe-practice-page/') driver.switch_to.default_content() driver.find_element_by_xpath("//span[text()='VIDEOS']").click() <|reserved_special_token_1|> from selenium.webdriver import Chrome path=("/Users/karimovrustam/PycharmProjects/01.23.2020_SeleniumAutomation/drivers/chromedriver") driver=Chrome(executable_path=path) driver.maximize_window() driver.get("http://www.toolsqa.com/iframe-practice-page/") # driver.switch_to.frame("iframe2") # When working with few windows, you need switch to necessary # # or # # driver.switch_to.frame("IF2") # # or # # driver.switch_to.frame(driver.find_element_by_xpath("//iframe[@name='iframe2']")) # driver.find_element_by_xpath("//a[contains(text(),'Read more')]").click() driver.switch_to.default_content() # When you need stop working with one window, and come to whole page driver.find_element_by_xpath("//span[text()='VIDEOS']").click() # TODO: Could not reproduce looking for XPath through switching windows. Repeat it!
flexible
{ "blob_id": "53eb1dcd54ce43d9844c48eb1d79f122a87dca39", "index": 3831, "step-1": "<mask token>\n", "step-2": "<mask token>\ndriver.maximize_window()\ndriver.get('http://www.toolsqa.com/iframe-practice-page/')\ndriver.switch_to.default_content()\ndriver.find_element_by_xpath(\"//span[text()='VIDEOS']\").click()\n", "step-3": "<mask token>\npath = (\n '/Users/karimovrustam/PycharmProjects/01.23.2020_SeleniumAutomation/drivers/chromedriver'\n )\ndriver = Chrome(executable_path=path)\ndriver.maximize_window()\ndriver.get('http://www.toolsqa.com/iframe-practice-page/')\ndriver.switch_to.default_content()\ndriver.find_element_by_xpath(\"//span[text()='VIDEOS']\").click()\n", "step-4": "from selenium.webdriver import Chrome\npath = (\n '/Users/karimovrustam/PycharmProjects/01.23.2020_SeleniumAutomation/drivers/chromedriver'\n )\ndriver = Chrome(executable_path=path)\ndriver.maximize_window()\ndriver.get('http://www.toolsqa.com/iframe-practice-page/')\ndriver.switch_to.default_content()\ndriver.find_element_by_xpath(\"//span[text()='VIDEOS']\").click()\n", "step-5": "from selenium.webdriver import Chrome\n\npath=(\"/Users/karimovrustam/PycharmProjects/01.23.2020_SeleniumAutomation/drivers/chromedriver\")\ndriver=Chrome(executable_path=path)\ndriver.maximize_window()\ndriver.get(\"http://www.toolsqa.com/iframe-practice-page/\")\n\n\n# driver.switch_to.frame(\"iframe2\") # When working with few windows, you need switch to necessary\n# # or\n# # driver.switch_to.frame(\"IF2\")\n# # or\n# # driver.switch_to.frame(driver.find_element_by_xpath(\"//iframe[@name='iframe2']\"))\n# driver.find_element_by_xpath(\"//a[contains(text(),'Read more')]\").click()\n\ndriver.switch_to.default_content() # When you need stop working with one window, and come to whole page\ndriver.find_element_by_xpath(\"//span[text()='VIDEOS']\").click()\n\n# TODO: Could not reproduce looking for XPath through switching windows. Repeat it!", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> send(ip / ack_packet) <|reserved_special_token_1|> <|reserved_special_token_0|> ip = IP(src=sys.argv[1], dst=sys.argv[2]) syn_packet = TCP(sport=52255, dport=1237, flags='S', seq=100, options=[( 'MSS', 689), ('WScale', 1)]) synack_packet = sr1(ip / syn_packet) my_ack = synack_packet.seq + 1 ack_packet = TCP(sport=52255, dport=1237, flags='A', seq=101, ack=my_ack) send(ip / ack_packet) <|reserved_special_token_1|> from scapy.all import * import sys ip = IP(src=sys.argv[1], dst=sys.argv[2]) syn_packet = TCP(sport=52255, dport=1237, flags='S', seq=100, options=[( 'MSS', 689), ('WScale', 1)]) synack_packet = sr1(ip / syn_packet) my_ack = synack_packet.seq + 1 ack_packet = TCP(sport=52255, dport=1237, flags='A', seq=101, ack=my_ack) send(ip / ack_packet) <|reserved_special_token_1|> #! /usr/bin/python3 from scapy.all import * import sys ip=IP(src=sys.argv[1], dst=sys.argv[2]) syn_packet = TCP(sport=52255, dport=1237, flags="S", seq=100, options=[('MSS',689),('WScale',1)]) synack_packet = sr1(ip/syn_packet) my_ack = synack_packet.seq+1 ack_packet = TCP(sport=52255, dport=1237, flags="A", seq=101, ack=my_ack) send(ip/ack_packet)
flexible
{ "blob_id": "acd6197e60cf59ffcaa33bb50a60a03592bb3559", "index": 7169, "step-1": "<mask token>\n", "step-2": "<mask token>\nsend(ip / ack_packet)\n", "step-3": "<mask token>\nip = IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags='S', seq=100, options=[(\n 'MSS', 689), ('WScale', 1)])\nsynack_packet = sr1(ip / syn_packet)\nmy_ack = synack_packet.seq + 1\nack_packet = TCP(sport=52255, dport=1237, flags='A', seq=101, ack=my_ack)\nsend(ip / ack_packet)\n", "step-4": "from scapy.all import *\nimport sys\nip = IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags='S', seq=100, options=[(\n 'MSS', 689), ('WScale', 1)])\nsynack_packet = sr1(ip / syn_packet)\nmy_ack = synack_packet.seq + 1\nack_packet = TCP(sport=52255, dport=1237, flags='A', seq=101, ack=my_ack)\nsend(ip / ack_packet)\n", "step-5": "#! /usr/bin/python3\n\nfrom scapy.all import *\nimport sys\n\nip=IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags=\"S\", seq=100, options=[('MSS',689),('WScale',1)])\nsynack_packet = sr1(ip/syn_packet)\nmy_ack = synack_packet.seq+1\nack_packet = TCP(sport=52255, dport=1237, flags=\"A\", seq=101, ack=my_ack)\nsend(ip/ack_packet)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> KEYS = ['CM', 'GM'] NOTES_FOR_KEY = {'CM': [21, 23, 24, 26, 28, 29, 31, 33, 35, 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96, 98, 100, 101, 103, 105, 107, 108], 'GM': [21, 23, 24, 26, 28, 30, 31, 33, 35, 36, 38, 40, 42, 43, 45, 47, 48, 50, 52, 54, 55, 57, 59, 60, 62, 64, 66, 67, 69, 71, 72, 74, 76, 78, 79, 81, 83, 84, 86, 88, 90, 91, 93, 95, 96, 98, 100, 102, 103, 105, 107, 108], 'DM': [], 'AM': [], 'EM': [], 'BM': [], 'FSM': [], 'CSM': [], 'Am': [], 'Em': [], 'Bm': [], 'FSm': [], 'CSm': [], 'GSm': [], 'DSm': [], 'ASm': []} TONIC_NOTE_FOR_KEY = {'CM': 60, 'GM': 67, 'DM': None, 'AM': None, 'EM': None, 'BM': None, 'FSM': None, 'CSM': None, 'Am': None, 'Em': None, 'Bm': None, 'FSm': None, 'CSm': None, 'GSm': None, 'DSm': None, 'ASm': None } STEPS_FOR_CHORD = {'major_triad': [0, 4, 7]} NOTE_IN_KEY_REWARD = 1 NOTE_IN_CHORDS_REWARD = 1 SUPER_CONSONANT_INTERVAL_REWARD = 3 CONSONANT_INTERVAL_REWARD = 2 SOMEWHAT_CONSONANT_INTERVAL_REWARD = 1 DISSONANT_INTERVAL_REWARD = -2 SOMEWHAT_DISSONANT_INTERVAL_REWARD = -1 CENTRICITY_FACTOR = 1 <|reserved_special_token_1|> """ A module for constants. 
""" # fin adding notes for keys and uncomment KEYS = [ "CM", "GM" # , # "DM", # "AM", # "EM", # "BM", # "FSM", # "CSM", # "Am", # "Em", # "Bm", # "FSm", # "CSm", # "GSm", # "DSm", # "ASm", ] NOTES_FOR_KEY = { "CM": [ 21, 23, 24, 26, 28, 29, 31, 33, 35, 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96, 98, 100, 101, 103, 105, 107, 108, ], "GM": [ 21, 23, 24, 26, 28, 30, 31, 33, 35, 36, 38, 40, 42, 43, 45, 47, 48, 50, 52, 54, 55, 57, 59, 60, 62, 64, 66, 67, 69, 71, 72, 74, 76, 78, 79, 81, 83, 84, 86, 88, 90, 91, 93, 95, 96, 98, 100, 102, 103, 105, 107, 108, ], "DM": [], "AM": [], "EM": [], "BM": [], "FSM": [], "CSM": [], "Am": [], "Em": [], "Bm": [], "FSm": [], "CSm": [], "GSm": [], "DSm": [], "ASm": [], } TONIC_NOTE_FOR_KEY = { "CM": 60, "GM": 67, "DM": None, "AM": None, "EM": None, "BM": None, "FSM": None, "CSM": None, "Am": None, "Em": None, "Bm": None, "FSm": None, "CSm": None, "GSm": None, "DSm": None, "ASm": None, } # add more chords later STEPS_FOR_CHORD = {"major_triad": [0, 4, 7]} # constants for value function # add more complex rewards NOTE_IN_KEY_REWARD = 1 NOTE_IN_CHORDS_REWARD = 1 SUPER_CONSONANT_INTERVAL_REWARD = 3 CONSONANT_INTERVAL_REWARD = 2 SOMEWHAT_CONSONANT_INTERVAL_REWARD = 1 DISSONANT_INTERVAL_REWARD = -2 SOMEWHAT_DISSONANT_INTERVAL_REWARD = -1 CENTRICITY_FACTOR = 1 # reward is number of times note occured before * CENTRICITY_FACTOR
flexible
{ "blob_id": "dd7ade05ef912f7c094883507768cc21f95f31f6", "index": 533, "step-1": "<mask token>\n", "step-2": "<mask token>\nKEYS = ['CM', 'GM']\nNOTES_FOR_KEY = {'CM': [21, 23, 24, 26, 28, 29, 31, 33, 35, 36, 38, 40, 41,\n 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72,\n 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96, 98, 100, 101, \n 103, 105, 107, 108], 'GM': [21, 23, 24, 26, 28, 30, 31, 33, 35, 36, 38,\n 40, 42, 43, 45, 47, 48, 50, 52, 54, 55, 57, 59, 60, 62, 64, 66, 67, 69,\n 71, 72, 74, 76, 78, 79, 81, 83, 84, 86, 88, 90, 91, 93, 95, 96, 98, 100,\n 102, 103, 105, 107, 108], 'DM': [], 'AM': [], 'EM': [], 'BM': [], 'FSM':\n [], 'CSM': [], 'Am': [], 'Em': [], 'Bm': [], 'FSm': [], 'CSm': [],\n 'GSm': [], 'DSm': [], 'ASm': []}\nTONIC_NOTE_FOR_KEY = {'CM': 60, 'GM': 67, 'DM': None, 'AM': None, 'EM':\n None, 'BM': None, 'FSM': None, 'CSM': None, 'Am': None, 'Em': None,\n 'Bm': None, 'FSm': None, 'CSm': None, 'GSm': None, 'DSm': None, 'ASm': None\n }\nSTEPS_FOR_CHORD = {'major_triad': [0, 4, 7]}\nNOTE_IN_KEY_REWARD = 1\nNOTE_IN_CHORDS_REWARD = 1\nSUPER_CONSONANT_INTERVAL_REWARD = 3\nCONSONANT_INTERVAL_REWARD = 2\nSOMEWHAT_CONSONANT_INTERVAL_REWARD = 1\nDISSONANT_INTERVAL_REWARD = -2\nSOMEWHAT_DISSONANT_INTERVAL_REWARD = -1\nCENTRICITY_FACTOR = 1\n", "step-3": "\"\"\"\nA module for constants.\n\n\"\"\"\n\n# fin adding notes for keys and uncomment \nKEYS = [\n \"CM\",\n \"GM\"\n # ,\n # \"DM\",\n # \"AM\",\n # \"EM\",\n # \"BM\",\n # \"FSM\",\n # \"CSM\",\n # \"Am\",\n # \"Em\",\n # \"Bm\",\n # \"FSm\",\n # \"CSm\",\n # \"GSm\",\n # \"DSm\",\n # \"ASm\",\n]\n\nNOTES_FOR_KEY = {\n \"CM\": [\n 21,\n 23,\n 24,\n 26,\n 28,\n 29,\n 31,\n 33,\n 35,\n 36,\n 38,\n 40,\n 41,\n 43,\n 45,\n 47,\n 48,\n 50,\n 52,\n 53,\n 55,\n 57,\n 59,\n 60,\n 62,\n 64,\n 65,\n 67,\n 69,\n 71,\n 72,\n 74,\n 76,\n 77,\n 79,\n 81,\n 83,\n 84,\n 86,\n 88,\n 89,\n 91,\n 93,\n 95,\n 96,\n 98,\n 100,\n 101,\n 103,\n 105,\n 107,\n 108,\n ],\n \"GM\": [\n 21,\n 23,\n 24,\n 
26,\n 28,\n 30,\n 31,\n 33,\n 35,\n 36,\n 38,\n 40,\n 42,\n 43,\n 45,\n 47,\n 48,\n 50,\n 52,\n 54,\n 55,\n 57,\n 59,\n 60,\n 62,\n 64,\n 66,\n 67,\n 69,\n 71,\n 72,\n 74,\n 76,\n 78,\n 79,\n 81,\n 83,\n 84,\n 86,\n 88,\n 90,\n 91,\n 93,\n 95,\n 96,\n 98,\n 100,\n 102,\n 103,\n 105,\n 107,\n 108,\n ],\n \"DM\": [],\n \"AM\": [],\n \"EM\": [],\n \"BM\": [],\n \"FSM\": [],\n \"CSM\": [],\n \"Am\": [],\n \"Em\": [],\n \"Bm\": [],\n \"FSm\": [],\n \"CSm\": [],\n \"GSm\": [],\n \"DSm\": [],\n \"ASm\": [],\n}\n\nTONIC_NOTE_FOR_KEY = {\n \"CM\": 60,\n \"GM\": 67,\n \"DM\": None,\n \"AM\": None,\n \"EM\": None,\n \"BM\": None,\n \"FSM\": None,\n \"CSM\": None,\n \"Am\": None,\n \"Em\": None,\n \"Bm\": None,\n \"FSm\": None,\n \"CSm\": None,\n \"GSm\": None,\n \"DSm\": None,\n \"ASm\": None,\n}\n\n# add more chords later\nSTEPS_FOR_CHORD = {\"major_triad\": [0, 4, 7]}\n\n\n\n# constants for value function\n# add more complex rewards\nNOTE_IN_KEY_REWARD = 1\nNOTE_IN_CHORDS_REWARD = 1\nSUPER_CONSONANT_INTERVAL_REWARD = 3\nCONSONANT_INTERVAL_REWARD = 2\nSOMEWHAT_CONSONANT_INTERVAL_REWARD = 1\nDISSONANT_INTERVAL_REWARD = -2\nSOMEWHAT_DISSONANT_INTERVAL_REWARD = -1\nCENTRICITY_FACTOR = 1 # reward is number of times note occured before * CENTRICITY_FACTOR", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for line in file1: x_list.append(float(line)) <|reserved_special_token_0|> for line in file2: y_list.append(float(line)) file2.close file1.close <|reserved_special_token_0|> plt.plot(x_list, y_list, label='robot trajectory') plt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label= 'desired position') plt.title('Robot trajectory based on the wheel encoders ') plt.xlabel('x [m]') plt.ylabel('y [m]') plt.legend() plt.show() <|reserved_special_token_1|> <|reserved_special_token_0|> x_list = [] y_list = [] file1 = open('pos_data_x.txt', 'r') for line in file1: x_list.append(float(line)) file2 = open('pos_data_y.txt', 'r') for line in file2: y_list.append(float(line)) file2.close file1.close desired_x = [0.0, 0.5, 0.5] desired_y = [0.0, 0.0, 0.5] desired_pos_x_list = [1.0, 1.0, 0.0, 0.0] desired_pos_y_list = [0.0, 0.7, 0.7, 0.0] plt.plot(x_list, y_list, label='robot trajectory') plt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label= 'desired position') plt.title('Robot trajectory based on the wheel encoders ') plt.xlabel('x [m]') plt.ylabel('y [m]') plt.legend() plt.show() <|reserved_special_token_1|> import numpy as np import matplotlib.pyplot as plt x_list = [] y_list = [] file1 = open('pos_data_x.txt', 'r') for line in file1: x_list.append(float(line)) file2 = open('pos_data_y.txt', 'r') for line in file2: y_list.append(float(line)) file2.close file1.close desired_x = [0.0, 0.5, 0.5] desired_y = [0.0, 0.0, 0.5] desired_pos_x_list = [1.0, 1.0, 0.0, 0.0] desired_pos_y_list = [0.0, 0.7, 0.7, 0.0] plt.plot(x_list, y_list, label='robot trajectory') plt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label= 'desired position') plt.title('Robot trajectory based on the wheel encoders ') plt.xlabel('x [m]') plt.ylabel('y [m]') plt.legend() plt.show() <|reserved_special_token_1|> import numpy as np import matplotlib.pyplot as plt x_list = [] y_list = [] file1 
= open("pos_data_x.txt", "r") for line in file1: #x_list.append(float(file1.readline(line))) x_list.append(float(line)) file2 = open("pos_data_y.txt", "r") for line in file2: #y_list.append(float(file1.readline(line))) y_list.append(float(line)) file2.close file1.close desired_x = [0.0, 0.5, 0.5] desired_y = [0.0, 0.0, 0.5] desired_pos_x_list = [1.0, 1.0, 0.0, 0.0] #[0.5, 0.5, 0.0, 0.0] desired_pos_y_list = [0.0, 0.7, 0.7, 0.0] #[0.0, 0.5, 0.5, 0.0] plt.plot(x_list, y_list, label = 'robot trajectory') #plt.plot(desired_x, desired_y, marker = 'x', label = 'desired position') plt.plot(desired_pos_x_list, desired_pos_y_list, marker = 'x', label = 'desired position') plt.title("Robot trajectory based on the wheel encoders ") plt.xlabel("x [m]") plt.ylabel("y [m]") #plt.axis("square") plt.legend() plt.show()
flexible
{ "blob_id": "d869aa32cb9793ce11a5b6a782cc66c2dd0be309", "index": 6176, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in file1:\n x_list.append(float(line))\n<mask token>\nfor line in file2:\n y_list.append(float(line))\nfile2.close\nfile1.close\n<mask token>\nplt.plot(x_list, y_list, label='robot trajectory')\nplt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label=\n 'desired position')\nplt.title('Robot trajectory based on the wheel encoders ')\nplt.xlabel('x [m]')\nplt.ylabel('y [m]')\nplt.legend()\nplt.show()\n", "step-3": "<mask token>\nx_list = []\ny_list = []\nfile1 = open('pos_data_x.txt', 'r')\nfor line in file1:\n x_list.append(float(line))\nfile2 = open('pos_data_y.txt', 'r')\nfor line in file2:\n y_list.append(float(line))\nfile2.close\nfile1.close\ndesired_x = [0.0, 0.5, 0.5]\ndesired_y = [0.0, 0.0, 0.5]\ndesired_pos_x_list = [1.0, 1.0, 0.0, 0.0]\ndesired_pos_y_list = [0.0, 0.7, 0.7, 0.0]\nplt.plot(x_list, y_list, label='robot trajectory')\nplt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label=\n 'desired position')\nplt.title('Robot trajectory based on the wheel encoders ')\nplt.xlabel('x [m]')\nplt.ylabel('y [m]')\nplt.legend()\nplt.show()\n", "step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nx_list = []\ny_list = []\nfile1 = open('pos_data_x.txt', 'r')\nfor line in file1:\n x_list.append(float(line))\nfile2 = open('pos_data_y.txt', 'r')\nfor line in file2:\n y_list.append(float(line))\nfile2.close\nfile1.close\ndesired_x = [0.0, 0.5, 0.5]\ndesired_y = [0.0, 0.0, 0.5]\ndesired_pos_x_list = [1.0, 1.0, 0.0, 0.0]\ndesired_pos_y_list = [0.0, 0.7, 0.7, 0.0]\nplt.plot(x_list, y_list, label='robot trajectory')\nplt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label=\n 'desired position')\nplt.title('Robot trajectory based on the wheel encoders ')\nplt.xlabel('x [m]')\nplt.ylabel('y [m]')\nplt.legend()\nplt.show()\n", "step-5": "import numpy as np\n\nimport matplotlib.pyplot as plt\n\nx_list 
= []\ny_list = []\n\nfile1 = open(\"pos_data_x.txt\", \"r\")\nfor line in file1:\n\t#x_list.append(float(file1.readline(line)))\n\tx_list.append(float(line))\n\t\nfile2 = open(\"pos_data_y.txt\", \"r\")\nfor line in file2:\n\t#y_list.append(float(file1.readline(line)))\n\ty_list.append(float(line))\n\t\n\nfile2.close\nfile1.close\n\ndesired_x = [0.0, 0.5, 0.5]\ndesired_y = [0.0, 0.0, 0.5]\n\ndesired_pos_x_list = [1.0, 1.0, 0.0, 0.0] #[0.5, 0.5, 0.0, 0.0]\ndesired_pos_y_list = [0.0, 0.7, 0.7, 0.0] #[0.0, 0.5, 0.5, 0.0]\n\nplt.plot(x_list, y_list, label = 'robot trajectory')\n#plt.plot(desired_x, desired_y, marker = 'x', label = 'desired position')\nplt.plot(desired_pos_x_list, desired_pos_y_list, marker = 'x', label = 'desired position')\nplt.title(\"Robot trajectory based on the wheel encoders \")\nplt.xlabel(\"x [m]\")\nplt.ylabel(\"y [m]\")\n#plt.axis(\"square\")\nplt.legend()\nplt.show()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def add_owner_mce(m) ->MetadataChangeEventClass: entity = m['Table'] schema = m['Schema'] dataset_name = f'{schema}.{entity}' owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for owner in m['Owner']] changed_snapshot = DatasetSnapshotClass(urn= f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})' , aspects=[]) changed_snapshot.aspects.append(OwnershipClass(owners)) mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot) return mce def callback(err, msg): print('ingested row') if err: print('error:', err) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> with open(source_file_path, 'r') as f: for _i in f: row = json.loads(_i.rstrip('\n')) Email = row['Email'] row['Owner'] = [f'urn:li:corpuser:{Email}'] recs.append(row) def add_owner_mce(m) ->MetadataChangeEventClass: entity = m['Table'] schema = m['Schema'] dataset_name = f'{schema}.{entity}' owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for owner in m['Owner']] changed_snapshot = DatasetSnapshotClass(urn= f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})' , aspects=[]) changed_snapshot.aspects.append(OwnershipClass(owners)) mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot) return mce def callback(err, msg): print('ingested row') if err: print('error:', err) <|reserved_special_token_0|> for _i in range(num_recs): print('sending data to datahub') mce = add_owner_mce(recs[_i]) print(mce) Restemitter.emit_mce(mce) num_recs -= 1 <|reserved_special_token_1|> env = 'DEV' platform = 'hive' <|reserved_special_token_0|> source_file_path = '/Users/snandi/Downloads/data/owner_data.json' <|reserved_special_token_0|> recs = [] with open(source_file_path, 'r') as f: for _i in f: row = json.loads(_i.rstrip('\n')) Email = row['Email'] row['Owner'] = [f'urn:li:corpuser:{Email}'] recs.append(row) def add_owner_mce(m) ->MetadataChangeEventClass: entity = 
m['Table'] schema = m['Schema'] dataset_name = f'{schema}.{entity}' owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for owner in m['Owner']] changed_snapshot = DatasetSnapshotClass(urn= f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})' , aspects=[]) changed_snapshot.aspects.append(OwnershipClass(owners)) mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot) return mce def callback(err, msg): print('ingested row') if err: print('error:', err) num_recs = len(recs) Restemitter = DatahubRestEmitter('http://10.174.24.179:8080') for _i in range(num_recs): print('sending data to datahub') mce = add_owner_mce(recs[_i]) print(mce) Restemitter.emit_mce(mce) num_recs -= 1 <|reserved_special_token_1|> env = 'DEV' platform = 'hive' from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig from datahub.emitter.rest_emitter import DatahubRestEmitter from datahub.ingestion.extractor.schema_util import * from datahub.metadata.schema_classes import DatasetSnapshotClass, MetadataChangeEventClass, OwnerClass, OwnershipClass, OwnershipTypeClass source_file_path = '/Users/snandi/Downloads/data/owner_data.json' import json recs = [] with open(source_file_path, 'r') as f: for _i in f: row = json.loads(_i.rstrip('\n')) Email = row['Email'] row['Owner'] = [f'urn:li:corpuser:{Email}'] recs.append(row) def add_owner_mce(m) ->MetadataChangeEventClass: entity = m['Table'] schema = m['Schema'] dataset_name = f'{schema}.{entity}' owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for owner in m['Owner']] changed_snapshot = DatasetSnapshotClass(urn= f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})' , aspects=[]) changed_snapshot.aspects.append(OwnershipClass(owners)) mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot) return mce def callback(err, msg): print('ingested row') if err: print('error:', err) num_recs = len(recs) Restemitter = 
DatahubRestEmitter('http://10.174.24.179:8080') for _i in range(num_recs): print('sending data to datahub') mce = add_owner_mce(recs[_i]) print(mce) Restemitter.emit_mce(mce) num_recs -= 1 <|reserved_special_token_1|> env = 'DEV' ## this had to be in uppercase platform = 'hive' from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig from datahub.emitter.rest_emitter import DatahubRestEmitter from datahub.ingestion.extractor.schema_util import * from datahub.metadata.schema_classes import ( DatasetSnapshotClass, MetadataChangeEventClass, OwnerClass, OwnershipClass, OwnershipTypeClass, ) source_file_path = '/Users/snandi/Downloads/data/owner_data.json' # created an emitter where the mce will be emitted, it will be DataHub's Kafka broker in docker (for PoC) # emitter = DatahubKafkaEmitter( # KafkaEmitterConfig.parse_obj( # # This is the same config format as the standard Kafka sink's YAML. # { # "connection": { # "bootstrap": "localhost:9002", # "producer_config": {}, # "schema_registry_url": "localhost:8081", # } # } # ) # ) # todo: 1. We have to make a living doc of table ownership 2. 
If we decide that to be google doc, # then create an Oauth or service account to access the sheet programatically import json recs = [] with open(source_file_path, 'r') as f: for _i in f: row = json.loads(_i.rstrip('\n')) Email= row['Email'] row['Owner'] = [f"urn:li:corpuser:{Email}"] recs.append(row) # recs = [{'schema_name': 'integrated_core', 'table_name': 'order_fact', 'owner': ["urn:li:corpuser:hsk@grubhub.com"]}] # Process messages def add_owner_mce(m) -> MetadataChangeEventClass: entity = m['Table'] schema = m['Schema'] dataset_name = f"{schema}.{entity}" owners = [ OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for owner in m['Owner'] ] changed_snapshot = DatasetSnapshotClass( urn=f"urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})", aspects=[], # we append to this list later on ) changed_snapshot.aspects.append(OwnershipClass(owners)) mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot) return mce def callback(err, msg): print('ingested row') if err: # Handle the metadata emission error. print("error:", err) num_recs = len(recs) # try REST emitter Restemitter = DatahubRestEmitter("http://10.174.24.179:8080") for _i in range(num_recs): print('sending data to datahub') mce = add_owner_mce(recs[_i]) print(mce) # emit the mce to kafka # emitter.emit_mce_async(mce, callback) # emitter.flush() # emit mce to REST Restemitter.emit_mce(mce) num_recs -= 1
flexible
{ "blob_id": "7ad5e803afa42790e878bfb923eddcfde2d21928", "index": 1501, "step-1": "<mask token>\n\n\ndef add_owner_mce(m) ->MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f'{schema}.{entity}'\n owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for\n owner in m['Owner']]\n changed_snapshot = DatasetSnapshotClass(urn=\n f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'\n , aspects=[])\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n print('error:', err)\n\n\n<mask token>\n", "step-2": "<mask token>\nwith open(source_file_path, 'r') as f:\n for _i in f:\n row = json.loads(_i.rstrip('\\n'))\n Email = row['Email']\n row['Owner'] = [f'urn:li:corpuser:{Email}']\n recs.append(row)\n\n\ndef add_owner_mce(m) ->MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f'{schema}.{entity}'\n owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for\n owner in m['Owner']]\n changed_snapshot = DatasetSnapshotClass(urn=\n f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'\n , aspects=[])\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n print('error:', err)\n\n\n<mask token>\nfor _i in range(num_recs):\n print('sending data to datahub')\n mce = add_owner_mce(recs[_i])\n print(mce)\n Restemitter.emit_mce(mce)\n num_recs -= 1\n", "step-3": "env = 'DEV'\nplatform = 'hive'\n<mask token>\nsource_file_path = '/Users/snandi/Downloads/data/owner_data.json'\n<mask token>\nrecs = []\nwith open(source_file_path, 'r') as f:\n for _i in f:\n row = json.loads(_i.rstrip('\\n'))\n Email = row['Email']\n row['Owner'] = 
[f'urn:li:corpuser:{Email}']\n recs.append(row)\n\n\ndef add_owner_mce(m) ->MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f'{schema}.{entity}'\n owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for\n owner in m['Owner']]\n changed_snapshot = DatasetSnapshotClass(urn=\n f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'\n , aspects=[])\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n print('error:', err)\n\n\nnum_recs = len(recs)\nRestemitter = DatahubRestEmitter('http://10.174.24.179:8080')\nfor _i in range(num_recs):\n print('sending data to datahub')\n mce = add_owner_mce(recs[_i])\n print(mce)\n Restemitter.emit_mce(mce)\n num_recs -= 1\n", "step-4": "env = 'DEV'\nplatform = 'hive'\nfrom datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig\nfrom datahub.emitter.rest_emitter import DatahubRestEmitter\nfrom datahub.ingestion.extractor.schema_util import *\nfrom datahub.metadata.schema_classes import DatasetSnapshotClass, MetadataChangeEventClass, OwnerClass, OwnershipClass, OwnershipTypeClass\nsource_file_path = '/Users/snandi/Downloads/data/owner_data.json'\nimport json\nrecs = []\nwith open(source_file_path, 'r') as f:\n for _i in f:\n row = json.loads(_i.rstrip('\\n'))\n Email = row['Email']\n row['Owner'] = [f'urn:li:corpuser:{Email}']\n recs.append(row)\n\n\ndef add_owner_mce(m) ->MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f'{schema}.{entity}'\n owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for\n owner in m['Owner']]\n changed_snapshot = DatasetSnapshotClass(urn=\n f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'\n , aspects=[])\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = 
MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n print('error:', err)\n\n\nnum_recs = len(recs)\nRestemitter = DatahubRestEmitter('http://10.174.24.179:8080')\nfor _i in range(num_recs):\n print('sending data to datahub')\n mce = add_owner_mce(recs[_i])\n print(mce)\n Restemitter.emit_mce(mce)\n num_recs -= 1\n", "step-5": "\nenv = 'DEV' ## this had to be in uppercase\nplatform = 'hive'\n\nfrom datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig\nfrom datahub.emitter.rest_emitter import DatahubRestEmitter\n\nfrom datahub.ingestion.extractor.schema_util import *\n\nfrom datahub.metadata.schema_classes import (\n DatasetSnapshotClass,\n MetadataChangeEventClass,\n OwnerClass,\n OwnershipClass,\n OwnershipTypeClass,\n)\n\nsource_file_path = '/Users/snandi/Downloads/data/owner_data.json'\n\n# created an emitter where the mce will be emitted, it will be DataHub's Kafka broker in docker (for PoC)\n# emitter = DatahubKafkaEmitter(\n# KafkaEmitterConfig.parse_obj(\n# # This is the same config format as the standard Kafka sink's YAML.\n# {\n# \"connection\": {\n# \"bootstrap\": \"localhost:9002\",\n# \"producer_config\": {},\n# \"schema_registry_url\": \"localhost:8081\",\n# }\n# }\n# )\n# )\n\n\n# todo: 1. We have to make a living doc of table ownership 2. 
If we decide that to be google doc,\n# then create an Oauth or service account to access the sheet programatically\n\nimport json\nrecs = []\nwith open(source_file_path, 'r') as f:\n for _i in f:\n row = json.loads(_i.rstrip('\\n'))\n Email= row['Email']\n row['Owner'] = [f\"urn:li:corpuser:{Email}\"]\n recs.append(row)\n\n# recs = [{'schema_name': 'integrated_core', 'table_name': 'order_fact', 'owner': [\"urn:li:corpuser:hsk@grubhub.com\"]}]\n\n\n# Process messages\ndef add_owner_mce(m) -> MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f\"{schema}.{entity}\"\n\n owners = [\n OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER)\n for owner in m['Owner']\n ]\n\n changed_snapshot = DatasetSnapshotClass(\n urn=f\"urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})\",\n aspects=[], # we append to this list later on\n )\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n # Handle the metadata emission error.\n print(\"error:\", err)\n\n\nnum_recs = len(recs)\n\n# try REST emitter\nRestemitter = DatahubRestEmitter(\"http://10.174.24.179:8080\")\n\n\nfor _i in range(num_recs):\n print('sending data to datahub')\n mce = add_owner_mce(recs[_i])\n print(mce)\n # emit the mce to kafka\n # emitter.emit_mce_async(mce, callback)\n # emitter.flush()\n # emit mce to REST\n Restemitter.emit_mce(mce)\n num_recs -= 1\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# -*- coding:utf-8 -*- #实现同义词词林的规格化 with open('C:\\Users\\lenovo\\Desktop\\哈工大社会计算与信息检索研究中心同义词词林扩展版.txt') as f: with open('convert.txt','a') as w: for line in f: data = line[8:-1].split() for item in data: tmp = data.copy() tmp.remove(item) tmp.insert(0,item) w.writelines('\t'.join(tmp)+'\n')
normal
{ "blob_id": "9109e649a90730df022df898a7760140275ad724", "index": 4854, "step-1": "<mask token>\n", "step-2": "with open('C:\\\\Users\\\\lenovo\\\\Desktop\\\\哈工大社会计算与信息检索研究中心同义词词林扩展版.txt') as f:\n with open('convert.txt', 'a') as w:\n for line in f:\n data = line[8:-1].split()\n for item in data:\n tmp = data.copy()\n tmp.remove(item)\n tmp.insert(0, item)\n w.writelines('\\t'.join(tmp) + '\\n')\n", "step-3": "# -*- coding:utf-8 -*- \r\n#实现同义词词林的规格化\r\n\r\n\r\nwith open('C:\\\\Users\\\\lenovo\\\\Desktop\\\\哈工大社会计算与信息检索研究中心同义词词林扩展版.txt') as f:\r\n with open('convert.txt','a') as w:\r\n for line in f:\r\n \r\n data = line[8:-1].split()\r\n for item in data:\r\n tmp = data.copy()\r\n tmp.remove(item)\r\n tmp.insert(0,item)\r\n w.writelines('\\t'.join(tmp)+'\\n')", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> res.read_rcf() res.read_his() <|reserved_special_token_0|> for kt, step in enumerate(res.steps): if step.conv_status in [-1]: if step.time in tx: tsteps.append(kt) <|reserved_special_token_0|> res.read_dat() res.read_s00() for lab in res.ele_group_labels: if lab == 'VOLUMICS': res.read_s01() vtktools.write_vtu(res, vol=True, verbose=False, outline=True) <|reserved_special_token_1|> <|reserved_special_token_0|> pathname = ( '\\\\192.168.1.51\\Mandats sur H RAID0\\M1010_Tourbillon\\stab_panneau') prob = 'M1010_stabPann_m2_renfLat' res = zr(pathname, prob) res.read_rcf() res.read_his() tx = [67] tsteps = [] for kt, step in enumerate(res.steps): if step.conv_status in [-1]: if step.time in tx: tsteps.append(kt) res.out_steps = tsteps res.read_dat() res.read_s00() for lab in res.ele_group_labels: if lab == 'VOLUMICS': res.read_s01() vtktools.write_vtu(res, vol=True, verbose=False, outline=True) <|reserved_special_token_1|> import numpy as np from zsoil_tools import zsoil_results as zr from zsoil_tools import vtktools pathname = ( '\\\\192.168.1.51\\Mandats sur H RAID0\\M1010_Tourbillon\\stab_panneau') prob = 'M1010_stabPann_m2_renfLat' res = zr(pathname, prob) res.read_rcf() res.read_his() tx = [67] tsteps = [] for kt, step in enumerate(res.steps): if step.conv_status in [-1]: if step.time in tx: tsteps.append(kt) res.out_steps = tsteps res.read_dat() res.read_s00() for lab in res.ele_group_labels: if lab == 'VOLUMICS': res.read_s01() vtktools.write_vtu(res, vol=True, verbose=False, outline=True) <|reserved_special_token_1|> # @description Exporting outline (boundary faces) of zsoil results to vtu # @input zsoil results # @output vtu unstructured grid # @author Matthias Preisig # @date 2017/10/10 import numpy as np from zsoil_tools import zsoil_results as zr from zsoil_tools import vtktools pathname = r'\\192.168.1.51\Mandats sur H RAID0\M1010_Tourbillon\stab_panneau' prob = 
'M1010_stabPann_m2_renfLat' res = zr(pathname,prob) res.read_rcf() res.read_his() tx = [67] tsteps = [] for kt,step in enumerate(res.steps): if step.conv_status in [-1]: if step.time in tx: tsteps.append(kt) res.out_steps = tsteps res.read_dat() res.read_s00() for lab in res.ele_group_labels: if lab=='VOLUMICS': res.read_s01() # volumics ## elif lab=='SHELLS': ## res.read_s02() # shells ## elif lab=='TRUSSES': ## res.read_s03() # trusses ## elif lab=='BEAMS': ## res.read_s04() # beams ## elif lab=='CONTACT': ## res.read_s07() ##vtktools.write_vtu(res,beams=True,verbose=False) ##vtktools.write_vtu(res,trusses=True,verbose=False) vtktools.write_vtu(res,vol=True,verbose=False,outline=True) ##vtktools.write_vtu(res,shells=True,verbose=False)
flexible
{ "blob_id": "fb6dd9ec7d8dc80eace90dadc2112c7c27125efd", "index": 2055, "step-1": "<mask token>\n", "step-2": "<mask token>\nres.read_rcf()\nres.read_his()\n<mask token>\nfor kt, step in enumerate(res.steps):\n if step.conv_status in [-1]:\n if step.time in tx:\n tsteps.append(kt)\n<mask token>\nres.read_dat()\nres.read_s00()\nfor lab in res.ele_group_labels:\n if lab == 'VOLUMICS':\n res.read_s01()\nvtktools.write_vtu(res, vol=True, verbose=False, outline=True)\n", "step-3": "<mask token>\npathname = (\n '\\\\\\\\192.168.1.51\\\\Mandats sur H RAID0\\\\M1010_Tourbillon\\\\stab_panneau')\nprob = 'M1010_stabPann_m2_renfLat'\nres = zr(pathname, prob)\nres.read_rcf()\nres.read_his()\ntx = [67]\ntsteps = []\nfor kt, step in enumerate(res.steps):\n if step.conv_status in [-1]:\n if step.time in tx:\n tsteps.append(kt)\nres.out_steps = tsteps\nres.read_dat()\nres.read_s00()\nfor lab in res.ele_group_labels:\n if lab == 'VOLUMICS':\n res.read_s01()\nvtktools.write_vtu(res, vol=True, verbose=False, outline=True)\n", "step-4": "import numpy as np\nfrom zsoil_tools import zsoil_results as zr\nfrom zsoil_tools import vtktools\npathname = (\n '\\\\\\\\192.168.1.51\\\\Mandats sur H RAID0\\\\M1010_Tourbillon\\\\stab_panneau')\nprob = 'M1010_stabPann_m2_renfLat'\nres = zr(pathname, prob)\nres.read_rcf()\nres.read_his()\ntx = [67]\ntsteps = []\nfor kt, step in enumerate(res.steps):\n if step.conv_status in [-1]:\n if step.time in tx:\n tsteps.append(kt)\nres.out_steps = tsteps\nres.read_dat()\nres.read_s00()\nfor lab in res.ele_group_labels:\n if lab == 'VOLUMICS':\n res.read_s01()\nvtktools.write_vtu(res, vol=True, verbose=False, outline=True)\n", "step-5": "# @description Exporting outline (boundary faces) of zsoil results to vtu\n# @input zsoil results\n# @output vtu unstructured grid\n# @author Matthias Preisig\n# @date 2017/10/10\n\nimport numpy as np\n\nfrom zsoil_tools import zsoil_results as zr\nfrom zsoil_tools import vtktools\n\n\npathname = r'\\\\192.168.1.51\\Mandats 
sur H RAID0\\M1010_Tourbillon\\stab_panneau'\nprob = 'M1010_stabPann_m2_renfLat'\n\nres = zr(pathname,prob)\nres.read_rcf()\nres.read_his()\ntx = [67]\ntsteps = []\nfor kt,step in enumerate(res.steps):\n if step.conv_status in [-1]:\n if step.time in tx:\n tsteps.append(kt)\nres.out_steps = tsteps\nres.read_dat()\nres.read_s00()\nfor lab in res.ele_group_labels:\n if lab=='VOLUMICS':\n res.read_s01() # volumics\n## elif lab=='SHELLS':\n## res.read_s02() # shells\n## elif lab=='TRUSSES':\n## res.read_s03() # trusses\n## elif lab=='BEAMS':\n## res.read_s04() # beams\n## elif lab=='CONTACT':\n## res.read_s07()\n\n\n##vtktools.write_vtu(res,beams=True,verbose=False)\n##vtktools.write_vtu(res,trusses=True,verbose=False)\nvtktools.write_vtu(res,vol=True,verbose=False,outline=True)\n##vtktools.write_vtu(res,shells=True,verbose=False)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def upload_to_s3(file_name, node_number): try: key_info_json = open('awsinfo.json').read() except FileNotFoundError: print('awsinfo.json is not exist in dir.') exit(-1) data = json.loads(key_info_json) s3 = boto3.client('s3', aws_access_key_id=data['accessKeyId'], aws_secret_access_key=data['secretAccessKey']) with open(file_name, 'rb') as f: s3.upload_fileobj(f, 'capstone12', str(node_number) + '/' + file_name, ExtraArgs={'ACL': 'public-read-write'}) print('File Upload Complete to ' + str(node_number) + '/' + file_name) <|reserved_special_token_1|> import boto3 import json from botocore.exceptions import ClientError def upload_to_s3(file_name, node_number): try: key_info_json = open('awsinfo.json').read() except FileNotFoundError: print('awsinfo.json is not exist in dir.') exit(-1) data = json.loads(key_info_json) s3 = boto3.client('s3', aws_access_key_id=data['accessKeyId'], aws_secret_access_key=data['secretAccessKey']) with open(file_name, 'rb') as f: s3.upload_fileobj(f, 'capstone12', str(node_number) + '/' + file_name, ExtraArgs={'ACL': 'public-read-write'}) print('File Upload Complete to ' + str(node_number) + '/' + file_name) <|reserved_special_token_1|> import boto3 import json from botocore.exceptions import ClientError # upload_to_s3("abc.png", 1) def upload_to_s3(file_name, node_number): try: key_info_json = open("awsinfo.json").read() except FileNotFoundError: print("awsinfo.json is not exist in dir.") exit(-1) data=json.loads(key_info_json) s3 = boto3.client( 's3', aws_access_key_id = data['accessKeyId'], aws_secret_access_key = data['secretAccessKey'] ) with open(file_name, "rb") as f: s3.upload_fileobj(f,"capstone12", str(node_number)+"/"+file_name, ExtraArgs={'ACL' : 'public-read-write'} ) print("File Upload Complete to " + str(node_number) + "/" + file_name)
flexible
{ "blob_id": "2f0d611fecdb5717029938d2ec2cd2db345b8f3a", "index": 8176, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef upload_to_s3(file_name, node_number):\n try:\n key_info_json = open('awsinfo.json').read()\n except FileNotFoundError:\n print('awsinfo.json is not exist in dir.')\n exit(-1)\n data = json.loads(key_info_json)\n s3 = boto3.client('s3', aws_access_key_id=data['accessKeyId'],\n aws_secret_access_key=data['secretAccessKey'])\n with open(file_name, 'rb') as f:\n s3.upload_fileobj(f, 'capstone12', str(node_number) + '/' +\n file_name, ExtraArgs={'ACL': 'public-read-write'})\n print('File Upload Complete to ' + str(node_number) + '/' + file_name)\n", "step-3": "import boto3\nimport json\nfrom botocore.exceptions import ClientError\n\n\ndef upload_to_s3(file_name, node_number):\n try:\n key_info_json = open('awsinfo.json').read()\n except FileNotFoundError:\n print('awsinfo.json is not exist in dir.')\n exit(-1)\n data = json.loads(key_info_json)\n s3 = boto3.client('s3', aws_access_key_id=data['accessKeyId'],\n aws_secret_access_key=data['secretAccessKey'])\n with open(file_name, 'rb') as f:\n s3.upload_fileobj(f, 'capstone12', str(node_number) + '/' +\n file_name, ExtraArgs={'ACL': 'public-read-write'})\n print('File Upload Complete to ' + str(node_number) + '/' + file_name)\n", "step-4": "import boto3\r\nimport json\r\nfrom botocore.exceptions import ClientError\r\n\r\n# upload_to_s3(\"abc.png\", 1)\r\ndef upload_to_s3(file_name, node_number):\r\n try:\r\n key_info_json = open(\"awsinfo.json\").read()\r\n except FileNotFoundError:\r\n print(\"awsinfo.json is not exist in dir.\")\r\n exit(-1)\r\n\r\n data=json.loads(key_info_json)\r\n\r\n s3 = boto3.client(\r\n 's3',\r\n aws_access_key_id = data['accessKeyId'],\r\n aws_secret_access_key = data['secretAccessKey']\r\n )\r\n\r\n with open(file_name, \"rb\") as f:\r\n s3.upload_fileobj(f,\"capstone12\", str(node_number)+\"/\"+file_name,\r\n ExtraArgs={'ACL' : 'public-read-write'}\r\n )\r\n 
print(\"File Upload Complete to \" + str(node_number) + \"/\" + file_name)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from typing import Sequence import matplotlib.pyplot as plt import matplotlib.colors as colors import numpy as np def plot3D(X, Y, Z, proporcao=1, espelharZ = False): fig = plt.figure() ax = fig.gca(projection='3d') ax.set_xlabel('X ') ax.set_ylabel('Y ') ax.set_zlabel('Z ') np.floor colortuple = (colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')) colorsArray = np.empty([len(X), len(Y)], dtype=tuple) for y in range(len(Y)): for x in range(len(X)): colorsArray[x, y] = colortuple[int( np.ceil(x/proporcao) + np.ceil(y/proporcao)) % len(colortuple)] surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0) if(espelharZ): surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0) #surf = ax.plot_wireframe(X, Y, Z, linewidth=1) #plt.show() def limitZ(Z, limit = 10): for i in range(len(Z)): for j in range(len(Z[i])): if(Z[i][j]>limit): Z[i][j] = np.inf if(Z[i][j]<-limit): Z[i][j] = -np.inf def plotPontos3D(X,Y,Z): fig = plt.figure() ax = fig.add_subplot(projection='3d') ax.scatter(X, Y, Z, marker='o') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.show() def curvaNivel(X,Y,Z,levels): fig = plt.figure() ax = fig.add_subplot() curva = ax.contourf(X,Y,Z,levels) ax.set_xlabel('X') ax.set_ylabel('Y') #curva.cmap.set_under('white') #curva.cmap.set_over('cyan') fig.colorbar(curva) plt.show()
normal
{ "blob_id": "ff20b65f35614415ad786602c0fc2cabd08124fb", "index": 4065, "step-1": "<mask token>\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n 
ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\ndef curvaNivel(X, Y, Z, levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva = ax.contourf(X, Y, Z, levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig.colorbar(curva)\n plt.show()\n", "step-4": "from typing import Sequence\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\ndef curvaNivel(X, Y, Z, levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva 
= ax.contourf(X, Y, Z, levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig.colorbar(curva)\n plt.show()\n", "step-5": "from typing import Sequence\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ = False):\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = (colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88'))\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(\n np.ceil(x/proporcao) + np.ceil(y/proporcao)) % len(colortuple)]\n\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if(espelharZ):\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n #surf = ax.plot_wireframe(X, Y, Z, linewidth=1)\n\n #plt.show()\n\ndef limitZ(Z, limit = 10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if(Z[i][j]>limit):\n Z[i][j] = np.inf\n if(Z[i][j]<-limit):\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X,Y,Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n\n plt.show()\n\n\ndef curvaNivel(X,Y,Z,levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva = ax.contourf(X,Y,Z,levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n #curva.cmap.set_under('white')\n #curva.cmap.set_over('cyan')\n fig.colorbar(curva)\n plt.show()\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> def save_df_surnames_as_pickle(): df_surnames, df_categories = load_surnames() df = shuffle(df_surnames, random_state=sc.RANDOM_STATE) train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO) train = df[0:train_cnt] test = df[train_cnt + 1:] df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2') df_categories.to_pickle('data/pickles/df_categories.pickle', compression='bz2') train.to_pickle('data/pickles/train.pickle', compression='bz2') test.to_pickle('data/pickles/test.pickle', compression='bz2') t1 = train.groupby(['category']).count().drop(['normalized'], axis=1) t2 = test.groupby(['category']).count().drop(['normalized'], axis=1) t1.columns = ['surname_train'] t2.columns = ['surname_test'] tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True)) tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[ 'surname_test']) tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2') return tt <|reserved_special_token_1|> <|reserved_special_token_0|> def unicode_to_ascii(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS) <|reserved_special_token_0|> def save_df_surnames_as_pickle(): df_surnames, df_categories = load_surnames() df = shuffle(df_surnames, random_state=sc.RANDOM_STATE) train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO) train = df[0:train_cnt] test = df[train_cnt + 1:] df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2') df_categories.to_pickle('data/pickles/df_categories.pickle', compression='bz2') train.to_pickle('data/pickles/train.pickle', compression='bz2') test.to_pickle('data/pickles/test.pickle', compression='bz2') t1 = train.groupby(['category']).count().drop(['normalized'], axis=1) t2 = test.groupby(['category']).count().drop(['normalized'], axis=1) t1.columns = ['surname_train'] t2.columns = ['surname_test'] tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, 
right_index=True)) tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[ 'surname_test']) tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2') return tt <|reserved_special_token_1|> <|reserved_special_token_0|> def unicode_to_ascii(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS) def load_surnames(): df_surnames = pd.DataFrame() list_ = [] for filename in glob.glob('data/names/*.txt'): m = re.match('(.*)\\/(.*?)\\.txt', filename) category = m.group(2) df = pd.read_csv(filename, names=['surname']) df['category'] = category list_.append(df) df_surnames = pd.concat(list_) df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x)) series_categories = df_surnames.groupby(['category'])['category'].count() df_categories = pd.DataFrame({'category': series_categories.index, 'freq': series_categories.tolist(), 'index': range(0, len( series_categories))}) return df_surnames, df_categories def save_df_surnames_as_pickle(): df_surnames, df_categories = load_surnames() df = shuffle(df_surnames, random_state=sc.RANDOM_STATE) train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO) train = df[0:train_cnt] test = df[train_cnt + 1:] df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2') df_categories.to_pickle('data/pickles/df_categories.pickle', compression='bz2') train.to_pickle('data/pickles/train.pickle', compression='bz2') test.to_pickle('data/pickles/test.pickle', compression='bz2') t1 = train.groupby(['category']).count().drop(['normalized'], axis=1) t2 = test.groupby(['category']).count().drop(['normalized'], axis=1) t1.columns = ['surname_train'] t2.columns = ['surname_test'] tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True)) tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[ 'surname_test']) tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2') return tt 
<|reserved_special_token_1|> import surname_common as sc from sklearn.utils import shuffle import glob import os import re import pprint import pandas as pd import unicodedata import string def unicode_to_ascii(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS) def load_surnames(): df_surnames = pd.DataFrame() list_ = [] for filename in glob.glob('data/names/*.txt'): m = re.match('(.*)\\/(.*?)\\.txt', filename) category = m.group(2) df = pd.read_csv(filename, names=['surname']) df['category'] = category list_.append(df) df_surnames = pd.concat(list_) df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x)) series_categories = df_surnames.groupby(['category'])['category'].count() df_categories = pd.DataFrame({'category': series_categories.index, 'freq': series_categories.tolist(), 'index': range(0, len( series_categories))}) return df_surnames, df_categories def save_df_surnames_as_pickle(): df_surnames, df_categories = load_surnames() df = shuffle(df_surnames, random_state=sc.RANDOM_STATE) train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO) train = df[0:train_cnt] test = df[train_cnt + 1:] df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2') df_categories.to_pickle('data/pickles/df_categories.pickle', compression='bz2') train.to_pickle('data/pickles/train.pickle', compression='bz2') test.to_pickle('data/pickles/test.pickle', compression='bz2') t1 = train.groupby(['category']).count().drop(['normalized'], axis=1) t2 = test.groupby(['category']).count().drop(['normalized'], axis=1) t1.columns = ['surname_train'] t2.columns = ['surname_test'] tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True)) tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[ 'surname_test']) tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2') return tt <|reserved_special_token_1|> import surname_common as sc from 
sklearn.utils import shuffle import glob import os import re import pprint import pandas as pd import unicodedata import string def unicode_to_ascii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS ) def load_surnames(): df_surnames = pd.DataFrame() list_ = [] for filename in glob.glob('data/names/*.txt'): m = re.match(r'(.*)\/(.*?)\.txt', filename) category = m.group(2) df = pd.read_csv(filename,names=['surname']) df['category'] = category list_.append(df) df_surnames = pd.concat(list_) df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x)) series_categories = df_surnames.groupby(['category'])['category'].count() df_categories = pd.DataFrame({ 'category':series_categories.index, 'freq':series_categories.tolist(), 'index':range(0,len(series_categories)) }) return df_surnames, df_categories def save_df_surnames_as_pickle(): df_surnames, df_categories = load_surnames() # train test split df = shuffle(df_surnames, random_state=sc.RANDOM_STATE) train_cnt = int(df['surname'].count()*sc.TRAIN_TEST_RATIO) train = df[0:train_cnt] test = df[train_cnt+1:] # save as pickle df_surnames.to_pickle('data/pickles/df_surnames.pickle',compression='bz2') df_categories.to_pickle('data/pickles/df_categories.pickle',compression='bz2') train.to_pickle('data/pickles/train.pickle',compression='bz2') test.to_pickle('data/pickles/test.pickle',compression='bz2') # train test stat t1 = train.groupby(['category']).count().drop(['normalized'],axis=1) t2 = test.groupby(['category']).count().drop(['normalized'],axis=1) t1.columns = ['surname_train'] t2.columns = ['surname_test'] tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True)) tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt['surname_test']) tt.to_pickle('data/pickles/train_test_stat.pickle',compression='bz2') return tt
flexible
{ "blob_id": "db46fbfb1acd855eebb5c9f557d70038b84e812d", "index": 8573, "step-1": "<mask token>\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n", "step-2": "<mask token>\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\n<mask token>\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n 
t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n", "step-3": "<mask token>\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n for filename in glob.glob('data/names/*.txt'):\n m = re.match('(.*)\\\\/(.*?)\\\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename, names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_)\n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x:\n unicode_to_ascii(x))\n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({'category': series_categories.index,\n 'freq': series_categories.tolist(), 'index': range(0, len(\n series_categories))})\n return df_surnames, df_categories\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, 
right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n", "step-4": "import surname_common as sc\nfrom sklearn.utils import shuffle\nimport glob\nimport os\nimport re\nimport pprint\nimport pandas as pd\nimport unicodedata\nimport string\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n for filename in glob.glob('data/names/*.txt'):\n m = re.match('(.*)\\\\/(.*?)\\\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename, names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_)\n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x:\n unicode_to_ascii(x))\n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({'category': series_categories.index,\n 'freq': series_categories.tolist(), 'index': range(0, len(\n series_categories))})\n return df_surnames, df_categories\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, 
t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n", "step-5": "import surname_common as sc\nfrom sklearn.utils import shuffle\nimport glob\nimport os\nimport re\nimport pprint\nimport pandas as pd\nimport unicodedata\nimport string\n\n\ndef unicode_to_ascii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS\n )\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n\n for filename in glob.glob('data/names/*.txt'):\n m = re.match(r'(.*)\\/(.*?)\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename,names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_) \n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x))\n \n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({\n 'category':series_categories.index, \n 'freq':series_categories.tolist(), \n 'index':range(0,len(series_categories))\n })\n \n return df_surnames, df_categories\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n # train test split\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count()*sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt+1:]\n # save as pickle\n df_surnames.to_pickle('data/pickles/df_surnames.pickle',compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',compression='bz2')\n train.to_pickle('data/pickles/train.pickle',compression='bz2')\n test.to_pickle('data/pickles/test.pickle',compression='bz2')\n # train test stat \n t1 = train.groupby(['category']).count().drop(['normalized'],axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'],axis=1)\n t1.columns = 
['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt['surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle',compression='bz2')\n return tt", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> with open('txt.txt', 'r') as f: data = f.readlines() line = 0 for i in range(10, 110, 10): agg = 0 for j in range(num_tests): agg += int(data[line]) line += 1 res.append(agg / num_tests) <|reserved_special_token_0|> plt.plot(x, y, 'o') plt.plot(x, p(x), label='Best fit 2 degree polynomial') plt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)') plt.xlabel('Number of nodes in fully connected graph') plt.ylabel('Number of messages') plt.legend() plt.savefig('Messages.svg') plt.clf() <|reserved_special_token_0|> with open('txt2.txt', 'r') as f: data = f.readlines() line = 0 for procs in range(1, 13): times = [] for i in range(10, 110, 10): temp = 0 for num in range(num_tests): temp += float(data[line].split()[1]) line += 3 times.append(temp / num_tests) res.append(times) <|reserved_special_token_0|> plt.title('Time taken vs. number of cores used (Averaged over 10 runs)') plt.xlabel('Number of nodes in fully connected graph') plt.ylabel('Time taken (in seconds)') for procs in [1, 2, 4, 8, 12]: plt.plot(x, res[procs - 1], label=str(procs) + ' Cores') plt.legend() plt.savefig('Time.svg') <|reserved_special_token_1|> <|reserved_special_token_0|> steps = 10 num_tests = 100 res = [] with open('txt.txt', 'r') as f: data = f.readlines() line = 0 for i in range(10, 110, 10): agg = 0 for j in range(num_tests): agg += int(data[line]) line += 1 res.append(agg / num_tests) x = list(range(10, 110, steps)) y = res z = np.polyfit(x, res, 2) p = np.poly1d(z) plt.plot(x, y, 'o') plt.plot(x, p(x), label='Best fit 2 degree polynomial') plt.title('#messages vs. #nodes in graph (GHS algo.) 
(Averaged over 100 runs)') plt.xlabel('Number of nodes in fully connected graph') plt.ylabel('Number of messages') plt.legend() plt.savefig('Messages.svg') plt.clf() steps = 10 num_tests = 10 res = [] with open('txt2.txt', 'r') as f: data = f.readlines() line = 0 for procs in range(1, 13): times = [] for i in range(10, 110, 10): temp = 0 for num in range(num_tests): temp += float(data[line].split()[1]) line += 3 times.append(temp / num_tests) res.append(times) x = list(range(10, 110, steps)) y = res plt.title('Time taken vs. number of cores used (Averaged over 10 runs)') plt.xlabel('Number of nodes in fully connected graph') plt.ylabel('Time taken (in seconds)') for procs in [1, 2, 4, 8, 12]: plt.plot(x, res[procs - 1], label=str(procs) + ' Cores') plt.legend() plt.savefig('Time.svg') <|reserved_special_token_1|> import matplotlib.pyplot as plt import numpy as np steps = 10 num_tests = 100 res = [] with open('txt.txt', 'r') as f: data = f.readlines() line = 0 for i in range(10, 110, 10): agg = 0 for j in range(num_tests): agg += int(data[line]) line += 1 res.append(agg / num_tests) x = list(range(10, 110, steps)) y = res z = np.polyfit(x, res, 2) p = np.poly1d(z) plt.plot(x, y, 'o') plt.plot(x, p(x), label='Best fit 2 degree polynomial') plt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)') plt.xlabel('Number of nodes in fully connected graph') plt.ylabel('Number of messages') plt.legend() plt.savefig('Messages.svg') plt.clf() steps = 10 num_tests = 10 res = [] with open('txt2.txt', 'r') as f: data = f.readlines() line = 0 for procs in range(1, 13): times = [] for i in range(10, 110, 10): temp = 0 for num in range(num_tests): temp += float(data[line].split()[1]) line += 3 times.append(temp / num_tests) res.append(times) x = list(range(10, 110, steps)) y = res plt.title('Time taken vs. 
number of cores used (Averaged over 10 runs)') plt.xlabel('Number of nodes in fully connected graph') plt.ylabel('Time taken (in seconds)') for procs in [1, 2, 4, 8, 12]: plt.plot(x, res[procs - 1], label=str(procs) + ' Cores') plt.legend() plt.savefig('Time.svg') <|reserved_special_token_1|> import matplotlib.pyplot as plt import numpy as np steps = 10 num_tests = 100 res = [] with open('txt.txt', 'r') as f: data = f.readlines() line = 0 for i in range(10, 110, 10): agg = 0 for j in range(num_tests): agg += int(data[line]) line += 1 res.append(agg/num_tests) x = list(range(10, 110, steps)) y = res z = np.polyfit(x, res, 2) # print(z) p = np.poly1d(z) plt.plot(x, y, 'o') plt.plot(x, p(x),label = "Best fit 2 degree polynomial") plt.title("#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)") plt.xlabel("Number of nodes in fully connected graph") plt.ylabel("Number of messages") plt.legend() # plt.show() plt.savefig("Messages.svg") plt.clf() steps = 10 num_tests = 10 res = [] with open('txt2.txt', 'r') as f: data = f.readlines() line = 0 for procs in range(1,13): times = [] for i in range(10, 110, 10): temp = 0 for num in range(num_tests): temp += float(data[line].split()[1]) line += 3 times.append(temp/num_tests) res.append(times) x = list(range(10, 110, steps)) y = res # z = np.polyfit(x, res, 2) # print(z) # p = np.poly1d(z) # plt.plot(x, y, 'o') # plt.plot(x, p(x),label = "Best fit 2 degree polynomial") plt.title("Time taken vs. number of cores used (Averaged over 10 runs)") plt.xlabel("Number of nodes in fully connected graph") plt.ylabel("Time taken (in seconds)") # for procs in range(1,13): for procs in [1,2,4,8,12]: plt.plot(x,res[procs-1],label = str((procs))+' Cores') plt.legend() # plt.show() plt.savefig("Time.svg")
flexible
{ "blob_id": "176ffac7ad47f5c43a24acc664631f8353ec5100", "index": 967, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\n<mask token>\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\n<mask token>\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\n<mask token>\nplt.title('Time taken vs. number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n", "step-3": "<mask token>\nsteps = 10\nnum_tests = 100\nres = []\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\nx = list(range(10, 110, steps))\ny = res\nz = np.polyfit(x, res, 2)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) 
(Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\nsteps = 10\nnum_tests = 10\nres = []\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\nx = list(range(10, 110, steps))\ny = res\nplt.title('Time taken vs. number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n", "step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nsteps = 10\nnum_tests = 100\nres = []\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\nx = list(range(10, 110, steps))\ny = res\nz = np.polyfit(x, res, 2)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\nsteps = 10\nnum_tests = 10\nres = []\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\nx = list(range(10, 110, steps))\ny = res\nplt.title('Time taken vs. 
number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n", "step-5": "import matplotlib.pyplot as plt\nimport numpy as np\n\nsteps = 10\nnum_tests = 100\n\nres = []\n\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg/num_tests)\n\nx = list(range(10, 110, steps))\ny = res\n\nz = np.polyfit(x, res, 2)\n# print(z)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x),label = \"Best fit 2 degree polynomial\")\n\nplt.title(\"#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)\")\nplt.xlabel(\"Number of nodes in fully connected graph\")\nplt.ylabel(\"Number of messages\")\n\nplt.legend()\n# plt.show()\n\nplt.savefig(\"Messages.svg\")\n\nplt.clf()\nsteps = 10\nnum_tests = 10\n\nres = []\n\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1,13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp/num_tests)\n res.append(times)\n\nx = list(range(10, 110, steps))\ny = res\n\n# z = np.polyfit(x, res, 2)\n# print(z)\n# p = np.poly1d(z)\n# plt.plot(x, y, 'o')\n# plt.plot(x, p(x),label = \"Best fit 2 degree polynomial\")\n\nplt.title(\"Time taken vs. number of cores used (Averaged over 10 runs)\")\nplt.xlabel(\"Number of nodes in fully connected graph\")\nplt.ylabel(\"Time taken (in seconds)\")\n\n# for procs in range(1,13):\nfor procs in [1,2,4,8,12]:\n plt.plot(x,res[procs-1],label = str((procs))+' Cores')\n\nplt.legend()\n# plt.show()\n\nplt.savefig(\"Time.svg\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> marks = {'S': 'subject', 'O': 'object', 'A': 'attribute', 'C': 'clause'} marks_reverse = {'subject': 'S', 'object': 'O', 'attribute': 'A', 'clause': 'C' } <|reserved_special_token_1|> marks = { "S":"subject", "O":"object", "A":"attribute", "C":"clause", } marks_reverse = { "subject":"S", "object":"O", "attribute":"A", "clause":"C", }
flexible
{ "blob_id": "c66b07c45f4a675a6c7fcec82048a3197910d0d8", "index": 3435, "step-1": "<mask token>\n", "step-2": "marks = {'S': 'subject', 'O': 'object', 'A': 'attribute', 'C': 'clause'}\nmarks_reverse = {'subject': 'S', 'object': 'O', 'attribute': 'A', 'clause': 'C'\n }\n", "step-3": "marks = {\n \"S\":\"subject\",\n \"O\":\"object\",\n \"A\":\"attribute\",\n \"C\":\"clause\",\n}\nmarks_reverse = {\n \"subject\":\"S\",\n \"object\":\"O\",\n \"attribute\":\"A\",\n \"clause\":\"C\",\n}\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> class Profile(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Profile(models.Model): <|reserved_special_token_0|> def __str__(self): return f'{self.user.username} Profile' <|reserved_special_token_1|> <|reserved_special_token_0|> class Profile(models.Model): user = models.OneToOneField(User, on_delete=CASCADE) def __str__(self): return f'{self.user.username} Profile' <|reserved_special_token_1|> from django.db import models from django.contrib.auth.models import User from django.db.models.deletion import CASCADE class Profile(models.Model): user = models.OneToOneField(User, on_delete=CASCADE) def __str__(self): return f'{self.user.username} Profile' <|reserved_special_token_1|> from django.db import models from django.contrib.auth.models import User from django.db.models.deletion import CASCADE class Profile(models.Model): user = models.OneToOneField(User, on_delete=CASCADE) # portfolio = models.ManyToOneRel(User, on_delete=) def __str__(self): return f"{self.user.username} Profile"
flexible
{ "blob_id": "51ff1181f0ddac3a8f7cbd9f9d2eedae29a6c559", "index": 6654, "step-1": "<mask token>\n\n\nclass Profile(models.Model):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Profile(models.Model):\n <mask token>\n\n def __str__(self):\n return f'{self.user.username} Profile'\n", "step-3": "<mask token>\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=CASCADE)\n\n def __str__(self):\n return f'{self.user.username} Profile'\n", "step-4": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.deletion import CASCADE\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=CASCADE)\n\n def __str__(self):\n return f'{self.user.username} Profile'\n", "step-5": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.deletion import CASCADE\n\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=CASCADE)\n # portfolio = models.ManyToOneRel(User, on_delete=)\n\n def __str__(self):\n return f\"{self.user.username} Profile\"", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2011 Eficent (<http://www.eficent.com/>) # Jordi Ballester Alomar <jordi.ballester@eficent.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import tools from osv import fields, osv from tools.translate import _ from datetime import datetime, timedelta date_format = '%Y-%m-%d' class tax(osv.Model): _inherit = 'sgr.tax' def send_alerts(self, cr, uid, context=None): self.send_alerts_with_upcoming_days(cr, uid, 2, context=context) def send_alerts_with_upcoming_days(self, cr, uid, upcoming_days, context=None): now = datetime.now() now_plus_upcoming_days = now + timedelta(days=upcoming_days) tax_to_paid_ids = self.search(cr, uid, [('state','=','to_pay')], context=context) tax_due_date_soon = [] taxs_due = [] overdue_taxs = [] for tax in self.browse(cr, uid, tax_to_paid_ids, context=context): if not tax.approval_date: continue approval_date = datetime.strptime(tax.approval_date, date_format) if approval_date <= now: overdue_taxs.append(tax) elif now < approval_date and approval_date <= now_plus_upcoming_days: taxs_due.append(tax) for tax in taxs_due: self.message_post(cr, uid, [tax.id], body="Tax payment deadline soon", subtype="sgr_alerts.mt_tax_due_date_soon", 
context=context) for tax in overdue_taxs: self.message_post(cr, uid, [tax.id], body="Tax payment deadline expired", subtype="sgr_alerts.mt_tax_due_date", context=context) #all_tax_ids = self.search(cr, uid, [], context=context) #for tax in self.browse(cr, uid, all_tax_ids, context=context): # print 'tax: ' + str(tax.id) # self.message_post(cr, uid, [tax.id], body="Due Date Soon", subtype="sgr_alerts.mt_tax_due_date_soon", context=context) return True tax()
normal
{ "blob_id": "1ddec426e4ad50f1d0e8a57ed841fbdf8c51b00f", "index": 9871, "step-1": "<mask token>\n\n\nclass tax(osv.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass tax(osv.Model):\n _inherit = 'sgr.tax'\n\n def send_alerts(self, cr, uid, context=None):\n self.send_alerts_with_upcoming_days(cr, uid, 2, context=context)\n\n def send_alerts_with_upcoming_days(self, cr, uid, upcoming_days,\n context=None):\n now = datetime.now()\n now_plus_upcoming_days = now + timedelta(days=upcoming_days)\n tax_to_paid_ids = self.search(cr, uid, [('state', '=', 'to_pay')],\n context=context)\n tax_due_date_soon = []\n taxs_due = []\n overdue_taxs = []\n for tax in self.browse(cr, uid, tax_to_paid_ids, context=context):\n if not tax.approval_date:\n continue\n approval_date = datetime.strptime(tax.approval_date, date_format)\n if approval_date <= now:\n overdue_taxs.append(tax)\n elif now < approval_date and approval_date <= now_plus_upcoming_days:\n taxs_due.append(tax)\n for tax in taxs_due:\n self.message_post(cr, uid, [tax.id], body=\n 'Tax payment deadline soon', subtype=\n 'sgr_alerts.mt_tax_due_date_soon', context=context)\n for tax in overdue_taxs:\n self.message_post(cr, uid, [tax.id], body=\n 'Tax payment deadline expired', subtype=\n 'sgr_alerts.mt_tax_due_date', context=context)\n return True\n\n\ntax()\n", "step-3": "<mask token>\ndate_format = '%Y-%m-%d'\n\n\nclass tax(osv.Model):\n _inherit = 'sgr.tax'\n\n def send_alerts(self, cr, uid, context=None):\n self.send_alerts_with_upcoming_days(cr, uid, 2, context=context)\n\n def send_alerts_with_upcoming_days(self, cr, uid, upcoming_days,\n context=None):\n now = datetime.now()\n now_plus_upcoming_days = now + timedelta(days=upcoming_days)\n tax_to_paid_ids = self.search(cr, uid, [('state', '=', 'to_pay')],\n context=context)\n tax_due_date_soon = []\n taxs_due = []\n overdue_taxs = []\n for tax in self.browse(cr, uid, tax_to_paid_ids, context=context):\n if not 
tax.approval_date:\n continue\n approval_date = datetime.strptime(tax.approval_date, date_format)\n if approval_date <= now:\n overdue_taxs.append(tax)\n elif now < approval_date and approval_date <= now_plus_upcoming_days:\n taxs_due.append(tax)\n for tax in taxs_due:\n self.message_post(cr, uid, [tax.id], body=\n 'Tax payment deadline soon', subtype=\n 'sgr_alerts.mt_tax_due_date_soon', context=context)\n for tax in overdue_taxs:\n self.message_post(cr, uid, [tax.id], body=\n 'Tax payment deadline expired', subtype=\n 'sgr_alerts.mt_tax_due_date', context=context)\n return True\n\n\ntax()\n", "step-4": "import tools\nfrom osv import fields, osv\nfrom tools.translate import _\nfrom datetime import datetime, timedelta\ndate_format = '%Y-%m-%d'\n\n\nclass tax(osv.Model):\n _inherit = 'sgr.tax'\n\n def send_alerts(self, cr, uid, context=None):\n self.send_alerts_with_upcoming_days(cr, uid, 2, context=context)\n\n def send_alerts_with_upcoming_days(self, cr, uid, upcoming_days,\n context=None):\n now = datetime.now()\n now_plus_upcoming_days = now + timedelta(days=upcoming_days)\n tax_to_paid_ids = self.search(cr, uid, [('state', '=', 'to_pay')],\n context=context)\n tax_due_date_soon = []\n taxs_due = []\n overdue_taxs = []\n for tax in self.browse(cr, uid, tax_to_paid_ids, context=context):\n if not tax.approval_date:\n continue\n approval_date = datetime.strptime(tax.approval_date, date_format)\n if approval_date <= now:\n overdue_taxs.append(tax)\n elif now < approval_date and approval_date <= now_plus_upcoming_days:\n taxs_due.append(tax)\n for tax in taxs_due:\n self.message_post(cr, uid, [tax.id], body=\n 'Tax payment deadline soon', subtype=\n 'sgr_alerts.mt_tax_due_date_soon', context=context)\n for tax in overdue_taxs:\n self.message_post(cr, uid, [tax.id], body=\n 'Tax payment deadline expired', subtype=\n 'sgr_alerts.mt_tax_due_date', context=context)\n return True\n\n\ntax()\n", "step-5": "# -*- coding: utf-8 
-*-\n##############################################################################\n#\n# Copyright (C) 2011 Eficent (<http://www.eficent.com/>)\n# Jordi Ballester Alomar <jordi.ballester@eficent.com>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nimport tools\nfrom osv import fields, osv\nfrom tools.translate import _\n\nfrom datetime import datetime, timedelta\n\ndate_format = '%Y-%m-%d'\n\nclass tax(osv.Model):\n _inherit = 'sgr.tax'\n \n def send_alerts(self, cr, uid, context=None):\n self.send_alerts_with_upcoming_days(cr, uid, 2, context=context)\n \n def send_alerts_with_upcoming_days(self, cr, uid, upcoming_days, context=None):\n now = datetime.now()\n now_plus_upcoming_days = now + timedelta(days=upcoming_days)\n \n tax_to_paid_ids = self.search(cr, uid, [('state','=','to_pay')], context=context)\n tax_due_date_soon = []\n \n taxs_due = []\n overdue_taxs = []\n \n for tax in self.browse(cr, uid, tax_to_paid_ids, context=context):\n if not tax.approval_date:\n continue\n \n approval_date = datetime.strptime(tax.approval_date, date_format)\n \n if approval_date <= now:\n overdue_taxs.append(tax)\n elif now < approval_date and approval_date <= now_plus_upcoming_days:\n taxs_due.append(tax)\n \n for tax in taxs_due:\n self.message_post(cr, uid, [tax.id], body=\"Tax 
payment deadline soon\", subtype=\"sgr_alerts.mt_tax_due_date_soon\", context=context)\n \n for tax in overdue_taxs:\n self.message_post(cr, uid, [tax.id], body=\"Tax payment deadline expired\", subtype=\"sgr_alerts.mt_tax_due_date\", context=context)\n \n #all_tax_ids = self.search(cr, uid, [], context=context)\n #for tax in self.browse(cr, uid, all_tax_ids, context=context):\n # print 'tax: ' + str(tax.id)\n # self.message_post(cr, uid, [tax.id], body=\"Due Date Soon\", subtype=\"sgr_alerts.mt_tax_due_date_soon\", context=context)\n \n \n return True\n \n \n \ntax()\n\n\n\n", "step-ids": [ 1, 5, 6, 7, 8 ] }
[ 1, 5, 6, 7, 8 ]
<|reserved_special_token_0|> def read_dataset(mode, args): def decode_example(protos, vocab_size): features = {'key': tf.FixedLenFeature(shape=[1], dtype=tf.int64), 'indices': tf.VarLenFeature(dtype=tf.int64), 'values': tf. VarLenFeature(dtype=tf.float32)} parsed_features = tf.parse_single_example(serialized=protos, features=features) values = tf.sparse_merge(sp_ids=parsed_features['indices'], sp_values=parsed_features['values'], vocab_size=vocab_size) key = parsed_features['key'] decoded_sparse_tensor = tf.SparseTensor(indices=tf.concat(values=[ values.indices, [key]], axis=0), values=tf.concat(values=[ values.values, [0.0]], axis=0), dense_shape=values.dense_shape) return decoded_sparse_tensor def remap_keys(sparse_tensor): bad_indices = sparse_tensor.indices bad_values = sparse_tensor.values user_mask = tf.concat(values=[bad_indices[1:, 0] - bad_indices[:-1, 0], tf.constant(value=[1], dtype=tf.int64)], axis=0) good_values = tf.boolean_mask(tensor=bad_values, mask=tf.equal(x= user_mask, y=0)) item_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x= user_mask, y=0)) user_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x= user_mask, y=1))[:, 1] good_user_indices = tf.gather(params=user_indices, indices= item_indices[:, 0]) good_user_indices_expanded = tf.expand_dims(input=good_user_indices, axis=-1) good_item_indices_expanded = tf.expand_dims(input=item_indices[:, 1 ], axis=-1) good_indices = tf.concat(values=[good_user_indices_expanded, good_item_indices_expanded], axis=1) remapped_sparse_tensor = tf.SparseTensor(indices=good_indices, values=good_values, dense_shape=sparse_tensor.dense_shape) return remapped_sparse_tensor def parse_tfrecords(filename, vocab_size): if mode == tf.estimator.ModeKeys.TRAIN: num_epochs = None else: num_epochs = 1 files = tf.gfile.Glob(filename=os.path.join(args['input_path'], filename)) dataset = tf.data.TFRecordDataset(files) dataset = dataset.map(map_func=lambda x: decode_example(x, vocab_size)) dataset = 
dataset.repeat(count=num_epochs) dataset = dataset.batch(batch_size=args['batch_size']) dataset = dataset.map(map_func=lambda x: remap_keys(x)) return dataset.make_one_shot_iterator().get_next() def _input_fn(): features = {WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords( 'items_for_user', args['nitems']), WALSMatrixFactorization. INPUT_COLS: parse_tfrecords('users_for_item', args['nusers']), WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)} return features, None return _input_fn def input_cols(): return parse_tfrecords('users_for_item', args['nusers']) return _input_fn def find_top_k(user, item_factors, k): all_items = tf.matmul(a=tf.expand_dims(input=user, axis=0), b=tf. transpose(a=item_factors)) topk = tf.nn.top_k(input=all_items, k=k) return tf.cast(x=topk.indices, dtype=tf.int64) <|reserved_special_token_0|> def train_and_evaluate(args): train_steps = int(0.5 + 1.0 * args['num_epochs'] * args['nusers'] / args['batch_size']) steps_in_epoch = int(0.5 + args['nusers'] / args['batch_size']) print('Will train for {} steps, evaluating once every {} steps'.format( train_steps, steps_in_epoch)) def experiment_fn(output_dir): return tf.contrib.learn.Experiment(tf.contrib.factorization. WALSMatrixFactorization(num_rows=args['nusers'], num_cols=args[ 'nitems'], embedding_dimension=args['n_embeds'], model_dir=args ['output_dir']), train_input_fn=read_dataset(tf.estimator. ModeKeys.TRAIN, args), eval_input_fn=read_dataset(tf.estimator. ModeKeys.EVAL, args), train_steps=train_steps, eval_steps=1, min_eval_frequency=steps_in_epoch) from tensorflow.contrib.learn.python.learn import learn_runner learn_runner.run(experiment_fn=experiment_fn, output_dir=args['output_dir'] ) batch_predict(args) <|reserved_special_token_1|> <|reserved_special_token_0|> def read_dataset(mode, args): def decode_example(protos, vocab_size): features = {'key': tf.FixedLenFeature(shape=[1], dtype=tf.int64), 'indices': tf.VarLenFeature(dtype=tf.int64), 'values': tf. 
VarLenFeature(dtype=tf.float32)} parsed_features = tf.parse_single_example(serialized=protos, features=features) values = tf.sparse_merge(sp_ids=parsed_features['indices'], sp_values=parsed_features['values'], vocab_size=vocab_size) key = parsed_features['key'] decoded_sparse_tensor = tf.SparseTensor(indices=tf.concat(values=[ values.indices, [key]], axis=0), values=tf.concat(values=[ values.values, [0.0]], axis=0), dense_shape=values.dense_shape) return decoded_sparse_tensor def remap_keys(sparse_tensor): bad_indices = sparse_tensor.indices bad_values = sparse_tensor.values user_mask = tf.concat(values=[bad_indices[1:, 0] - bad_indices[:-1, 0], tf.constant(value=[1], dtype=tf.int64)], axis=0) good_values = tf.boolean_mask(tensor=bad_values, mask=tf.equal(x= user_mask, y=0)) item_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x= user_mask, y=0)) user_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x= user_mask, y=1))[:, 1] good_user_indices = tf.gather(params=user_indices, indices= item_indices[:, 0]) good_user_indices_expanded = tf.expand_dims(input=good_user_indices, axis=-1) good_item_indices_expanded = tf.expand_dims(input=item_indices[:, 1 ], axis=-1) good_indices = tf.concat(values=[good_user_indices_expanded, good_item_indices_expanded], axis=1) remapped_sparse_tensor = tf.SparseTensor(indices=good_indices, values=good_values, dense_shape=sparse_tensor.dense_shape) return remapped_sparse_tensor def parse_tfrecords(filename, vocab_size): if mode == tf.estimator.ModeKeys.TRAIN: num_epochs = None else: num_epochs = 1 files = tf.gfile.Glob(filename=os.path.join(args['input_path'], filename)) dataset = tf.data.TFRecordDataset(files) dataset = dataset.map(map_func=lambda x: decode_example(x, vocab_size)) dataset = dataset.repeat(count=num_epochs) dataset = dataset.batch(batch_size=args['batch_size']) dataset = dataset.map(map_func=lambda x: remap_keys(x)) return dataset.make_one_shot_iterator().get_next() def _input_fn(): features = 
{WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords( 'items_for_user', args['nitems']), WALSMatrixFactorization. INPUT_COLS: parse_tfrecords('users_for_item', args['nusers']), WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)} return features, None return _input_fn def input_cols(): return parse_tfrecords('users_for_item', args['nusers']) return _input_fn def find_top_k(user, item_factors, k): all_items = tf.matmul(a=tf.expand_dims(input=user, axis=0), b=tf. transpose(a=item_factors)) topk = tf.nn.top_k(input=all_items, k=k) return tf.cast(x=topk.indices, dtype=tf.int64) def batch_predict(args): import numpy as np with tf.Session() as sess: estimator = tf.contrib.factorization.WALSMatrixFactorization(num_rows =args['nusers'], num_cols=args['nitems'], embedding_dimension= args['n_embeds'], model_dir=args['output_dir']) user_factors = tf.convert_to_tensor(value=estimator.get_row_factors ()[0]) item_factors = tf.convert_to_tensor(value=estimator.get_col_factors ()[0]) topk = tf.squeeze(input=tf.map_fn(fn=lambda user: find_top_k(user, item_factors, args['topk']), elems=user_factors, dtype=tf.int64)) with file_io.FileIO(os.path.join(args['output_dir'], 'batch_pred.txt'), mode='w') as f: for best_items_for_user in topk.eval(): f.write(','.join(str(x) for x in best_items_for_user) + '\n') def train_and_evaluate(args): train_steps = int(0.5 + 1.0 * args['num_epochs'] * args['nusers'] / args['batch_size']) steps_in_epoch = int(0.5 + args['nusers'] / args['batch_size']) print('Will train for {} steps, evaluating once every {} steps'.format( train_steps, steps_in_epoch)) def experiment_fn(output_dir): return tf.contrib.learn.Experiment(tf.contrib.factorization. WALSMatrixFactorization(num_rows=args['nusers'], num_cols=args[ 'nitems'], embedding_dimension=args['n_embeds'], model_dir=args ['output_dir']), train_input_fn=read_dataset(tf.estimator. ModeKeys.TRAIN, args), eval_input_fn=read_dataset(tf.estimator. 
ModeKeys.EVAL, args), train_steps=train_steps, eval_steps=1, min_eval_frequency=steps_in_epoch) from tensorflow.contrib.learn.python.learn import learn_runner learn_runner.run(experiment_fn=experiment_fn, output_dir=args['output_dir'] ) batch_predict(args) <|reserved_special_token_1|> <|reserved_special_token_0|> tf.logging.set_verbosity(tf.logging.INFO) <|reserved_special_token_0|> def read_dataset(mode, args): def decode_example(protos, vocab_size): features = {'key': tf.FixedLenFeature(shape=[1], dtype=tf.int64), 'indices': tf.VarLenFeature(dtype=tf.int64), 'values': tf. VarLenFeature(dtype=tf.float32)} parsed_features = tf.parse_single_example(serialized=protos, features=features) values = tf.sparse_merge(sp_ids=parsed_features['indices'], sp_values=parsed_features['values'], vocab_size=vocab_size) key = parsed_features['key'] decoded_sparse_tensor = tf.SparseTensor(indices=tf.concat(values=[ values.indices, [key]], axis=0), values=tf.concat(values=[ values.values, [0.0]], axis=0), dense_shape=values.dense_shape) return decoded_sparse_tensor def remap_keys(sparse_tensor): bad_indices = sparse_tensor.indices bad_values = sparse_tensor.values user_mask = tf.concat(values=[bad_indices[1:, 0] - bad_indices[:-1, 0], tf.constant(value=[1], dtype=tf.int64)], axis=0) good_values = tf.boolean_mask(tensor=bad_values, mask=tf.equal(x= user_mask, y=0)) item_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x= user_mask, y=0)) user_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x= user_mask, y=1))[:, 1] good_user_indices = tf.gather(params=user_indices, indices= item_indices[:, 0]) good_user_indices_expanded = tf.expand_dims(input=good_user_indices, axis=-1) good_item_indices_expanded = tf.expand_dims(input=item_indices[:, 1 ], axis=-1) good_indices = tf.concat(values=[good_user_indices_expanded, good_item_indices_expanded], axis=1) remapped_sparse_tensor = tf.SparseTensor(indices=good_indices, values=good_values, 
dense_shape=sparse_tensor.dense_shape) return remapped_sparse_tensor def parse_tfrecords(filename, vocab_size): if mode == tf.estimator.ModeKeys.TRAIN: num_epochs = None else: num_epochs = 1 files = tf.gfile.Glob(filename=os.path.join(args['input_path'], filename)) dataset = tf.data.TFRecordDataset(files) dataset = dataset.map(map_func=lambda x: decode_example(x, vocab_size)) dataset = dataset.repeat(count=num_epochs) dataset = dataset.batch(batch_size=args['batch_size']) dataset = dataset.map(map_func=lambda x: remap_keys(x)) return dataset.make_one_shot_iterator().get_next() def _input_fn(): features = {WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords( 'items_for_user', args['nitems']), WALSMatrixFactorization. INPUT_COLS: parse_tfrecords('users_for_item', args['nusers']), WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)} return features, None return _input_fn def input_cols(): return parse_tfrecords('users_for_item', args['nusers']) return _input_fn def find_top_k(user, item_factors, k): all_items = tf.matmul(a=tf.expand_dims(input=user, axis=0), b=tf. 
transpose(a=item_factors)) topk = tf.nn.top_k(input=all_items, k=k) return tf.cast(x=topk.indices, dtype=tf.int64) def batch_predict(args): import numpy as np with tf.Session() as sess: estimator = tf.contrib.factorization.WALSMatrixFactorization(num_rows =args['nusers'], num_cols=args['nitems'], embedding_dimension= args['n_embeds'], model_dir=args['output_dir']) user_factors = tf.convert_to_tensor(value=estimator.get_row_factors ()[0]) item_factors = tf.convert_to_tensor(value=estimator.get_col_factors ()[0]) topk = tf.squeeze(input=tf.map_fn(fn=lambda user: find_top_k(user, item_factors, args['topk']), elems=user_factors, dtype=tf.int64)) with file_io.FileIO(os.path.join(args['output_dir'], 'batch_pred.txt'), mode='w') as f: for best_items_for_user in topk.eval(): f.write(','.join(str(x) for x in best_items_for_user) + '\n') def train_and_evaluate(args): train_steps = int(0.5 + 1.0 * args['num_epochs'] * args['nusers'] / args['batch_size']) steps_in_epoch = int(0.5 + args['nusers'] / args['batch_size']) print('Will train for {} steps, evaluating once every {} steps'.format( train_steps, steps_in_epoch)) def experiment_fn(output_dir): return tf.contrib.learn.Experiment(tf.contrib.factorization. WALSMatrixFactorization(num_rows=args['nusers'], num_cols=args[ 'nitems'], embedding_dimension=args['n_embeds'], model_dir=args ['output_dir']), train_input_fn=read_dataset(tf.estimator. ModeKeys.TRAIN, args), eval_input_fn=read_dataset(tf.estimator. 
ModeKeys.EVAL, args), train_steps=train_steps, eval_steps=1, min_eval_frequency=steps_in_epoch) from tensorflow.contrib.learn.python.learn import learn_runner learn_runner.run(experiment_fn=experiment_fn, output_dir=args['output_dir'] ) batch_predict(args) <|reserved_special_token_1|> from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import numpy as np import tensorflow as tf from tensorflow.contrib.factorization import WALSMatrixFactorization tf.logging.set_verbosity(tf.logging.INFO) import os import tensorflow as tf from tensorflow.python.lib.io import file_io from tensorflow.contrib.factorization import WALSMatrixFactorization import os import tensorflow as tf from tensorflow.python.lib.io import file_io from tensorflow.contrib.factorization import WALSMatrixFactorization def read_dataset(mode, args): def decode_example(protos, vocab_size): features = {'key': tf.FixedLenFeature(shape=[1], dtype=tf.int64), 'indices': tf.VarLenFeature(dtype=tf.int64), 'values': tf. 
VarLenFeature(dtype=tf.float32)} parsed_features = tf.parse_single_example(serialized=protos, features=features) values = tf.sparse_merge(sp_ids=parsed_features['indices'], sp_values=parsed_features['values'], vocab_size=vocab_size) key = parsed_features['key'] decoded_sparse_tensor = tf.SparseTensor(indices=tf.concat(values=[ values.indices, [key]], axis=0), values=tf.concat(values=[ values.values, [0.0]], axis=0), dense_shape=values.dense_shape) return decoded_sparse_tensor def remap_keys(sparse_tensor): bad_indices = sparse_tensor.indices bad_values = sparse_tensor.values user_mask = tf.concat(values=[bad_indices[1:, 0] - bad_indices[:-1, 0], tf.constant(value=[1], dtype=tf.int64)], axis=0) good_values = tf.boolean_mask(tensor=bad_values, mask=tf.equal(x= user_mask, y=0)) item_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x= user_mask, y=0)) user_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x= user_mask, y=1))[:, 1] good_user_indices = tf.gather(params=user_indices, indices= item_indices[:, 0]) good_user_indices_expanded = tf.expand_dims(input=good_user_indices, axis=-1) good_item_indices_expanded = tf.expand_dims(input=item_indices[:, 1 ], axis=-1) good_indices = tf.concat(values=[good_user_indices_expanded, good_item_indices_expanded], axis=1) remapped_sparse_tensor = tf.SparseTensor(indices=good_indices, values=good_values, dense_shape=sparse_tensor.dense_shape) return remapped_sparse_tensor def parse_tfrecords(filename, vocab_size): if mode == tf.estimator.ModeKeys.TRAIN: num_epochs = None else: num_epochs = 1 files = tf.gfile.Glob(filename=os.path.join(args['input_path'], filename)) dataset = tf.data.TFRecordDataset(files) dataset = dataset.map(map_func=lambda x: decode_example(x, vocab_size)) dataset = dataset.repeat(count=num_epochs) dataset = dataset.batch(batch_size=args['batch_size']) dataset = dataset.map(map_func=lambda x: remap_keys(x)) return dataset.make_one_shot_iterator().get_next() def _input_fn(): features = 
{WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords( 'items_for_user', args['nitems']), WALSMatrixFactorization. INPUT_COLS: parse_tfrecords('users_for_item', args['nusers']), WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)} return features, None return _input_fn def input_cols(): return parse_tfrecords('users_for_item', args['nusers']) return _input_fn def find_top_k(user, item_factors, k): all_items = tf.matmul(a=tf.expand_dims(input=user, axis=0), b=tf. transpose(a=item_factors)) topk = tf.nn.top_k(input=all_items, k=k) return tf.cast(x=topk.indices, dtype=tf.int64) def batch_predict(args): import numpy as np with tf.Session() as sess: estimator = tf.contrib.factorization.WALSMatrixFactorization(num_rows =args['nusers'], num_cols=args['nitems'], embedding_dimension= args['n_embeds'], model_dir=args['output_dir']) user_factors = tf.convert_to_tensor(value=estimator.get_row_factors ()[0]) item_factors = tf.convert_to_tensor(value=estimator.get_col_factors ()[0]) topk = tf.squeeze(input=tf.map_fn(fn=lambda user: find_top_k(user, item_factors, args['topk']), elems=user_factors, dtype=tf.int64)) with file_io.FileIO(os.path.join(args['output_dir'], 'batch_pred.txt'), mode='w') as f: for best_items_for_user in topk.eval(): f.write(','.join(str(x) for x in best_items_for_user) + '\n') def train_and_evaluate(args): train_steps = int(0.5 + 1.0 * args['num_epochs'] * args['nusers'] / args['batch_size']) steps_in_epoch = int(0.5 + args['nusers'] / args['batch_size']) print('Will train for {} steps, evaluating once every {} steps'.format( train_steps, steps_in_epoch)) def experiment_fn(output_dir): return tf.contrib.learn.Experiment(tf.contrib.factorization. WALSMatrixFactorization(num_rows=args['nusers'], num_cols=args[ 'nitems'], embedding_dimension=args['n_embeds'], model_dir=args ['output_dir']), train_input_fn=read_dataset(tf.estimator. ModeKeys.TRAIN, args), eval_input_fn=read_dataset(tf.estimator. 
ModeKeys.EVAL, args), train_steps=train_steps, eval_steps=1, min_eval_frequency=steps_in_epoch) from tensorflow.contrib.learn.python.learn import learn_runner learn_runner.run(experiment_fn=experiment_fn, output_dir=args['output_dir'] ) batch_predict(args) <|reserved_special_token_1|> #!/usr/bin/env python # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import numpy as np import tensorflow as tf from tensorflow.contrib.factorization import WALSMatrixFactorization tf.logging.set_verbosity(tf.logging.INFO) import os import tensorflow as tf from tensorflow.python.lib.io import file_io from tensorflow.contrib.factorization import WALSMatrixFactorization import os import tensorflow as tf from tensorflow.python.lib.io import file_io from tensorflow.contrib.factorization import WALSMatrixFactorization def read_dataset(mode, args): def decode_example(protos, vocab_size): features = { "key": tf.FixedLenFeature(shape = [1], dtype = tf.int64), "indices": tf.VarLenFeature(dtype = tf.int64), "values": tf.VarLenFeature(dtype = tf.float32)} parsed_features = tf.parse_single_example(serialized = protos, features = features) values = tf.sparse_merge(sp_ids = parsed_features["indices"], sp_values = parsed_features["values"], vocab_size = vocab_size) # Save key to remap after batching # This is a temporary workaround to 
assign correct row numbers in each batch. # You can ignore details of this part and remap_keys(). key = parsed_features["key"] decoded_sparse_tensor = tf.SparseTensor(indices = tf.concat(values = [values.indices, [key]], axis = 0), values = tf.concat(values = [values.values, [0.0]], axis = 0), dense_shape = values.dense_shape) return decoded_sparse_tensor def remap_keys(sparse_tensor): # Current indices of our SparseTensor that we need to fix bad_indices = sparse_tensor.indices # shape = (current_batch_size * (number_of_items/users[i] + 1), 2) # Current values of our SparseTensor that we need to fix bad_values = sparse_tensor.values # shape = (current_batch_size * (number_of_items/users[i] + 1),) # Since batch is ordered, the last value for a batch index is the user # Find where the batch index chages to extract the user rows # 1 where user, else 0 user_mask = tf.concat(values = [bad_indices[1:,0] - bad_indices[:-1,0], tf.constant(value = [1], dtype = tf.int64)], axis = 0) # shape = (current_batch_size * (number_of_items/users[i] + 1), 2) # Mask out the user rows from the values good_values = tf.boolean_mask(tensor = bad_values, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],) item_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],) user_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 1))[:, 1] # shape = (current_batch_size,) good_user_indices = tf.gather(params = user_indices, indices = item_indices[:,0]) # shape = (current_batch_size * number_of_items/users[i],) # User and item indices are rank 1, need to make rank 1 to concat good_user_indices_expanded = tf.expand_dims(input = good_user_indices, axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1) good_item_indices_expanded = tf.expand_dims(input = item_indices[:, 1], axis = -1) # shape = (current_batch_size * 
number_of_items/users[i], 1) good_indices = tf.concat(values = [good_user_indices_expanded, good_item_indices_expanded], axis = 1) # shape = (current_batch_size * number_of_items/users[i], 2) remapped_sparse_tensor = tf.SparseTensor(indices = good_indices, values = good_values, dense_shape = sparse_tensor.dense_shape) return remapped_sparse_tensor def parse_tfrecords(filename, vocab_size): if mode == tf.estimator.ModeKeys.TRAIN: num_epochs = None # indefinitely else: num_epochs = 1 # end-of-input after this files = tf.gfile.Glob(filename = os.path.join(args["input_path"], filename)) # Create dataset from file list dataset = tf.data.TFRecordDataset(files) dataset = dataset.map(map_func = lambda x: decode_example(x, vocab_size)) dataset = dataset.repeat(count = num_epochs) dataset = dataset.batch(batch_size = args["batch_size"]) dataset = dataset.map(map_func = lambda x: remap_keys(x)) return dataset.make_one_shot_iterator().get_next() def _input_fn(): features = { WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords("items_for_user", args["nitems"]), WALSMatrixFactorization.INPUT_COLS: parse_tfrecords("users_for_item", args["nusers"]), WALSMatrixFactorization.PROJECT_ROW: tf.constant(True) } return features, None return _input_fn def input_cols(): return parse_tfrecords('users_for_item', args['nusers']) return _input_fn def find_top_k(user, item_factors, k): all_items = tf.matmul(a = tf.expand_dims(input = user, axis = 0), b = tf.transpose(a = item_factors)) topk = tf.nn.top_k(input = all_items, k = k) return tf.cast(x = topk.indices, dtype = tf.int64) def batch_predict(args): import numpy as np with tf.Session() as sess: estimator = tf.contrib.factorization.WALSMatrixFactorization( num_rows = args["nusers"], num_cols = args["nitems"], embedding_dimension = args["n_embeds"], model_dir = args["output_dir"]) # This is how you would get the row factors for out-of-vocab user data # row_factors = 
list(estimator.get_projections(input_fn=read_dataset(tf.estimator.ModeKeys.EVAL, args))) # user_factors = tf.convert_to_tensor(np.array(row_factors)) # But for in-vocab data, the row factors are already in the checkpoint user_factors = tf.convert_to_tensor(value = estimator.get_row_factors()[0]) # (nusers, nembeds) # In either case, we have to assume catalog doesn"t change, so col_factors are read in item_factors = tf.convert_to_tensor(value = estimator.get_col_factors()[0])# (nitems, nembeds) # For each user, find the top K items topk = tf.squeeze(input = tf.map_fn(fn = lambda user: find_top_k(user, item_factors, args["topk"]), elems = user_factors, dtype = tf.int64)) with file_io.FileIO(os.path.join(args["output_dir"], "batch_pred.txt"), mode = 'w') as f: for best_items_for_user in topk.eval(): f.write(",".join(str(x) for x in best_items_for_user) + '\n') def train_and_evaluate(args): train_steps = int(0.5 + (1.0 * args["num_epochs"] * args["nusers"]) / args["batch_size"]) steps_in_epoch = int(0.5 + args["nusers"] / args["batch_size"]) print("Will train for {} steps, evaluating once every {} steps".format(train_steps, steps_in_epoch)) def experiment_fn(output_dir): return tf.contrib.learn.Experiment( tf.contrib.factorization.WALSMatrixFactorization( num_rows = args["nusers"], num_cols = args["nitems"], embedding_dimension = args["n_embeds"], model_dir = args["output_dir"]), train_input_fn = read_dataset(tf.estimator.ModeKeys.TRAIN, args), eval_input_fn = read_dataset(tf.estimator.ModeKeys.EVAL, args), train_steps = train_steps, eval_steps = 1, min_eval_frequency = steps_in_epoch ) from tensorflow.contrib.learn.python.learn import learn_runner learn_runner.run(experiment_fn = experiment_fn, output_dir = args["output_dir"]) batch_predict(args)
flexible
{ "blob_id": "fb9ae5b3cdeac0c254669e214779ad43a02bff6d", "index": 4596, "step-1": "<mask token>\n\n\ndef read_dataset(mode, args):\n\n def decode_example(protos, vocab_size):\n features = {'key': tf.FixedLenFeature(shape=[1], dtype=tf.int64),\n 'indices': tf.VarLenFeature(dtype=tf.int64), 'values': tf.\n VarLenFeature(dtype=tf.float32)}\n parsed_features = tf.parse_single_example(serialized=protos,\n features=features)\n values = tf.sparse_merge(sp_ids=parsed_features['indices'],\n sp_values=parsed_features['values'], vocab_size=vocab_size)\n key = parsed_features['key']\n decoded_sparse_tensor = tf.SparseTensor(indices=tf.concat(values=[\n values.indices, [key]], axis=0), values=tf.concat(values=[\n values.values, [0.0]], axis=0), dense_shape=values.dense_shape)\n return decoded_sparse_tensor\n\n def remap_keys(sparse_tensor):\n bad_indices = sparse_tensor.indices\n bad_values = sparse_tensor.values\n user_mask = tf.concat(values=[bad_indices[1:, 0] - bad_indices[:-1,\n 0], tf.constant(value=[1], dtype=tf.int64)], axis=0)\n good_values = tf.boolean_mask(tensor=bad_values, mask=tf.equal(x=\n user_mask, y=0))\n item_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x=\n user_mask, y=0))\n user_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x=\n user_mask, y=1))[:, 1]\n good_user_indices = tf.gather(params=user_indices, indices=\n item_indices[:, 0])\n good_user_indices_expanded = tf.expand_dims(input=good_user_indices,\n axis=-1)\n good_item_indices_expanded = tf.expand_dims(input=item_indices[:, 1\n ], axis=-1)\n good_indices = tf.concat(values=[good_user_indices_expanded,\n good_item_indices_expanded], axis=1)\n remapped_sparse_tensor = tf.SparseTensor(indices=good_indices,\n values=good_values, dense_shape=sparse_tensor.dense_shape)\n return remapped_sparse_tensor\n\n def parse_tfrecords(filename, vocab_size):\n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None\n else:\n num_epochs = 1\n files = 
tf.gfile.Glob(filename=os.path.join(args['input_path'],\n filename))\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.map(map_func=lambda x: decode_example(x, vocab_size))\n dataset = dataset.repeat(count=num_epochs)\n dataset = dataset.batch(batch_size=args['batch_size'])\n dataset = dataset.map(map_func=lambda x: remap_keys(x))\n return dataset.make_one_shot_iterator().get_next()\n\n def _input_fn():\n features = {WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords(\n 'items_for_user', args['nitems']), WALSMatrixFactorization.\n INPUT_COLS: parse_tfrecords('users_for_item', args['nusers']),\n WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)}\n return features, None\n return _input_fn\n\n def input_cols():\n return parse_tfrecords('users_for_item', args['nusers'])\n return _input_fn\n\n\ndef find_top_k(user, item_factors, k):\n all_items = tf.matmul(a=tf.expand_dims(input=user, axis=0), b=tf.\n transpose(a=item_factors))\n topk = tf.nn.top_k(input=all_items, k=k)\n return tf.cast(x=topk.indices, dtype=tf.int64)\n\n\n<mask token>\n\n\ndef train_and_evaluate(args):\n train_steps = int(0.5 + 1.0 * args['num_epochs'] * args['nusers'] /\n args['batch_size'])\n steps_in_epoch = int(0.5 + args['nusers'] / args['batch_size'])\n print('Will train for {} steps, evaluating once every {} steps'.format(\n train_steps, steps_in_epoch))\n\n def experiment_fn(output_dir):\n return tf.contrib.learn.Experiment(tf.contrib.factorization.\n WALSMatrixFactorization(num_rows=args['nusers'], num_cols=args[\n 'nitems'], embedding_dimension=args['n_embeds'], model_dir=args\n ['output_dir']), train_input_fn=read_dataset(tf.estimator.\n ModeKeys.TRAIN, args), eval_input_fn=read_dataset(tf.estimator.\n ModeKeys.EVAL, args), train_steps=train_steps, eval_steps=1,\n min_eval_frequency=steps_in_epoch)\n from tensorflow.contrib.learn.python.learn import learn_runner\n learn_runner.run(experiment_fn=experiment_fn, output_dir=args['output_dir']\n )\n batch_predict(args)\n", 
"step-2": "<mask token>\n\n\ndef read_dataset(mode, args):\n\n def decode_example(protos, vocab_size):\n features = {'key': tf.FixedLenFeature(shape=[1], dtype=tf.int64),\n 'indices': tf.VarLenFeature(dtype=tf.int64), 'values': tf.\n VarLenFeature(dtype=tf.float32)}\n parsed_features = tf.parse_single_example(serialized=protos,\n features=features)\n values = tf.sparse_merge(sp_ids=parsed_features['indices'],\n sp_values=parsed_features['values'], vocab_size=vocab_size)\n key = parsed_features['key']\n decoded_sparse_tensor = tf.SparseTensor(indices=tf.concat(values=[\n values.indices, [key]], axis=0), values=tf.concat(values=[\n values.values, [0.0]], axis=0), dense_shape=values.dense_shape)\n return decoded_sparse_tensor\n\n def remap_keys(sparse_tensor):\n bad_indices = sparse_tensor.indices\n bad_values = sparse_tensor.values\n user_mask = tf.concat(values=[bad_indices[1:, 0] - bad_indices[:-1,\n 0], tf.constant(value=[1], dtype=tf.int64)], axis=0)\n good_values = tf.boolean_mask(tensor=bad_values, mask=tf.equal(x=\n user_mask, y=0))\n item_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x=\n user_mask, y=0))\n user_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x=\n user_mask, y=1))[:, 1]\n good_user_indices = tf.gather(params=user_indices, indices=\n item_indices[:, 0])\n good_user_indices_expanded = tf.expand_dims(input=good_user_indices,\n axis=-1)\n good_item_indices_expanded = tf.expand_dims(input=item_indices[:, 1\n ], axis=-1)\n good_indices = tf.concat(values=[good_user_indices_expanded,\n good_item_indices_expanded], axis=1)\n remapped_sparse_tensor = tf.SparseTensor(indices=good_indices,\n values=good_values, dense_shape=sparse_tensor.dense_shape)\n return remapped_sparse_tensor\n\n def parse_tfrecords(filename, vocab_size):\n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None\n else:\n num_epochs = 1\n files = tf.gfile.Glob(filename=os.path.join(args['input_path'],\n filename))\n dataset = 
tf.data.TFRecordDataset(files)\n dataset = dataset.map(map_func=lambda x: decode_example(x, vocab_size))\n dataset = dataset.repeat(count=num_epochs)\n dataset = dataset.batch(batch_size=args['batch_size'])\n dataset = dataset.map(map_func=lambda x: remap_keys(x))\n return dataset.make_one_shot_iterator().get_next()\n\n def _input_fn():\n features = {WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords(\n 'items_for_user', args['nitems']), WALSMatrixFactorization.\n INPUT_COLS: parse_tfrecords('users_for_item', args['nusers']),\n WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)}\n return features, None\n return _input_fn\n\n def input_cols():\n return parse_tfrecords('users_for_item', args['nusers'])\n return _input_fn\n\n\ndef find_top_k(user, item_factors, k):\n all_items = tf.matmul(a=tf.expand_dims(input=user, axis=0), b=tf.\n transpose(a=item_factors))\n topk = tf.nn.top_k(input=all_items, k=k)\n return tf.cast(x=topk.indices, dtype=tf.int64)\n\n\ndef batch_predict(args):\n import numpy as np\n with tf.Session() as sess:\n estimator = tf.contrib.factorization.WALSMatrixFactorization(num_rows\n =args['nusers'], num_cols=args['nitems'], embedding_dimension=\n args['n_embeds'], model_dir=args['output_dir'])\n user_factors = tf.convert_to_tensor(value=estimator.get_row_factors\n ()[0])\n item_factors = tf.convert_to_tensor(value=estimator.get_col_factors\n ()[0])\n topk = tf.squeeze(input=tf.map_fn(fn=lambda user: find_top_k(user,\n item_factors, args['topk']), elems=user_factors, dtype=tf.int64))\n with file_io.FileIO(os.path.join(args['output_dir'],\n 'batch_pred.txt'), mode='w') as f:\n for best_items_for_user in topk.eval():\n f.write(','.join(str(x) for x in best_items_for_user) + '\\n')\n\n\ndef train_and_evaluate(args):\n train_steps = int(0.5 + 1.0 * args['num_epochs'] * args['nusers'] /\n args['batch_size'])\n steps_in_epoch = int(0.5 + args['nusers'] / args['batch_size'])\n print('Will train for {} steps, evaluating once every {} steps'.format(\n 
train_steps, steps_in_epoch))\n\n def experiment_fn(output_dir):\n return tf.contrib.learn.Experiment(tf.contrib.factorization.\n WALSMatrixFactorization(num_rows=args['nusers'], num_cols=args[\n 'nitems'], embedding_dimension=args['n_embeds'], model_dir=args\n ['output_dir']), train_input_fn=read_dataset(tf.estimator.\n ModeKeys.TRAIN, args), eval_input_fn=read_dataset(tf.estimator.\n ModeKeys.EVAL, args), train_steps=train_steps, eval_steps=1,\n min_eval_frequency=steps_in_epoch)\n from tensorflow.contrib.learn.python.learn import learn_runner\n learn_runner.run(experiment_fn=experiment_fn, output_dir=args['output_dir']\n )\n batch_predict(args)\n", "step-3": "<mask token>\ntf.logging.set_verbosity(tf.logging.INFO)\n<mask token>\n\n\ndef read_dataset(mode, args):\n\n def decode_example(protos, vocab_size):\n features = {'key': tf.FixedLenFeature(shape=[1], dtype=tf.int64),\n 'indices': tf.VarLenFeature(dtype=tf.int64), 'values': tf.\n VarLenFeature(dtype=tf.float32)}\n parsed_features = tf.parse_single_example(serialized=protos,\n features=features)\n values = tf.sparse_merge(sp_ids=parsed_features['indices'],\n sp_values=parsed_features['values'], vocab_size=vocab_size)\n key = parsed_features['key']\n decoded_sparse_tensor = tf.SparseTensor(indices=tf.concat(values=[\n values.indices, [key]], axis=0), values=tf.concat(values=[\n values.values, [0.0]], axis=0), dense_shape=values.dense_shape)\n return decoded_sparse_tensor\n\n def remap_keys(sparse_tensor):\n bad_indices = sparse_tensor.indices\n bad_values = sparse_tensor.values\n user_mask = tf.concat(values=[bad_indices[1:, 0] - bad_indices[:-1,\n 0], tf.constant(value=[1], dtype=tf.int64)], axis=0)\n good_values = tf.boolean_mask(tensor=bad_values, mask=tf.equal(x=\n user_mask, y=0))\n item_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x=\n user_mask, y=0))\n user_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x=\n user_mask, y=1))[:, 1]\n good_user_indices = 
tf.gather(params=user_indices, indices=\n item_indices[:, 0])\n good_user_indices_expanded = tf.expand_dims(input=good_user_indices,\n axis=-1)\n good_item_indices_expanded = tf.expand_dims(input=item_indices[:, 1\n ], axis=-1)\n good_indices = tf.concat(values=[good_user_indices_expanded,\n good_item_indices_expanded], axis=1)\n remapped_sparse_tensor = tf.SparseTensor(indices=good_indices,\n values=good_values, dense_shape=sparse_tensor.dense_shape)\n return remapped_sparse_tensor\n\n def parse_tfrecords(filename, vocab_size):\n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None\n else:\n num_epochs = 1\n files = tf.gfile.Glob(filename=os.path.join(args['input_path'],\n filename))\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.map(map_func=lambda x: decode_example(x, vocab_size))\n dataset = dataset.repeat(count=num_epochs)\n dataset = dataset.batch(batch_size=args['batch_size'])\n dataset = dataset.map(map_func=lambda x: remap_keys(x))\n return dataset.make_one_shot_iterator().get_next()\n\n def _input_fn():\n features = {WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords(\n 'items_for_user', args['nitems']), WALSMatrixFactorization.\n INPUT_COLS: parse_tfrecords('users_for_item', args['nusers']),\n WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)}\n return features, None\n return _input_fn\n\n def input_cols():\n return parse_tfrecords('users_for_item', args['nusers'])\n return _input_fn\n\n\ndef find_top_k(user, item_factors, k):\n all_items = tf.matmul(a=tf.expand_dims(input=user, axis=0), b=tf.\n transpose(a=item_factors))\n topk = tf.nn.top_k(input=all_items, k=k)\n return tf.cast(x=topk.indices, dtype=tf.int64)\n\n\ndef batch_predict(args):\n import numpy as np\n with tf.Session() as sess:\n estimator = tf.contrib.factorization.WALSMatrixFactorization(num_rows\n =args['nusers'], num_cols=args['nitems'], embedding_dimension=\n args['n_embeds'], model_dir=args['output_dir'])\n user_factors = 
tf.convert_to_tensor(value=estimator.get_row_factors\n ()[0])\n item_factors = tf.convert_to_tensor(value=estimator.get_col_factors\n ()[0])\n topk = tf.squeeze(input=tf.map_fn(fn=lambda user: find_top_k(user,\n item_factors, args['topk']), elems=user_factors, dtype=tf.int64))\n with file_io.FileIO(os.path.join(args['output_dir'],\n 'batch_pred.txt'), mode='w') as f:\n for best_items_for_user in topk.eval():\n f.write(','.join(str(x) for x in best_items_for_user) + '\\n')\n\n\ndef train_and_evaluate(args):\n train_steps = int(0.5 + 1.0 * args['num_epochs'] * args['nusers'] /\n args['batch_size'])\n steps_in_epoch = int(0.5 + args['nusers'] / args['batch_size'])\n print('Will train for {} steps, evaluating once every {} steps'.format(\n train_steps, steps_in_epoch))\n\n def experiment_fn(output_dir):\n return tf.contrib.learn.Experiment(tf.contrib.factorization.\n WALSMatrixFactorization(num_rows=args['nusers'], num_cols=args[\n 'nitems'], embedding_dimension=args['n_embeds'], model_dir=args\n ['output_dir']), train_input_fn=read_dataset(tf.estimator.\n ModeKeys.TRAIN, args), eval_input_fn=read_dataset(tf.estimator.\n ModeKeys.EVAL, args), train_steps=train_steps, eval_steps=1,\n min_eval_frequency=steps_in_epoch)\n from tensorflow.contrib.learn.python.learn import learn_runner\n learn_runner.run(experiment_fn=experiment_fn, output_dir=args['output_dir']\n )\n batch_predict(args)\n", "step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.factorization import WALSMatrixFactorization\ntf.logging.set_verbosity(tf.logging.INFO)\nimport os\nimport tensorflow as tf\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.contrib.factorization import WALSMatrixFactorization\nimport os\nimport tensorflow as tf\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.contrib.factorization import 
WALSMatrixFactorization\n\n\ndef read_dataset(mode, args):\n\n def decode_example(protos, vocab_size):\n features = {'key': tf.FixedLenFeature(shape=[1], dtype=tf.int64),\n 'indices': tf.VarLenFeature(dtype=tf.int64), 'values': tf.\n VarLenFeature(dtype=tf.float32)}\n parsed_features = tf.parse_single_example(serialized=protos,\n features=features)\n values = tf.sparse_merge(sp_ids=parsed_features['indices'],\n sp_values=parsed_features['values'], vocab_size=vocab_size)\n key = parsed_features['key']\n decoded_sparse_tensor = tf.SparseTensor(indices=tf.concat(values=[\n values.indices, [key]], axis=0), values=tf.concat(values=[\n values.values, [0.0]], axis=0), dense_shape=values.dense_shape)\n return decoded_sparse_tensor\n\n def remap_keys(sparse_tensor):\n bad_indices = sparse_tensor.indices\n bad_values = sparse_tensor.values\n user_mask = tf.concat(values=[bad_indices[1:, 0] - bad_indices[:-1,\n 0], tf.constant(value=[1], dtype=tf.int64)], axis=0)\n good_values = tf.boolean_mask(tensor=bad_values, mask=tf.equal(x=\n user_mask, y=0))\n item_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x=\n user_mask, y=0))\n user_indices = tf.boolean_mask(tensor=bad_indices, mask=tf.equal(x=\n user_mask, y=1))[:, 1]\n good_user_indices = tf.gather(params=user_indices, indices=\n item_indices[:, 0])\n good_user_indices_expanded = tf.expand_dims(input=good_user_indices,\n axis=-1)\n good_item_indices_expanded = tf.expand_dims(input=item_indices[:, 1\n ], axis=-1)\n good_indices = tf.concat(values=[good_user_indices_expanded,\n good_item_indices_expanded], axis=1)\n remapped_sparse_tensor = tf.SparseTensor(indices=good_indices,\n values=good_values, dense_shape=sparse_tensor.dense_shape)\n return remapped_sparse_tensor\n\n def parse_tfrecords(filename, vocab_size):\n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None\n else:\n num_epochs = 1\n files = tf.gfile.Glob(filename=os.path.join(args['input_path'],\n filename))\n dataset = 
tf.data.TFRecordDataset(files)\n dataset = dataset.map(map_func=lambda x: decode_example(x, vocab_size))\n dataset = dataset.repeat(count=num_epochs)\n dataset = dataset.batch(batch_size=args['batch_size'])\n dataset = dataset.map(map_func=lambda x: remap_keys(x))\n return dataset.make_one_shot_iterator().get_next()\n\n def _input_fn():\n features = {WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords(\n 'items_for_user', args['nitems']), WALSMatrixFactorization.\n INPUT_COLS: parse_tfrecords('users_for_item', args['nusers']),\n WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)}\n return features, None\n return _input_fn\n\n def input_cols():\n return parse_tfrecords('users_for_item', args['nusers'])\n return _input_fn\n\n\ndef find_top_k(user, item_factors, k):\n all_items = tf.matmul(a=tf.expand_dims(input=user, axis=0), b=tf.\n transpose(a=item_factors))\n topk = tf.nn.top_k(input=all_items, k=k)\n return tf.cast(x=topk.indices, dtype=tf.int64)\n\n\ndef batch_predict(args):\n import numpy as np\n with tf.Session() as sess:\n estimator = tf.contrib.factorization.WALSMatrixFactorization(num_rows\n =args['nusers'], num_cols=args['nitems'], embedding_dimension=\n args['n_embeds'], model_dir=args['output_dir'])\n user_factors = tf.convert_to_tensor(value=estimator.get_row_factors\n ()[0])\n item_factors = tf.convert_to_tensor(value=estimator.get_col_factors\n ()[0])\n topk = tf.squeeze(input=tf.map_fn(fn=lambda user: find_top_k(user,\n item_factors, args['topk']), elems=user_factors, dtype=tf.int64))\n with file_io.FileIO(os.path.join(args['output_dir'],\n 'batch_pred.txt'), mode='w') as f:\n for best_items_for_user in topk.eval():\n f.write(','.join(str(x) for x in best_items_for_user) + '\\n')\n\n\ndef train_and_evaluate(args):\n train_steps = int(0.5 + 1.0 * args['num_epochs'] * args['nusers'] /\n args['batch_size'])\n steps_in_epoch = int(0.5 + args['nusers'] / args['batch_size'])\n print('Will train for {} steps, evaluating once every {} steps'.format(\n 
train_steps, steps_in_epoch))\n\n def experiment_fn(output_dir):\n return tf.contrib.learn.Experiment(tf.contrib.factorization.\n WALSMatrixFactorization(num_rows=args['nusers'], num_cols=args[\n 'nitems'], embedding_dimension=args['n_embeds'], model_dir=args\n ['output_dir']), train_input_fn=read_dataset(tf.estimator.\n ModeKeys.TRAIN, args), eval_input_fn=read_dataset(tf.estimator.\n ModeKeys.EVAL, args), train_steps=train_steps, eval_steps=1,\n min_eval_frequency=steps_in_epoch)\n from tensorflow.contrib.learn.python.learn import learn_runner\n learn_runner.run(experiment_fn=experiment_fn, output_dir=args['output_dir']\n )\n batch_predict(args)\n", "step-5": "#!/usr/bin/env python\n\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.factorization import WALSMatrixFactorization\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nimport os\nimport tensorflow as tf\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.contrib.factorization import WALSMatrixFactorization\n \nimport os\nimport tensorflow as tf\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.contrib.factorization import WALSMatrixFactorization\n \ndef read_dataset(mode, args):\n def decode_example(protos, vocab_size):\n 
features = {\n \"key\": tf.FixedLenFeature(shape = [1], dtype = tf.int64),\n \"indices\": tf.VarLenFeature(dtype = tf.int64),\n \"values\": tf.VarLenFeature(dtype = tf.float32)}\n parsed_features = tf.parse_single_example(serialized = protos, features = features)\n values = tf.sparse_merge(sp_ids = parsed_features[\"indices\"], sp_values = parsed_features[\"values\"], vocab_size = vocab_size)\n # Save key to remap after batching\n # This is a temporary workaround to assign correct row numbers in each batch.\n # You can ignore details of this part and remap_keys().\n key = parsed_features[\"key\"]\n decoded_sparse_tensor = tf.SparseTensor(indices = tf.concat(values = [values.indices, [key]], axis = 0), \n values = tf.concat(values = [values.values, [0.0]], axis = 0), \n dense_shape = values.dense_shape)\n return decoded_sparse_tensor\n \n \n def remap_keys(sparse_tensor):\n # Current indices of our SparseTensor that we need to fix\n bad_indices = sparse_tensor.indices # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)\n # Current values of our SparseTensor that we need to fix\n bad_values = sparse_tensor.values # shape = (current_batch_size * (number_of_items/users[i] + 1),)\n\n # Since batch is ordered, the last value for a batch index is the user\n # Find where the batch index chages to extract the user rows\n # 1 where user, else 0\n user_mask = tf.concat(values = [bad_indices[1:,0] - bad_indices[:-1,0], tf.constant(value = [1], dtype = tf.int64)], axis = 0) # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)\n\n # Mask out the user rows from the values\n good_values = tf.boolean_mask(tensor = bad_values, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)\n item_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)\n user_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = 
user_mask, y = 1))[:, 1] # shape = (current_batch_size,)\n\n good_user_indices = tf.gather(params = user_indices, indices = item_indices[:,0]) # shape = (current_batch_size * number_of_items/users[i],)\n\n # User and item indices are rank 1, need to make rank 1 to concat\n good_user_indices_expanded = tf.expand_dims(input = good_user_indices, axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)\n good_item_indices_expanded = tf.expand_dims(input = item_indices[:, 1], axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)\n good_indices = tf.concat(values = [good_user_indices_expanded, good_item_indices_expanded], axis = 1) # shape = (current_batch_size * number_of_items/users[i], 2)\n\n remapped_sparse_tensor = tf.SparseTensor(indices = good_indices, values = good_values, dense_shape = sparse_tensor.dense_shape)\n return remapped_sparse_tensor\n\n \n def parse_tfrecords(filename, vocab_size):\n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None # indefinitely\n else:\n num_epochs = 1 # end-of-input after this\n\n files = tf.gfile.Glob(filename = os.path.join(args[\"input_path\"], filename))\n\n # Create dataset from file list\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.map(map_func = lambda x: decode_example(x, vocab_size))\n dataset = dataset.repeat(count = num_epochs)\n dataset = dataset.batch(batch_size = args[\"batch_size\"])\n dataset = dataset.map(map_func = lambda x: remap_keys(x))\n return dataset.make_one_shot_iterator().get_next()\n \n def _input_fn():\n features = {\n WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords(\"items_for_user\", args[\"nitems\"]),\n WALSMatrixFactorization.INPUT_COLS: parse_tfrecords(\"users_for_item\", args[\"nusers\"]),\n WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)\n }\n return features, None\n\n return _input_fn\n \n def input_cols():\n return parse_tfrecords('users_for_item', args['nusers'])\n \n return _input_fn\n\ndef find_top_k(user, item_factors, 
k):\n all_items = tf.matmul(a = tf.expand_dims(input = user, axis = 0), b = tf.transpose(a = item_factors))\n topk = tf.nn.top_k(input = all_items, k = k)\n return tf.cast(x = topk.indices, dtype = tf.int64)\n \ndef batch_predict(args):\n import numpy as np\n with tf.Session() as sess:\n estimator = tf.contrib.factorization.WALSMatrixFactorization(\n num_rows = args[\"nusers\"], \n num_cols = args[\"nitems\"],\n embedding_dimension = args[\"n_embeds\"],\n model_dir = args[\"output_dir\"])\n \n # This is how you would get the row factors for out-of-vocab user data\n # row_factors = list(estimator.get_projections(input_fn=read_dataset(tf.estimator.ModeKeys.EVAL, args)))\n # user_factors = tf.convert_to_tensor(np.array(row_factors))\n\n # But for in-vocab data, the row factors are already in the checkpoint\n user_factors = tf.convert_to_tensor(value = estimator.get_row_factors()[0]) # (nusers, nembeds)\n # In either case, we have to assume catalog doesn\"t change, so col_factors are read in\n item_factors = tf.convert_to_tensor(value = estimator.get_col_factors()[0])# (nitems, nembeds)\n\n # For each user, find the top K items\n topk = tf.squeeze(input = tf.map_fn(fn = lambda user: find_top_k(user, item_factors, args[\"topk\"]), elems = user_factors, dtype = tf.int64))\n with file_io.FileIO(os.path.join(args[\"output_dir\"], \"batch_pred.txt\"), mode = 'w') as f:\n for best_items_for_user in topk.eval():\n f.write(\",\".join(str(x) for x in best_items_for_user) + '\\n')\n\ndef train_and_evaluate(args):\n train_steps = int(0.5 + (1.0 * args[\"num_epochs\"] * args[\"nusers\"]) / args[\"batch_size\"])\n steps_in_epoch = int(0.5 + args[\"nusers\"] / args[\"batch_size\"])\n print(\"Will train for {} steps, evaluating once every {} steps\".format(train_steps, steps_in_epoch))\n def experiment_fn(output_dir):\n return tf.contrib.learn.Experiment(\n tf.contrib.factorization.WALSMatrixFactorization(\n num_rows = args[\"nusers\"], \n num_cols = args[\"nitems\"],\n 
embedding_dimension = args[\"n_embeds\"],\n model_dir = args[\"output_dir\"]),\n train_input_fn = read_dataset(tf.estimator.ModeKeys.TRAIN, args),\n eval_input_fn = read_dataset(tf.estimator.ModeKeys.EVAL, args),\n train_steps = train_steps,\n eval_steps = 1,\n min_eval_frequency = steps_in_epoch\n )\n\n from tensorflow.contrib.learn.python.learn import learn_runner\n learn_runner.run(experiment_fn = experiment_fn, output_dir = args[\"output_dir\"])\n \n batch_predict(args)", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.append(os.path.dirname(os.path.abspath(__file__))) execute('scrapy crawl laptop'.split()) <|reserved_special_token_1|> import os, sys from scrapy.cmdline import execute sys.path.append(os.path.dirname(os.path.abspath(__file__))) execute('scrapy crawl laptop'.split())
flexible
{ "blob_id": "71ff8e8a62a3b2731071ed7a039b51c150ebaca4", "index": 3671, "step-1": "<mask token>\n", "step-2": "<mask token>\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nexecute('scrapy crawl laptop'.split())\n", "step-3": "import os, sys\nfrom scrapy.cmdline import execute\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nexecute('scrapy crawl laptop'.split())\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django.contrib import admin from .models import User # Register your models here. @admin.register(User) class AuthorizationUserAdmin(admin.ModelAdmin): exclude = ['open_id'] pass
normal
{ "blob_id": "d3585e7b761fa7b2eeaacf09f84bb6a4abc1cf02", "index": 6806, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@admin.register(User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n <mask token>\n pass\n", "step-3": "<mask token>\n\n\n@admin.register(User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n exclude = ['open_id']\n pass\n", "step-4": "from django.contrib import admin\nfrom .models import User\n\n\n@admin.register(User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n exclude = ['open_id']\n pass\n", "step-5": "from django.contrib import admin\r\nfrom .models import User\r\n\r\n\r\n# Register your models here.\r\n\r\n@admin.register(User)\r\nclass AuthorizationUserAdmin(admin.ModelAdmin):\r\n exclude = ['open_id']\r\n pass\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Imprimidor(Thread): def __init__(self, nombre, berlin, bolsa_dinero): super().__init__() pass <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Imprimidor(Thread): def __init__(self, nombre, berlin, bolsa_dinero): super().__init__() pass def run(self): """ Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada iteracion chequea si se cumple que hay problema con el dinero (20%) """ pass <|reserved_special_token_0|> def problema_papel(self): """ Probabilidad de problema con el papel de 20% """ pass <|reserved_special_token_1|> <|reserved_special_token_0|> class Imprimidor(Thread): def __init__(self, nombre, berlin, bolsa_dinero): super().__init__() pass def run(self): """ Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada iteracion chequea si se cumple que hay problema con el dinero (20%) """ pass def imprimir_dinero(self, dinero): """ Llamar a este método para imprimir dinero. ***Acá debes procurarte de evitar errores de concurrencia*** :param dinero: :return: """ pass def problema_papel(self): """ Probabilidad de problema con el papel de 20% """ pass <|reserved_special_token_1|> from threading import Thread, Lock from utils import reloj import random class Imprimidor(Thread): def __init__(self, nombre, berlin, bolsa_dinero): super().__init__() pass def run(self): """ Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada iteracion chequea si se cumple que hay problema con el dinero (20%) """ pass def imprimir_dinero(self, dinero): """ Llamar a este método para imprimir dinero. 
***Acá debes procurarte de evitar errores de concurrencia*** :param dinero: :return: """ pass def problema_papel(self): """ Probabilidad de problema con el papel de 20% """ pass <|reserved_special_token_1|> from threading import Thread, Lock from utils import reloj import random class Imprimidor(Thread): def __init__(self, nombre, berlin, bolsa_dinero): super().__init__() pass def run(self): ''' Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada iteracion chequea si se cumple que hay problema con el dinero (20%) ''' pass def imprimir_dinero(self, dinero): ''' Llamar a este método para imprimir dinero. ***Acá debes procurarte de evitar errores de concurrencia*** :param dinero: :return: ''' pass def problema_papel(self): ''' Probabilidad de problema con el papel de 20% ''' pass
flexible
{ "blob_id": "ab79e2f9584dbbb526c62bde882a1bc9874b56f9", "index": 7903, "step-1": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n <mask token>\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n \"\"\"\n pass\n", "step-3": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n\n def imprimir_dinero(self, dinero):\n \"\"\"\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n \"\"\"\n pass\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n \"\"\"\n pass\n", "step-4": "from threading import Thread, Lock\nfrom utils import reloj\nimport random\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n\n def imprimir_dinero(self, dinero):\n \"\"\"\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n \"\"\"\n pass\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n 
\"\"\"\n pass\n", "step-5": "from threading import Thread, Lock\nfrom utils import reloj\nimport random\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n '''\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n '''\n pass\n\n def imprimir_dinero(self, dinero):\n '''\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n '''\n pass\n\n def problema_papel(self):\n '''\n Probabilidad de problema con el papel de 20%\n '''\n pass\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
import cv2 import imutils import detect def detectByPathVideo(path, writer): video = cv2.VideoCapture(path) check, frame = video.read() if check == False: print('Video Not Found. Please Enter a Valid Path (Full path of Video Should be Provided).') return print('Detecting people...') while video.isOpened(): #check is True if reading was successful check, frame = video.read() if check: frame = imutils.resize(frame , width=min(800,frame.shape[1])) frame = detect.detect(frame) if writer is not None: writer.write(frame) key = cv2.waitKey(1) if key== ord('q'): break else: break video.release() cv2.destroyAllWindows() def detectByCamera(writer): video = cv2.VideoCapture(0) print('Detecting people...') while True: check, frame = video.read() frame = detect.detect(frame) if writer is not None: writer.write(frame) key = cv2.waitKey(1) if key == ord('q'): break video.release() cv2.destroyAllWindows()
normal
{ "blob_id": "5044b8bc8cabd7762df6a0327828df4546ab8d96", "index": 9000, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef detectByPathVideo(path, writer):\n video = cv2.VideoCapture(path)\n check, frame = video.read()\n if check == False:\n print(\n 'Video Not Found. Please Enter a Valid Path (Full path of Video Should be Provided).'\n )\n return\n print('Detecting people...')\n while video.isOpened():\n check, frame = video.read()\n if check:\n frame = imutils.resize(frame, width=min(800, frame.shape[1]))\n frame = detect.detect(frame)\n if writer is not None:\n writer.write(frame)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n else:\n break\n video.release()\n cv2.destroyAllWindows()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef detectByPathVideo(path, writer):\n video = cv2.VideoCapture(path)\n check, frame = video.read()\n if check == False:\n print(\n 'Video Not Found. Please Enter a Valid Path (Full path of Video Should be Provided).'\n )\n return\n print('Detecting people...')\n while video.isOpened():\n check, frame = video.read()\n if check:\n frame = imutils.resize(frame, width=min(800, frame.shape[1]))\n frame = detect.detect(frame)\n if writer is not None:\n writer.write(frame)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n else:\n break\n video.release()\n cv2.destroyAllWindows()\n\n\ndef detectByCamera(writer):\n video = cv2.VideoCapture(0)\n print('Detecting people...')\n while True:\n check, frame = video.read()\n frame = detect.detect(frame)\n if writer is not None:\n writer.write(frame)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n video.release()\n cv2.destroyAllWindows()\n", "step-4": "import cv2\nimport imutils\nimport detect\n\n\ndef detectByPathVideo(path, writer):\n video = cv2.VideoCapture(path)\n check, frame = video.read()\n if check == False:\n print(\n 'Video Not Found. 
Please Enter a Valid Path (Full path of Video Should be Provided).'\n )\n return\n print('Detecting people...')\n while video.isOpened():\n check, frame = video.read()\n if check:\n frame = imutils.resize(frame, width=min(800, frame.shape[1]))\n frame = detect.detect(frame)\n if writer is not None:\n writer.write(frame)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n else:\n break\n video.release()\n cv2.destroyAllWindows()\n\n\ndef detectByCamera(writer):\n video = cv2.VideoCapture(0)\n print('Detecting people...')\n while True:\n check, frame = video.read()\n frame = detect.detect(frame)\n if writer is not None:\n writer.write(frame)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n video.release()\n cv2.destroyAllWindows()\n", "step-5": "import cv2\r\nimport imutils\r\nimport detect\r\n\r\ndef detectByPathVideo(path, writer):\r\n\r\n video = cv2.VideoCapture(path)\r\n check, frame = video.read()\r\n if check == False:\r\n print('Video Not Found. Please Enter a Valid Path (Full path of Video Should be Provided).')\r\n return\r\n\r\n print('Detecting people...')\r\n while video.isOpened():\r\n #check is True if reading was successful \r\n check, frame = video.read()\r\n\r\n if check:\r\n frame = imutils.resize(frame , width=min(800,frame.shape[1]))\r\n frame = detect.detect(frame)\r\n \r\n if writer is not None:\r\n writer.write(frame)\r\n \r\n key = cv2.waitKey(1)\r\n if key== ord('q'):\r\n break\r\n else:\r\n break\r\n video.release()\r\n cv2.destroyAllWindows()\r\n\r\ndef detectByCamera(writer): \r\n video = cv2.VideoCapture(0)\r\n print('Detecting people...')\r\n\r\n while True:\r\n check, frame = video.read()\r\n\r\n frame = detect.detect(frame)\r\n if writer is not None:\r\n writer.write(frame)\r\n\r\n key = cv2.waitKey(1)\r\n if key == ord('q'):\r\n break\r\n\r\n video.release()\r\n cv2.destroyAllWindows()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def intersection(list1, list2): return set(list1).intersection(list2) def computeSteps(x, y, step, steps): curr = 0 if (x, y) in steps: curr = steps.get((x, y)) steps[x, y] = step + curr <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def intersection(list1, list2): return set(list1).intersection(list2) def computeSteps(x, y, step, steps): curr = 0 if (x, y) in steps: curr = steps.get((x, y)) steps[x, y] = step + curr def buildPoints(wire, steps): points = [] x, y = 0, 0 s = 0 for p in wire: direction = p[0] step = int(p[1:]) if direction == 'D': for i in range(0, step): y -= 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'U': for i in range(0, step): y += 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'L': for i in range(0, step): x -= 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'R': for i in range(0, step): x += 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) return points with open(filepath) as fp: steps = {} port = 0, 0 wire1 = fp.readline().strip().split(',') wire2 = fp.readline().strip().split(',') point1 = buildPoints(wire1, steps) point2 = buildPoints(wire2, steps) commonPoints = intersection(point1, point2) min = sys.maxsize for k in commonPoints: val = steps.get(k) if val < min: min = val print(min) <|reserved_special_token_1|> <|reserved_special_token_0|> filepath = 'input.txt' def intersection(list1, list2): return set(list1).intersection(list2) def computeSteps(x, y, step, steps): curr = 0 if (x, y) in steps: curr = steps.get((x, y)) steps[x, y] = step + curr def buildPoints(wire, steps): points = [] x, y = 0, 0 s = 0 for p in wire: direction = p[0] step = int(p[1:]) if direction == 'D': for i in range(0, step): y -= 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'U': for i in range(0, step): y += 1 points.append((x, y)) s += 1 computeSteps(x, y, s, 
steps) elif direction == 'L': for i in range(0, step): x -= 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'R': for i in range(0, step): x += 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) return points with open(filepath) as fp: steps = {} port = 0, 0 wire1 = fp.readline().strip().split(',') wire2 = fp.readline().strip().split(',') point1 = buildPoints(wire1, steps) point2 = buildPoints(wire2, steps) commonPoints = intersection(point1, point2) min = sys.maxsize for k in commonPoints: val = steps.get(k) if val < min: min = val print(min) <|reserved_special_token_1|> import sys filepath = 'input.txt' def intersection(list1, list2): return set(list1).intersection(list2) def computeSteps(x, y, step, steps): curr = 0 if (x, y) in steps: curr = steps.get((x, y)) steps[x, y] = step + curr def buildPoints(wire, steps): points = [] x, y = 0, 0 s = 0 for p in wire: direction = p[0] step = int(p[1:]) if direction == 'D': for i in range(0, step): y -= 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'U': for i in range(0, step): y += 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'L': for i in range(0, step): x -= 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'R': for i in range(0, step): x += 1 points.append((x, y)) s += 1 computeSteps(x, y, s, steps) return points with open(filepath) as fp: steps = {} port = 0, 0 wire1 = fp.readline().strip().split(',') wire2 = fp.readline().strip().split(',') point1 = buildPoints(wire1, steps) point2 = buildPoints(wire2, steps) commonPoints = intersection(point1, point2) min = sys.maxsize for k in commonPoints: val = steps.get(k) if val < min: min = val print(min) <|reserved_special_token_1|> import sys filepath = 'input.txt' def intersection(list1, list2): return set(list1).intersection(list2) def computeSteps(x, y, step, steps): # build dictionary with steps for each point curr = 0 if (x,y) in steps: 
curr = steps.get((x,y)) steps[(x,y)] = step + curr def buildPoints(wire, steps): points = [] x, y = 0, 0 s = 0 for p in wire: direction = p[0] step = int(p[1:]) if direction == 'D': for i in range(0, step): y -= 1 points.append((x,y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'U': for i in range(0, step): y += 1 points.append((x,y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'L': for i in range(0, step): x -= 1 points.append((x,y)) s += 1 computeSteps(x, y, s, steps) elif direction == 'R': for i in range(0, step): x += 1 points.append((x,y)) s += 1 computeSteps(x, y, s, steps) #end for return points with open(filepath) as fp: steps = {} port = (0,0) wire1 = fp.readline().strip().split(',') wire2 = fp.readline().strip().split(',') point1 = buildPoints(wire1, steps) point2 = buildPoints(wire2, steps) commonPoints = intersection(point1, point2) min = sys.maxsize for k in commonPoints: val = steps.get(k) if val < min: min = val print(min)
flexible
{ "blob_id": "e9e119dd69f9416e007e748d7f494741140efc8e", "index": 8182, "step-1": "<mask token>\n\n\ndef intersection(list1, list2):\n return set(list1).intersection(list2)\n\n\ndef computeSteps(x, y, step, steps):\n curr = 0\n if (x, y) in steps:\n curr = steps.get((x, y))\n steps[x, y] = step + curr\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef intersection(list1, list2):\n return set(list1).intersection(list2)\n\n\ndef computeSteps(x, y, step, steps):\n curr = 0\n if (x, y) in steps:\n curr = steps.get((x, y))\n steps[x, y] = step + curr\n\n\ndef buildPoints(wire, steps):\n points = []\n x, y = 0, 0\n s = 0\n for p in wire:\n direction = p[0]\n step = int(p[1:])\n if direction == 'D':\n for i in range(0, step):\n y -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'U':\n for i in range(0, step):\n y += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'L':\n for i in range(0, step):\n x -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'R':\n for i in range(0, step):\n x += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n return points\n\n\nwith open(filepath) as fp:\n steps = {}\n port = 0, 0\n wire1 = fp.readline().strip().split(',')\n wire2 = fp.readline().strip().split(',')\n point1 = buildPoints(wire1, steps)\n point2 = buildPoints(wire2, steps)\n commonPoints = intersection(point1, point2)\n min = sys.maxsize\n for k in commonPoints:\n val = steps.get(k)\n if val < min:\n min = val\n print(min)\n", "step-3": "<mask token>\nfilepath = 'input.txt'\n\n\ndef intersection(list1, list2):\n return set(list1).intersection(list2)\n\n\ndef computeSteps(x, y, step, steps):\n curr = 0\n if (x, y) in steps:\n curr = steps.get((x, y))\n steps[x, y] = step + curr\n\n\ndef buildPoints(wire, steps):\n points = []\n x, y = 0, 0\n s = 0\n for p in wire:\n direction = p[0]\n step = int(p[1:])\n if direction == 'D':\n for i in range(0, 
step):\n y -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'U':\n for i in range(0, step):\n y += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'L':\n for i in range(0, step):\n x -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'R':\n for i in range(0, step):\n x += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n return points\n\n\nwith open(filepath) as fp:\n steps = {}\n port = 0, 0\n wire1 = fp.readline().strip().split(',')\n wire2 = fp.readline().strip().split(',')\n point1 = buildPoints(wire1, steps)\n point2 = buildPoints(wire2, steps)\n commonPoints = intersection(point1, point2)\n min = sys.maxsize\n for k in commonPoints:\n val = steps.get(k)\n if val < min:\n min = val\n print(min)\n", "step-4": "import sys\nfilepath = 'input.txt'\n\n\ndef intersection(list1, list2):\n return set(list1).intersection(list2)\n\n\ndef computeSteps(x, y, step, steps):\n curr = 0\n if (x, y) in steps:\n curr = steps.get((x, y))\n steps[x, y] = step + curr\n\n\ndef buildPoints(wire, steps):\n points = []\n x, y = 0, 0\n s = 0\n for p in wire:\n direction = p[0]\n step = int(p[1:])\n if direction == 'D':\n for i in range(0, step):\n y -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'U':\n for i in range(0, step):\n y += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'L':\n for i in range(0, step):\n x -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'R':\n for i in range(0, step):\n x += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n return points\n\n\nwith open(filepath) as fp:\n steps = {}\n port = 0, 0\n wire1 = fp.readline().strip().split(',')\n wire2 = fp.readline().strip().split(',')\n point1 = buildPoints(wire1, steps)\n point2 = buildPoints(wire2, steps)\n commonPoints = intersection(point1, 
point2)\n min = sys.maxsize\n for k in commonPoints:\n val = steps.get(k)\n if val < min:\n min = val\n print(min)\n", "step-5": "import sys\r\nfilepath = 'input.txt' \r\n\r\ndef intersection(list1, list2): \r\n return set(list1).intersection(list2) \r\n\r\ndef computeSteps(x, y, step, steps):\r\n # build dictionary with steps for each point\r\n curr = 0\r\n if (x,y) in steps:\r\n curr = steps.get((x,y)) \r\n steps[(x,y)] = step + curr\r\n\r\n \r\ndef buildPoints(wire, steps):\r\n points = []\r\n x, y = 0, 0\r\n s = 0\r\n for p in wire:\r\n direction = p[0]\r\n step = int(p[1:])\r\n if direction == 'D':\r\n for i in range(0, step):\r\n y -= 1\r\n points.append((x,y)) \r\n s += 1 \r\n computeSteps(x, y, s, steps) \r\n elif direction == 'U':\r\n for i in range(0, step):\r\n y += 1\r\n points.append((x,y)) \r\n s += 1 \r\n computeSteps(x, y, s, steps) \r\n elif direction == 'L':\r\n for i in range(0, step):\r\n x -= 1\r\n points.append((x,y))\r\n s += 1 \r\n computeSteps(x, y, s, steps) \r\n elif direction == 'R':\r\n for i in range(0, step):\r\n x += 1\r\n points.append((x,y))\r\n s += 1 \r\n computeSteps(x, y, s, steps) \r\n \r\n #end for\r\n return points\r\n\r\nwith open(filepath) as fp: \t \r\n steps = {} \r\n port = (0,0)\r\n wire1 = fp.readline().strip().split(',')\r\n wire2 = fp.readline().strip().split(',')\r\n point1 = buildPoints(wire1, steps)\r\n point2 = buildPoints(wire2, steps)\r\n \r\n commonPoints = intersection(point1, point2)\r\n\r\n min = sys.maxsize\r\n for k in commonPoints:\r\n val = steps.get(k)\r\n if val < min:\r\n min = val\r\n \r\n print(min)\r\n \r\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.feature_selection import SelectKBest, chi2 from sklearn import metrics, ensemble, linear_model, svm from numpy import log, ones, array, zeros, mean, std, repeat import numpy as np import scipy.sparse as sp import re import csv from time import time import functools from nltk.util import skipgrams from nltk.stem import WordNetLemmatizer from nltk.stem import PorterStemmer from nltk.tokenize import word_tokenize DIR_PATH = "" TRAIN_FILE = DIR_PATH + "train.csv" TEST_SOL_FILE = DIR_PATH + "test_with_solutions.csv" # This is also used for training, together with TRAIN_FILE BADWORDS_FILE = DIR_PATH + "bad_words.txt" # attached with submission TEST_FILE = DIR_PATH + "test.csv" # set this to the new test file name PREDICTION_FILE = DIR_PATH + "preds.csv" # predictions will be written here def normalize(f , lammatize= False): f = [x.lower() for x in f] f = [x.replace("\\n"," ") for x in f] f = [x.replace("\\t"," ") for x in f] f = [x.replace("\\xa0"," ") for x in f] f = [x.replace("\\xc2"," ") for x in f] #f = [x.replace(","," ").replace("."," ").replace(" ", " ") for x in f] #f = [re.subn(" ([a-z]) ","\\1", x)[0] for x in f] #f = [x.replace(" "," ") for x in f] f = [x.replace(" u "," you ") for x in f] f = [x.replace(" em "," them ") for x in f] f = [x.replace(" da "," the ") for x in f] f = [x.replace(" yo "," you ") for x in f] f = [x.replace(" ur "," you ") for x in f] #f = [x.replace(" ur "," your ") for x in f] #f = [x.replace(" ur "," you're ") for x in f] f = [x.replace("won't", "will not") for x in f] f = [x.replace("can't", "cannot") for x in f] f = [x.replace("i'm", "i am") for x in f] f = [x.replace(" im ", " i am ") for x in f] f = [x.replace("ain't", "is not") for x in f] f = [x.replace("'ll", " will") for x in f] f = [x.replace("'t", " not") for x in f] f = [x.replace("'ve", " have") for x in f] f = [x.replace("'s", " is") for x in f] f = [x.replace("'re", " are") for x 
in f] f = [x.replace("'d", " would") for x in f] #f = [x.replace("outta", "out of") for x in f] bwMap = loadBW() for key, value in bwMap.items(): kpad = " " + key + " " vpad = " " + value + " " f = [x.replace(kpad, vpad) for x in f] # stemming """ f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f] #f = [re.subn("([abcdefghijklmnopqrstuvwxyz])s( |$)", "\\1 ", x)[0].strip() for x in f] f = [re.subn("s( |$)", " ", x)[0].strip() for x in f] f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f] f = [x.replace("tard ", " ") for x in f] f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f] f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f] f = [re.subn("<\S*>","", x)[0].strip() for x in f] """ tokenized_sents = [word_tokenize(i) for i in f] if not lammatize: stemmer = PorterStemmer() for i in range (0, len(tokenized_sents)): for j in range (0,len(tokenized_sents[i])): tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j]) else: lammatizer = WordNetLemmatizer() for i in range (0, len(tokenized_sents)): for j in range (0,len(tokenized_sents[i])): tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents[i][j]) for i in range (0, len(tokenized_sents)): f[i] = " ".join(tokenized_sents[i]) return f def ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features=500, binary = False, do_normalization = False, stopwords = False, verbose = True, analyzer_char = False): f = data if do_normalization: f = normalize(f) ftrain = f[:ntrain] ftest = f[ntrain:] y_train = labels[:ntrain] t0 = time() analyzer_type = 'word' if analyzer_char: analyzer_type = 'char' if binary: vectorizer = CountVectorizer(ngram_range = (min_ngrams , max_ngrams), binary =True) elif stopwords: vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),stop_words='english',analyzer=analyzer_type,sublinear_tf=True) else: vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),sublinear_tf=True,analyzer=analyzer_type) if verbose: print 
("extracting ngrams... where n is [%d,%d]" % (max_ngrams,min_ngrams)) X_train = vectorizer.fit_transform(ftrain) X_test = vectorizer.transform(ftest) if verbose: print ("done in %fs" % (time() - t0), X_train.shape, X_test.shape) y = array(y_train) numFts = no_of_features if numFts < X_train.shape[1]: t0 = time() ch2 = SelectKBest(chi2, k=numFts) X_train = ch2.fit_transform(X_train, y) X_test = ch2.transform(X_test) assert sp.issparse(X_train) if verbose: print ("Extracting best features by a chi-squared test.. ", X_train.shape, X_test.shape ) return X_train, y, X_test def skipGrams(data, labels, ntrain,nm=500,min_ngrams=1, max_ngrams=1, no_of_features=500, do_normalization = False, verbose = True): f = data if do_normalization: f = normalize(f) ftrain = f[:ntrain] ftest = f[ntrain:] y_train = labels[:ntrain] t0 = time() skipper = functools.partial(skipgrams, n=2, k=3) vectorizer = TfidfVectorizer(sublinear_tf=True,analyzer=skipper) X_train = vectorizer.fit_transform(ftrain) X_test = vectorizer.transform(ftest) if verbose: print ("done in %fs" % (time() - t0), X_train.shape, X_test.shape) y = array(y_train) numFts = nm if numFts < X_train.shape[1]: t0 = time() ch2 = SelectKBest(chi2, k=numFts) X_train = ch2.fit_transform(X_train, y) X_test = ch2.transform(X_test) assert sp.issparse(X_train) if verbose: print ("Extracting best features by a chi-squared test.. 
", X_train.shape, X_test.shape) return X_train, y, X_test def specialCases(data, labels, ntrain, verbose = True): g = [x.lower().replace("you are"," SSS ").replace("you're"," SSS ").replace(" ur ", " SSS ").split("SSS")[1:] for x in data] f = [] for x in g: fts = " " x = normalize(x) for y in x: w = y.strip().replace("?",".").split(".") fts = fts + " " + w[0] f.append(fts) X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100, do_normalization = True, verbose = verbose) return X_trn, y_trn, X_tst def loadBW(): f = open(BADWORDS_FILE, "r") bwMap = dict() for line in f: sp = line.strip().lower().split(",") if len(sp) == 2: bwMap[sp[0].strip()] = sp[1].strip() return bwMap def readCsv(fname, skipFirst=True, delimiter = ","): reader = csv.reader(open(fname),delimiter=delimiter) rows = [] count = 1 for row in reader: if not skipFirst or count > 1: rows.append(row) count += 1 return rows def write_submission(x,filename): wtr = open(filename,"w") for i in range(len(x)): wtr.write(format(x[i],"0.10f")) wtr.write("\n") wtr.close() def run(verbose = True): t0 = time() train_data = readCsv(TRAIN_FILE) train2_data = readCsv(TEST_SOL_FILE) train_data = train_data + train2_data # print(train_data) labels = array([int(x[0]) for x in train_data]) # print(labels) train = [x[2] for x in train_data] test_data = readCsv(TEST_FILE) test_data = [x[2] for x in test_data] data = train + test_data n = len(data) ntrain = len(train) X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose) """ X_train1, y_train, X_test1 = ngrams(data, labels, ntrain, 1, 1, 2000, do_normalization = True, verbose = verbose) X_train2, y_train, X_test2 = ngrams(data, labels, ntrain, 2, 2, 4000, do_normalization = True, verbose = verbose) X_train3, y_train, X_test3 = ngrams(data, labels, ntrain, 3, 3, 100, do_normalization = True, verbose = verbose) X_train4, y_train, X_test4 = ngrams(data, labels, ntrain, 4, 4, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) 
X_train5, y_train, X_test5 = ngrams(data, labels, ntrain, 5, 5, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) X_train6, y_train, X_test6 = ngrams(data, labels, ntrain, 3, 3, 2000, do_normalization = True, verbose = verbose, analyzer_char = True) X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose) X_train8, y_train, X_test8 = skipGrams(data, labels, ntrain, verbose = verbose) X_tn = sp.hstack([X_train1, X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8]) X_tt = sp.hstack([X_test1, X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8]) if verbose: print "######## Total time for feature extraction: %fs" % (time() - t0), X_tn.shape, X_tt.shape predictions = runClassifiers(X_tn, labels, X_tt) write_submission(predictions, PREDICTION_FILE) print "Predictions written to:", PREDICTION_FILE """ run() #some code for n grams (use tdifvectorizer)
normal
{ "blob_id": "91eb0ae8e59f24aeefdabd46546bc8fb7a0b6f6c", "index": 3833, "step-1": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] 
= stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. 
', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\n<mask token>\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n 
labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = 
[re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. 
', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n 
wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() 
for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... 
where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. 
', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\nrun()\n", "step-4": "<mask token>\nDIR_PATH = ''\nTRAIN_FILE = DIR_PATH + 'train.csv'\nTEST_SOL_FILE = DIR_PATH + 'test_with_solutions.csv'\nBADWORDS_FILE = DIR_PATH + 'bad_words.txt'\nTEST_FILE = DIR_PATH + 'test.csv'\nPREDICTION_FILE = DIR_PATH + 'preds.csv'\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f 
= [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = 
lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. 
', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n 
wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\nrun()\n", "step-5": "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn import metrics, ensemble, linear_model, svm\nfrom numpy import log, ones, array, zeros, mean, std, repeat\nimport numpy as np\nimport scipy.sparse as sp\nimport re\nimport csv\nfrom time import time\nimport functools\nfrom nltk.util import skipgrams\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\n\n\nDIR_PATH = \"\"\n\nTRAIN_FILE = DIR_PATH + \"train.csv\"\nTEST_SOL_FILE = DIR_PATH + \"test_with_solutions.csv\" # This is also used for training, together with TRAIN_FILE\nBADWORDS_FILE = DIR_PATH + \"bad_words.txt\" # attached with submission \n\nTEST_FILE = DIR_PATH + \"test.csv\" # set this to the new test file name\nPREDICTION_FILE = DIR_PATH + \"preds.csv\" # predictions will be written here \n\ndef normalize(f , lammatize= False):\n f = [x.lower() for x in f]\n f = [x.replace(\"\\\\n\",\" \") for x in f] \n f = [x.replace(\"\\\\t\",\" \") for x in f] \n f = [x.replace(\"\\\\xa0\",\" \") for x in f]\n f = [x.replace(\"\\\\xc2\",\" \") for x in f]\n\n #f = [x.replace(\",\",\" \").replace(\".\",\" \").replace(\" \", \" \") for x in f]\n #f = [re.subn(\" ([a-z]) \",\"\\\\1\", x)[0] for x in f] \n #f = [x.replace(\" \",\" \") for x in f]\n\n f = [x.replace(\" u \",\" you \") for x in f]\n 
f = [x.replace(\" em \",\" them \") for x in f]\n f = [x.replace(\" da \",\" the \") for x in f]\n f = [x.replace(\" yo \",\" you \") for x in f]\n f = [x.replace(\" ur \",\" you \") for x in f]\n #f = [x.replace(\" ur \",\" your \") for x in f]\n #f = [x.replace(\" ur \",\" you're \") for x in f]\n \n f = [x.replace(\"won't\", \"will not\") for x in f]\n f = [x.replace(\"can't\", \"cannot\") for x in f]\n f = [x.replace(\"i'm\", \"i am\") for x in f]\n f = [x.replace(\" im \", \" i am \") for x in f]\n f = [x.replace(\"ain't\", \"is not\") for x in f]\n f = [x.replace(\"'ll\", \" will\") for x in f]\n f = [x.replace(\"'t\", \" not\") for x in f]\n f = [x.replace(\"'ve\", \" have\") for x in f]\n f = [x.replace(\"'s\", \" is\") for x in f]\n f = [x.replace(\"'re\", \" are\") for x in f]\n f = [x.replace(\"'d\", \" would\") for x in f]\n\n #f = [x.replace(\"outta\", \"out of\") for x in f]\n\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = \" \" + key + \" \"\n vpad = \" \" + value + \" \"\n f = [x.replace(kpad, vpad) for x in f]\n \n # stemming \n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range (0, len(tokenized_sents)):\n for j in range (0,len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range (0, len(tokenized_sents)):\n for j in range 
(0,len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents[i][j]) \n for i in range (0, len(tokenized_sents)):\n f[i] = \" \".join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features=500, binary = False, do_normalization = False, stopwords = False, verbose = True, analyzer_char = False):\n f = data\n if do_normalization:\n f = normalize(f)\n \n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n \n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n \n if binary:\n vectorizer = CountVectorizer(ngram_range = (min_ngrams , max_ngrams), binary =True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),stop_words='english',analyzer=analyzer_type,sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),sublinear_tf=True,analyzer=analyzer_type)\n\n if verbose:\n print (\"extracting ngrams... where n is [%d,%d]\" % (max_ngrams,min_ngrams))\n \n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n \n if verbose:\n print (\"done in %fs\" % (time() - t0), X_train.shape, X_test.shape)\n\n y = array(y_train) \n \n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train) \n\n if verbose:\n print (\"Extracting best features by a chi-squared test.. 
\", X_train.shape, X_test.shape ) \n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain,nm=500,min_ngrams=1, max_ngrams=1, no_of_features=500, do_normalization = False, verbose = True):\n f = data\n if do_normalization:\n f = normalize(f)\n \n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n \n vectorizer = TfidfVectorizer(sublinear_tf=True,analyzer=skipper)\n \n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n \n if verbose:\n print (\"done in %fs\" % (time() - t0), X_train.shape, X_test.shape)\n\n y = array(y_train) \n \n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train) \n if verbose:\n print (\"Extracting best features by a chi-squared test.. \", X_train.shape, X_test.shape) \n return X_train, y, X_test\n\n\n\ndef specialCases(data, labels, ntrain, verbose = True):\n g = [x.lower().replace(\"you are\",\" SSS \").replace(\"you're\",\" SSS \").replace(\" ur \", \" SSS \").split(\"SSS\")[1:] for x in data]\n\n f = []\n for x in g:\n fts = \" \"\n x = normalize(x)\n for y in x:\n w = y.strip().replace(\"?\",\".\").split(\".\")\n fts = fts + \" \" + w[0] \n f.append(fts)\n \n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100, do_normalization = True, verbose = verbose)\n return X_trn, y_trn, X_tst\n\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, \"r\")\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(\",\")\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n \n\ndef readCsv(fname, skipFirst=True, delimiter = \",\"):\n reader = csv.reader(open(fname),delimiter=delimiter)\n \n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1: \n rows.append(row)\n count += 1\n return rows\n\ndef write_submission(x,filename):\n 
wtr = open(filename,\"w\")\n for i in range(len(x)):\n wtr.write(format(x[i],\"0.10f\"))\n wtr.write(\"\\n\")\n wtr.close()\n\ndef run(verbose = True):\n t0 = time()\n\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n \n train_data = train_data + train2_data\n # print(train_data)\n labels = array([int(x[0]) for x in train_data])\n # print(labels) \n train = [x[2] for x in train_data]\n\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data] \n \n data = train + test_data\n\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)\n \n\"\"\"\n X_train1, y_train, X_test1 = ngrams(data, labels, ntrain, 1, 1, 2000, do_normalization = True, verbose = verbose)\n \n X_train2, y_train, X_test2 = ngrams(data, labels, ntrain, 2, 2, 4000, do_normalization = True, verbose = verbose)\n X_train3, y_train, X_test3 = ngrams(data, labels, ntrain, 3, 3, 100, do_normalization = True, verbose = verbose) \n X_train4, y_train, X_test4 = ngrams(data, labels, ntrain, 4, 4, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) \n X_train5, y_train, X_test5 = ngrams(data, labels, ntrain, 5, 5, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) \n X_train6, y_train, X_test6 = ngrams(data, labels, ntrain, 3, 3, 2000, do_normalization = True, verbose = verbose, analyzer_char = True) \n\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)\n X_train8, y_train, X_test8 = skipGrams(data, labels, ntrain, verbose = verbose)\n\n X_tn = sp.hstack([X_train1, X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8])\n X_tt = sp.hstack([X_test1, X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8])\n \n if verbose:\n print \"######## Total time for feature extraction: %fs\" % (time() - t0), X_tn.shape, X_tt.shape\n \n predictions = runClassifiers(X_tn, labels, X_tt)\n \n write_submission(predictions, 
PREDICTION_FILE) \n print \"Predictions written to:\", PREDICTION_FILE\n\"\"\"\n\nrun()\n#some code for n grams (use tdifvectorizer)\n\n\n\n\n\n", "step-ids": [ 7, 8, 9, 10, 12 ] }
[ 7, 8, 9, 10, 12 ]
<|reserved_special_token_0|> class ListVolumeType(command.Lister): <|reserved_special_token_0|> <|reserved_special_token_0|> class ShowVolumeType(command.ShowOne): def get_parser(self, prog_name): parser = super(ShowVolumeType, self).get_parser(prog_name) parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help= 'volume type to display (ID)') return parser def take_action(self, parsed_args): storage_client = self.app.client_manager.storage try: volume_type = storage_client.volume_types.get(parsed_args. volume_type) printout = volume_type._info for key, value in printout.get('extra_specs').items(): printout[key] = copy.copy(value) del printout['extra_specs'] except exceptions.ClientException as clientexp: printout = {'message': clientexp.message, 'details': clientexp. details, 'code': clientexp.code} return zip(*sorted(six.iteritems(printout))) <|reserved_special_token_1|> <|reserved_special_token_0|> class ListVolumeType(command.Lister): def get_parser(self, prog_name): parser = super(ListVolumeType, self).get_parser(prog_name) parser.add_argument('--name', metavar='<string>', help= 'Filter results by virtual storage name') return parser <|reserved_special_token_0|> class ShowVolumeType(command.ShowOne): def get_parser(self, prog_name): parser = super(ShowVolumeType, self).get_parser(prog_name) parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help= 'volume type to display (ID)') return parser def take_action(self, parsed_args): storage_client = self.app.client_manager.storage try: volume_type = storage_client.volume_types.get(parsed_args. volume_type) printout = volume_type._info for key, value in printout.get('extra_specs').items(): printout[key] = copy.copy(value) del printout['extra_specs'] except exceptions.ClientException as clientexp: printout = {'message': clientexp.message, 'details': clientexp. 
details, 'code': clientexp.code} return zip(*sorted(six.iteritems(printout))) <|reserved_special_token_1|> <|reserved_special_token_0|> class ListVolumeType(command.Lister): def get_parser(self, prog_name): parser = super(ListVolumeType, self).get_parser(prog_name) parser.add_argument('--name', metavar='<string>', help= 'Filter results by virtual storage name') return parser def take_action(self, parsed_args): storage_client = self.app.client_manager.storage search_opts = {'display_name': parsed_args.name} columns = ['ID', 'Name', 'available_volume_size', 'available_volume_throughput', 'available_iops_per_gb'] column_headers = copy.deepcopy(columns) data = storage_client.volume_types.list(search_opts=search_opts) if parsed_args.name is not None: data = utils.filter_list_with_property(data, 'name', parsed_args.name) for vtype in data: for key, value in vtype.extra_specs.items(): setattr(vtype, key, value) return column_headers, (utils.get_item_properties(s, columns) for s in data) class ShowVolumeType(command.ShowOne): def get_parser(self, prog_name): parser = super(ShowVolumeType, self).get_parser(prog_name) parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help= 'volume type to display (ID)') return parser def take_action(self, parsed_args): storage_client = self.app.client_manager.storage try: volume_type = storage_client.volume_types.get(parsed_args. volume_type) printout = volume_type._info for key, value in printout.get('extra_specs').items(): printout[key] = copy.copy(value) del printout['extra_specs'] except exceptions.ClientException as clientexp: printout = {'message': clientexp.message, 'details': clientexp. 
details, 'code': clientexp.code} return zip(*sorted(six.iteritems(printout))) <|reserved_special_token_1|> import copy import six from eclcli.common import command from eclcli.common import utils from eclcli.storage.storageclient import exceptions class ListVolumeType(command.Lister): def get_parser(self, prog_name): parser = super(ListVolumeType, self).get_parser(prog_name) parser.add_argument('--name', metavar='<string>', help= 'Filter results by virtual storage name') return parser def take_action(self, parsed_args): storage_client = self.app.client_manager.storage search_opts = {'display_name': parsed_args.name} columns = ['ID', 'Name', 'available_volume_size', 'available_volume_throughput', 'available_iops_per_gb'] column_headers = copy.deepcopy(columns) data = storage_client.volume_types.list(search_opts=search_opts) if parsed_args.name is not None: data = utils.filter_list_with_property(data, 'name', parsed_args.name) for vtype in data: for key, value in vtype.extra_specs.items(): setattr(vtype, key, value) return column_headers, (utils.get_item_properties(s, columns) for s in data) class ShowVolumeType(command.ShowOne): def get_parser(self, prog_name): parser = super(ShowVolumeType, self).get_parser(prog_name) parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help= 'volume type to display (ID)') return parser def take_action(self, parsed_args): storage_client = self.app.client_manager.storage try: volume_type = storage_client.volume_types.get(parsed_args. volume_type) printout = volume_type._info for key, value in printout.get('extra_specs').items(): printout[key] = copy.copy(value) del printout['extra_specs'] except exceptions.ClientException as clientexp: printout = {'message': clientexp.message, 'details': clientexp. 
details, 'code': clientexp.code} return zip(*sorted(six.iteritems(printout))) <|reserved_special_token_1|> import copy import six from eclcli.common import command from eclcli.common import utils from eclcli.storage.storageclient import exceptions class ListVolumeType(command.Lister): def get_parser(self, prog_name): parser = super(ListVolumeType, self).get_parser(prog_name) parser.add_argument( "--name", metavar="<string>", help="Filter results by virtual storage name") return parser def take_action(self, parsed_args): storage_client = self.app.client_manager.storage search_opts = { 'display_name': parsed_args.name, } columns = ['ID', 'Name', 'available_volume_size', 'available_volume_throughput', 'available_iops_per_gb'] column_headers = copy.deepcopy(columns) data = storage_client.volume_types.list(search_opts=search_opts) if parsed_args.name is not None: data = utils.filter_list_with_property(data, "name", parsed_args.name) for vtype in data: for key, value in vtype.extra_specs.items(): setattr(vtype, key, value) return (column_headers, (utils.get_item_properties( s, columns, ) for s in data)) class ShowVolumeType(command.ShowOne): def get_parser(self, prog_name): parser = super(ShowVolumeType, self).get_parser(prog_name) parser.add_argument( "volume_type", metavar="VOLUME_TYPE_ID", help="volume type to display (ID)") return parser def take_action(self, parsed_args): storage_client = self.app.client_manager.storage try: volume_type = storage_client.volume_types.get(parsed_args.volume_type) printout = volume_type._info for key, value in printout.get("extra_specs").items(): printout[key] = copy.copy(value) del printout["extra_specs"] except exceptions.ClientException as clientexp: printout = {"message": clientexp.message, "details": clientexp.details, "code": clientexp.code} return zip(*sorted(six.iteritems(printout)))
flexible
{ "blob_id": "c73bea686786a30f298500968cfd01e2d5125d75", "index": 4013, "step-1": "<mask token>\n\n\nclass ListVolumeType(command.Lister):\n <mask token>\n <mask token>\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help=\n 'volume type to display (ID)')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.\n volume_type)\n printout = volume_type._info\n for key, value in printout.get('extra_specs').items():\n printout[key] = copy.copy(value)\n del printout['extra_specs']\n except exceptions.ClientException as clientexp:\n printout = {'message': clientexp.message, 'details': clientexp.\n details, 'code': clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n", "step-2": "<mask token>\n\n\nclass ListVolumeType(command.Lister):\n\n def get_parser(self, prog_name):\n parser = super(ListVolumeType, self).get_parser(prog_name)\n parser.add_argument('--name', metavar='<string>', help=\n 'Filter results by virtual storage name')\n return parser\n <mask token>\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help=\n 'volume type to display (ID)')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.\n volume_type)\n printout = volume_type._info\n for key, value in printout.get('extra_specs').items():\n printout[key] = copy.copy(value)\n del printout['extra_specs']\n except exceptions.ClientException as clientexp:\n printout = {'message': clientexp.message, 'details': clientexp.\n details, 'code': clientexp.code}\n 
return zip(*sorted(six.iteritems(printout)))\n", "step-3": "<mask token>\n\n\nclass ListVolumeType(command.Lister):\n\n def get_parser(self, prog_name):\n parser = super(ListVolumeType, self).get_parser(prog_name)\n parser.add_argument('--name', metavar='<string>', help=\n 'Filter results by virtual storage name')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n search_opts = {'display_name': parsed_args.name}\n columns = ['ID', 'Name', 'available_volume_size',\n 'available_volume_throughput', 'available_iops_per_gb']\n column_headers = copy.deepcopy(columns)\n data = storage_client.volume_types.list(search_opts=search_opts)\n if parsed_args.name is not None:\n data = utils.filter_list_with_property(data, 'name',\n parsed_args.name)\n for vtype in data:\n for key, value in vtype.extra_specs.items():\n setattr(vtype, key, value)\n return column_headers, (utils.get_item_properties(s, columns) for s in\n data)\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help=\n 'volume type to display (ID)')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.\n volume_type)\n printout = volume_type._info\n for key, value in printout.get('extra_specs').items():\n printout[key] = copy.copy(value)\n del printout['extra_specs']\n except exceptions.ClientException as clientexp:\n printout = {'message': clientexp.message, 'details': clientexp.\n details, 'code': clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n", "step-4": "import copy\nimport six\nfrom eclcli.common import command\nfrom eclcli.common import utils\nfrom eclcli.storage.storageclient import exceptions\n\n\nclass ListVolumeType(command.Lister):\n\n def get_parser(self, 
prog_name):\n parser = super(ListVolumeType, self).get_parser(prog_name)\n parser.add_argument('--name', metavar='<string>', help=\n 'Filter results by virtual storage name')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n search_opts = {'display_name': parsed_args.name}\n columns = ['ID', 'Name', 'available_volume_size',\n 'available_volume_throughput', 'available_iops_per_gb']\n column_headers = copy.deepcopy(columns)\n data = storage_client.volume_types.list(search_opts=search_opts)\n if parsed_args.name is not None:\n data = utils.filter_list_with_property(data, 'name',\n parsed_args.name)\n for vtype in data:\n for key, value in vtype.extra_specs.items():\n setattr(vtype, key, value)\n return column_headers, (utils.get_item_properties(s, columns) for s in\n data)\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help=\n 'volume type to display (ID)')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.\n volume_type)\n printout = volume_type._info\n for key, value in printout.get('extra_specs').items():\n printout[key] = copy.copy(value)\n del printout['extra_specs']\n except exceptions.ClientException as clientexp:\n printout = {'message': clientexp.message, 'details': clientexp.\n details, 'code': clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n", "step-5": "import copy\n\nimport six\n\nfrom eclcli.common import command\nfrom eclcli.common import utils\nfrom eclcli.storage.storageclient import exceptions\n\n\nclass ListVolumeType(command.Lister):\n\n def get_parser(self, prog_name):\n parser = super(ListVolumeType, self).get_parser(prog_name)\n parser.add_argument(\n \"--name\",\n metavar=\"<string>\",\n 
help=\"Filter results by virtual storage name\")\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n\n search_opts = {\n 'display_name': parsed_args.name,\n }\n\n columns = ['ID', 'Name', 'available_volume_size',\n 'available_volume_throughput',\n 'available_iops_per_gb']\n column_headers = copy.deepcopy(columns)\n\n data = storage_client.volume_types.list(search_opts=search_opts)\n\n if parsed_args.name is not None:\n data = utils.filter_list_with_property(data, \"name\", parsed_args.name)\n\n for vtype in data:\n for key, value in vtype.extra_specs.items():\n setattr(vtype, key, value)\n\n return (column_headers,\n (utils.get_item_properties(\n s, columns,\n ) for s in data))\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument(\n \"volume_type\",\n metavar=\"VOLUME_TYPE_ID\",\n help=\"volume type to display (ID)\")\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.volume_type)\n printout = volume_type._info\n for key, value in printout.get(\"extra_specs\").items():\n printout[key] = copy.copy(value)\n del printout[\"extra_specs\"]\n except exceptions.ClientException as clientexp:\n printout = {\"message\": clientexp.message,\n \"details\": clientexp.details,\n \"code\": clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
import os import shutil import json from django.shortcuts import render, HttpResponse from django.utils.encoding import escape_uri_path from django.db import transaction from web_pan.settings import files_folder from disk import models # Create your views here. def logined(func): def wrapper(request, *args, **kwargs): session = request.session.get('user') if not session: return render(request, 'login.html') else: return func(request, *args, **kwargs) return wrapper def api_check(func): def wrapper(request, *args, **kwargs): session = request.session.get('user') if not session: res = dict( state_code=-3, error_msg="登陆过期" ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') else: return func(request, *args, **kwargs) return wrapper def login(request): if request.method == 'GET': if request.session.get('user'): return render(request, 'index.html') return render(request, 'login.html') else: req = json.loads(request.body) user = req.get('username') pwd = req.get('pwd') obj_user = models.Users.objects.filter(user_name=user).all() if not obj_user: res = dict( state_code=1, error_msg="用户不存在" ) else: password = obj_user.first().password if str(pwd) != str(password): res = dict( state_code=2, error_msg="密码错误" ) else: request.session['user'] = user request.session.set_expiry(60*60*4) res = dict( state_code=0, error_msg="密码错误" ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') def logout(request): if request.session.get('user'): del request.session['user'] return render(request, 'login.html') @logined def index(request): return render(request, 'index.html') @api_check def get_dir_list(request): user = request.session.get('user') obj_dir = models.Dirs.objects.filter(user_name=user).all() dir_list = [] for dirs in obj_dir: user_dir = dirs.dir dir_list.append(user_dir) res = dict( state_code=0, error_msg='ok', data={ "dir_list": dir_list } ) return HttpResponse(json.dumps(res, ensure_ascii=False), 
content_type='application/json') @api_check def user_mkdir(request): req = json.loads(request.body) dir_name = req.get('dir_name') if not dir_name: res = dict( state_code=-2, error_msg='参数错误' ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') dir_path = os.path.join(files_folder, dir_name) if os.path.exists(dir_path): res = dict( state_code=1, error_msg="该目录已被使用" ) else: user = request.session.get('user') if user: models.Dirs.objects.create( user_name=user, dir=dir_name ) os.mkdir(dir_path) res = dict( state_code=0, error_msg='ok' ) else: res = dict( state_code=-3, error_msg="登陆过期" ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') @api_check def del_dir(request): req = json.loads(request.body) dir_name = req.get('dir_name') if not dir_name: res = dict( state_code=-2, error_msg='参数错误' ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') dir_path = os.path.join(files_folder, dir_name) if not os.path.exists(dir_path): res = dict( state_code=1, error_msg='目录不存在' ) else: with transaction.atomic(): obj_dir = models.Dirs.objects.filter(dir=dir_name).all() if obj_dir: obj_dir.delete() shutil.rmtree(dir_path) res = dict( state_code=0, eror_msg='ok' ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') @api_check def upload_file(request): dir_name = request.POST.get('dir_name') if not dir_name: res = dict( state_code=-2, error_msg='参数错误' ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') dir_path = os.path.join(files_folder, dir_name) if not os.path.exists(dir_path): res = dict( state_code=1, error_msg='目录不存在' ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') # 获取上传的文件,如果没有文件,则默认为None; File = request.FILES.get("file", None) if File is None: res = dict( state_code=-2, error_msg='参数错误' ) return HttpResponse(json.dumps(res, 
ensure_ascii=False), content_type='application/json') file_name = File.name file_path = os.path.join(dir_path, file_name) # 打开特定的文件进行二进制的写操作; with open(file_path, 'wb+') as f: # 分块写入文件; for chunk in File.chunks(): f.write(chunk) res = dict( state_code=0, error_msg='ok', ) return HttpResponse(json.dumps(res), content_type='application/json') @api_check def query_file(request): req = json.loads(request.body) dir_name = req.get('dir_name') dir_path = os.path.join(files_folder, dir_name) cmd_info = os.popen("ls -l -h {}".format(dir_path)).read() file_list = cmd_info.split('\n')[1:-1] file_list_data = [] for file_info_cmd in file_list: file_info_list = file_info_cmd.split(' ') file_info = list(filter(None, file_info_list)) file = file_info[-1] file_size = file_info[4] name_type = file.rsplit('.', 1) if len(name_type) < 2: name_type.append('未知') file_name, file_type = name_type file_list_data.append({ 'file_name': file_name, 'file_type': file_type, 'file_size': file_size }) res = dict( state_code=0, error_msg='ok', data={ 'file_list': file_list_data } ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') @api_check def del_file(request): req = json.loads(request.body) dir_name = req.get('dir_name') file_name = req.get('file_name') file_type = req.get('file_type') file = file_name + '.' 
+ file_type if file_type != '未知' else file_name file_path = os.path.join(os.path.join(files_folder,dir_name),file) if not os.path.exists(file_path): res = dict( state_code=1, error_msg='文件不存在' ) else: os.remove(file_path) res = dict( state_code=0, error_msg='ok' ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') @api_check def download_file(request): req = json.loads(request.body) dir_name = req.get('dir_name') file_name = req.get('file_name') file_type = req.get('file_type') file = file_name+'.'+file_type if file_type != '未知' else file_name file_path = os.path.join(os.path.join(files_folder,dir_name),file) if not os.path.exists(file_path): res = dict( state_code=1, error_msg='文件不存在' ) return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json') from django.http import StreamingHttpResponse file_size = os.path.getsize(file_path) def file_iterator(file_name, chunk_size=512): # 用于形成二进制数据 with open(file_name, 'rb') as f: while True: c = f.read(chunk_size) if c: yield c else: break the_file_name = file_path # 要下载的文件路径 res = file_iterator(the_file_name) response = StreamingHttpResponse(res) # 这里创建返回 response['Content-Type'] = 'application/octet-stream; charset=UTF-8' # 注意格式 response['Content-Length'] = file_size response['Content-Disposition'] = 'attachment;filename="{}"'.format(escape_uri_path(file)) # 注意filename 这个是下载后的名字 return response
normal
{ "blob_id": "eeb87891d1a02484a61537745ec6f13387017929", "index": 705, "step-1": "<mask token>\n\n\ndef logined(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef api_check(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(state_code=1, error_msg='用户不存在')\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(state_code=2, error_msg='密码错误')\n else:\n request.session['user'] = user\n request.session.set_expiry(60 * 60 * 4)\n res = dict(state_code=0, error_msg='密码错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 
'application/json')\n\n\n<mask token>\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n File = request.FILES.get('file', None)\n if File is None:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as f:\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({'file_name': file_name, 'file_type':\n file_type, 'file_size': file_size})\n res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = 
req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n else:\n os.remove(file_path)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef logined(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef api_check(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(state_code=1, error_msg='用户不存在')\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(state_code=2, error_msg='密码错误')\n else:\n request.session['user'] = user\n request.session.set_expiry(60 * 60 * 4)\n res = dict(state_code=0, error_msg='密码错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef 
get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n<mask token>\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n File = request.FILES.get('file', None)\n if File is None:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as f:\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({'file_name': file_name, 'file_type':\n file_type, 
'file_size': file_size})\n res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n else:\n os.remove(file_path)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef download_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n from django.http import StreamingHttpResponse\n file_size = os.path.getsize(file_path)\n\n def file_iterator(file_name, chunk_size=512):\n with open(file_name, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\n the_file_name = file_path\n res = file_iterator(the_file_name)\n response = StreamingHttpResponse(res)\n response['Content-Type'] = 'application/octet-stream; charset=UTF-8'\n response['Content-Length'] = file_size\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(\n escape_uri_path(file))\n return response\n", "step-3": "<mask token>\n\n\ndef logined(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not 
session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef api_check(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(state_code=1, error_msg='用户不存在')\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(state_code=2, error_msg='密码错误')\n else:\n request.session['user'] = user\n request.session.set_expiry(60 * 60 * 4)\n res = dict(state_code=0, error_msg='密码错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef user_mkdir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, 
ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='该目录已被使用')\n else:\n user = request.session.get('user')\n if user:\n models.Dirs.objects.create(user_name=user, dir=dir_name)\n os.mkdir(dir_path)\n res = dict(state_code=0, error_msg='ok')\n else:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_dir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n else:\n with transaction.atomic():\n obj_dir = models.Dirs.objects.filter(dir=dir_name).all()\n if obj_dir:\n obj_dir.delete()\n shutil.rmtree(dir_path)\n res = dict(state_code=0, eror_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n File = request.FILES.get('file', None)\n if File is None:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as f:\n for chunk in 
File.chunks():\n f.write(chunk)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({'file_name': file_name, 'file_type':\n file_type, 'file_size': file_size})\n res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n else:\n os.remove(file_path)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef download_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' 
+ file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n from django.http import StreamingHttpResponse\n file_size = os.path.getsize(file_path)\n\n def file_iterator(file_name, chunk_size=512):\n with open(file_name, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\n the_file_name = file_path\n res = file_iterator(the_file_name)\n response = StreamingHttpResponse(res)\n response['Content-Type'] = 'application/octet-stream; charset=UTF-8'\n response['Content-Length'] = file_size\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(\n escape_uri_path(file))\n return response\n", "step-4": "import os\nimport shutil\nimport json\nfrom django.shortcuts import render, HttpResponse\nfrom django.utils.encoding import escape_uri_path\nfrom django.db import transaction\nfrom web_pan.settings import files_folder\nfrom disk import models\n\n\ndef logined(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef api_check(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = 
models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(state_code=1, error_msg='用户不存在')\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(state_code=2, error_msg='密码错误')\n else:\n request.session['user'] = user\n request.session.set_expiry(60 * 60 * 4)\n res = dict(state_code=0, error_msg='密码错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef user_mkdir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='该目录已被使用')\n else:\n user = request.session.get('user')\n if user:\n models.Dirs.objects.create(user_name=user, dir=dir_name)\n os.mkdir(dir_path)\n res = dict(state_code=0, error_msg='ok')\n else:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_dir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, 
ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n else:\n with transaction.atomic():\n obj_dir = models.Dirs.objects.filter(dir=dir_name).all()\n if obj_dir:\n obj_dir.delete()\n shutil.rmtree(dir_path)\n res = dict(state_code=0, eror_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n File = request.FILES.get('file', None)\n if File is None:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as f:\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n 
file_list_data.append({'file_name': file_name, 'file_type':\n file_type, 'file_size': file_size})\n res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n else:\n os.remove(file_path)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef download_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' 
+ file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n from django.http import StreamingHttpResponse\n file_size = os.path.getsize(file_path)\n\n def file_iterator(file_name, chunk_size=512):\n with open(file_name, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\n the_file_name = file_path\n res = file_iterator(the_file_name)\n response = StreamingHttpResponse(res)\n response['Content-Type'] = 'application/octet-stream; charset=UTF-8'\n response['Content-Length'] = file_size\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(\n escape_uri_path(file))\n return response\n", "step-5": "import os\nimport shutil\nimport json\nfrom django.shortcuts import render, HttpResponse\nfrom django.utils.encoding import escape_uri_path\nfrom django.db import transaction\nfrom web_pan.settings import files_folder\nfrom disk import models\n\n\n# Create your views here.\n\n\ndef logined(func):\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n\n return wrapper\n\n\ndef api_check(func):\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(\n state_code=-3,\n error_msg=\"登陆过期\"\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n 
obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(\n state_code=1,\n error_msg=\"用户不存在\"\n )\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(\n state_code=2,\n error_msg=\"密码错误\"\n )\n else:\n request.session['user'] = user\n request.session.set_expiry(60*60*4)\n res = dict(\n state_code=0,\n error_msg=\"密码错误\"\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(\n state_code=0,\n error_msg='ok',\n data={\n \"dir_list\": dir_list\n }\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef user_mkdir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(\n state_code=-2,\n error_msg='参数错误'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if os.path.exists(dir_path):\n res = dict(\n state_code=1,\n error_msg=\"该目录已被使用\"\n )\n else:\n user = request.session.get('user')\n if user:\n models.Dirs.objects.create(\n user_name=user,\n dir=dir_name\n )\n os.mkdir(dir_path)\n res = dict(\n state_code=0,\n error_msg='ok'\n )\n else:\n res = dict(\n state_code=-3,\n error_msg=\"登陆过期\"\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef del_dir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not 
dir_name:\n res = dict(\n state_code=-2,\n error_msg='参数错误'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(\n state_code=1,\n error_msg='目录不存在'\n )\n else:\n with transaction.atomic():\n obj_dir = models.Dirs.objects.filter(dir=dir_name).all()\n if obj_dir:\n obj_dir.delete()\n shutil.rmtree(dir_path)\n res = dict(\n state_code=0,\n eror_msg='ok'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(\n state_code=-2,\n error_msg='参数错误'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(\n state_code=1,\n error_msg='目录不存在'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n # 获取上传的文件,如果没有文件,则默认为None;\n File = request.FILES.get(\"file\", None)\n if File is None:\n res = dict(\n state_code=-2,\n error_msg='参数错误'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n # 打开特定的文件进行二进制的写操作;\n with open(file_path, 'wb+') as f:\n # 分块写入文件;\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(\n state_code=0,\n error_msg='ok',\n )\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen(\"ls -l -h {}\".format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = 
list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({\n 'file_name': file_name,\n 'file_type': file_type,\n 'file_size': file_size\n })\n res = dict(\n state_code=0,\n error_msg='ok',\n data={\n 'file_list': file_list_data\n }\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder,dir_name),file)\n if not os.path.exists(file_path):\n res = dict(\n state_code=1,\n error_msg='文件不存在'\n )\n else:\n os.remove(file_path)\n res = dict(\n state_code=0,\n error_msg='ok'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef download_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name+'.'+file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder,dir_name),file)\n if not os.path.exists(file_path):\n res = dict(\n state_code=1,\n error_msg='文件不存在'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n from django.http import StreamingHttpResponse\n file_size = os.path.getsize(file_path)\n def file_iterator(file_name, chunk_size=512): # 用于形成二进制数据\n with open(file_name, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\n the_file_name = file_path # 要下载的文件路径\n res = file_iterator(the_file_name)\n response = StreamingHttpResponse(res) # 这里创建返回\n response['Content-Type'] = 
'application/octet-stream; charset=UTF-8' # 注意格式\n response['Content-Length'] = file_size\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(escape_uri_path(file)) # 注意filename 这个是下载后的名字\n return response\n", "step-ids": [ 9, 10, 12, 13, 14 ] }
[ 9, 10, 12, 13, 14 ]
from functions.service_funcs.get_data import get_data_character def clean_room(update): char, db_sess = get_data_character(update, return_sess=True) # удаляем старую комнату и всю инфу о ней if char and char.room: if char.room.mobs: for mob in char.room.mobs: db_sess.delete(mob) if char.room.items: for item in char.room.items: db_sess.delete(item) db_sess.delete(char.room) db_sess.commit()
normal
{ "blob_id": "4d57fa22282d7b3f8adabedd7a04e32767181890", "index": 5693, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n", "step-3": "from functions.service_funcs.get_data import get_data_character\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n", "step-4": "from functions.service_funcs.get_data import get_data_character\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n # удаляем старую комнату и всю инфу о ней\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class Comment(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __str__(self): return self.text class Rating(models.Model): rating = models.PositiveIntegerField() profile = models.ForeignKey('accounts.Profile', related_name='ratings', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='ratings', on_delete= models.CASCADE) def __str__(self): return ( f'{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}' ) class UserSavedPaper(models.Model): profile = models.ForeignKey('accounts.Profile', related_name= 'saved_papers', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='saved_papers', on_delete=models.CASCADE) comment = models.CharField(max_length=500, blank=True) def __str__(self): return ( f'user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}' ) <|reserved_special_token_1|> <|reserved_special_token_0|> class Comment(models.Model): text = models.CharField(max_length=500) time = models.DateTimeField(default=timezone.now) profile = models.ForeignKey('accounts.Profile', related_name='comments', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='comments', on_delete= models.CASCADE) def __str__(self): return self.text class Rating(models.Model): rating = models.PositiveIntegerField() profile = models.ForeignKey('accounts.Profile', related_name='ratings', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='ratings', on_delete= models.CASCADE) def __str__(self): return ( f'{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}' ) class UserSavedPaper(models.Model): profile = models.ForeignKey('accounts.Profile', related_name= 'saved_papers', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='saved_papers', on_delete=models.CASCADE) comment = 
models.CharField(max_length=500, blank=True) def __str__(self): return ( f'user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}' ) <|reserved_special_token_1|> <|reserved_special_token_0|> class Paper(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class Comment(models.Model): text = models.CharField(max_length=500) time = models.DateTimeField(default=timezone.now) profile = models.ForeignKey('accounts.Profile', related_name='comments', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='comments', on_delete= models.CASCADE) def __str__(self): return self.text class Rating(models.Model): rating = models.PositiveIntegerField() profile = models.ForeignKey('accounts.Profile', related_name='ratings', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='ratings', on_delete= models.CASCADE) def __str__(self): return ( f'{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}' ) class UserSavedPaper(models.Model): profile = models.ForeignKey('accounts.Profile', related_name= 'saved_papers', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='saved_papers', on_delete=models.CASCADE) comment = models.CharField(max_length=500, blank=True) def __str__(self): return ( f'user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}' ) <|reserved_special_token_1|> from django.db import models from django.utils import timezone from django.contrib.auth.models import User <|reserved_special_token_0|> class Paper(models.Model): title = models.CharField(max_length=200) authors = 
models.CharField(max_length=200) abstract = models.CharField(max_length=2000, blank=True) journal = models.CharField(max_length=80, blank=True) date_published = models.DateField(blank=True) doi = models.CharField(max_length=32, blank=True) pdflink = models.CharField(max_length=80, blank=True) avg_rating = models.FloatField(default=0) num_ratings = models.PositiveIntegerField(default=0) commented_by_users = models.ManyToManyField('accounts.Profile', related_name='comments_made', through='Comment', blank=True) rated_by_users = models.ManyToManyField('accounts.Profile', related_name='ratings_given', through='Rating', blank=True) saved_by_users = models.ManyToManyField('accounts.Profile', related_name='papers_saved', through='UserSavedPaper', blank=True) def __str__(self): return self.title class Comment(models.Model): text = models.CharField(max_length=500) time = models.DateTimeField(default=timezone.now) profile = models.ForeignKey('accounts.Profile', related_name='comments', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='comments', on_delete= models.CASCADE) def __str__(self): return self.text class Rating(models.Model): rating = models.PositiveIntegerField() profile = models.ForeignKey('accounts.Profile', related_name='ratings', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='ratings', on_delete= models.CASCADE) def __str__(self): return ( f'{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}' ) class UserSavedPaper(models.Model): profile = models.ForeignKey('accounts.Profile', related_name= 'saved_papers', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='saved_papers', on_delete=models.CASCADE) comment = models.CharField(max_length=500, blank=True) def __str__(self): return ( f'user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}' ) <|reserved_special_token_1|> from django.db import models from django.utils import 
timezone from django.contrib.auth.models import User """ Using the django shell: $ python manage.py shell from django.contrib.auth.models import User from accounts.models import Profile from papers.models import Paper, Comment, Rating, UserSavedPaper users = User.objects.all() profiles = Profile.objects.all() papers = Paper.objects.all() comments = Comment.objects.all() ratings = Rating.objects.all() usps = UserSavedPaper.objects.all() comments.create(text='this is an awesome paper!', profile=profiles[0], paper=papers[0]) """ # Reversing migrations # https://docs.djangoproject.com/en/3.0/topics/migrations/#reversing-migrations # - Ex) $ python manage.py migrate papers zero <- reverses all migrations for app "papers", see all migrations with "$ python manage.py showmigrations" # -> python manage.py makemigrations -> python manage.py migrate # https://docs.djangoproject.com/en/3.0/ref/models/fields/ class Paper(models.Model): title = models.CharField(max_length=200) # About data storage space when specifying a max_length: https://stackoverflow.com/questions/30663791/do-setting-the-max-length-to-a-very-large-value-consume-extra-space authors = models.CharField(max_length=200) abstract = models.CharField(max_length=2000, blank=True) journal = models.CharField(max_length=80, blank=True) date_published = models.DateField(blank=True) # https://www.django-rest-framework.org/api-guide/fields/#datefield doi = models.CharField(max_length=32, blank=True) pdflink = models.CharField(max_length=80, blank=True) avg_rating = models.FloatField(default=0) num_ratings = models.PositiveIntegerField(default=0) # Useful example for many-to-many in django: https://www.revsys.com/tidbits/tips-using-djangos-manytomanyfield/ # - TO DO: Get rid of these fields below in the Paper model? The Comment/Rating/UserSavedPaper tables can exist without them being here!? 
commented_by_users = models.ManyToManyField( 'accounts.Profile', related_name='comments_made', through='Comment', blank=True ) rated_by_users = models.ManyToManyField( 'accounts.Profile', related_name='ratings_given', through='Rating', blank=True ) saved_by_users = models.ManyToManyField( 'accounts.Profile', related_name="papers_saved", through='UserSavedPaper', blank=True ) def __str__(self): return self.title # Custom "through" models: https://docs.djangoproject.com/en/3.0/ref/models/fields/#django.db.models.ManyToManyField.through_fields class Comment(models.Model): text = models.CharField(max_length=500) #rating = models.PositiveIntegerField(blank=True) # should rating be given simultaneously with posting a comment? time = models.DateTimeField(default=timezone.now) # - TO DO: Look into and decide format for the timestamping profile = models.ForeignKey('accounts.Profile', related_name='comments', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='comments', on_delete=models.CASCADE) def __str__(self): return self.text # No support for composite primary key, e.g. (profile_id, paper_id) in django? https://stackoverflow.com/questions/15440593/tell-djangos-model-to-use-as-primary-key-a-set-of-foreign-keys # - https://code.djangoproject.com/wiki/MultipleColumnPrimaryKeys # - possible to enforce it using SQL commands, using something other than the Django ORM, e.g. 
SQLAlchemy) # - there are validators that can be used with a Serializer to enforce "unique together" - https://www.django-rest-framework.org/api-guide/validators/#uniquetogethervalidator class Rating(models.Model): rating = models.PositiveIntegerField() profile = models.ForeignKey('accounts.Profile', related_name='ratings', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='ratings', on_delete=models.CASCADE) def __str__(self): return f"{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}" # class UserSavedPaper(models.Model): profile = models.ForeignKey('accounts.Profile', related_name='saved_papers', on_delete=models.CASCADE) paper = models.ForeignKey('Paper', related_name='saved_papers', on_delete=models.CASCADE) comment = models.CharField(max_length=500, blank=True) def __str__(self): return f"user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}"
flexible
{ "blob_id": "052574be3f4a46bceefc0a54b1fe268a7cef18a9", "index": 3061, "step-1": "<mask token>\n\n\nclass Comment(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.text\n\n\nclass Rating(models.Model):\n rating = models.PositiveIntegerField()\n profile = models.ForeignKey('accounts.Profile', related_name='ratings',\n on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='ratings', on_delete=\n models.CASCADE)\n\n def __str__(self):\n return (\n f'{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}'\n )\n\n\nclass UserSavedPaper(models.Model):\n profile = models.ForeignKey('accounts.Profile', related_name=\n 'saved_papers', on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='saved_papers',\n on_delete=models.CASCADE)\n comment = models.CharField(max_length=500, blank=True)\n\n def __str__(self):\n return (\n f'user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}'\n )\n", "step-2": "<mask token>\n\n\nclass Comment(models.Model):\n text = models.CharField(max_length=500)\n time = models.DateTimeField(default=timezone.now)\n profile = models.ForeignKey('accounts.Profile', related_name='comments',\n on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='comments', on_delete=\n models.CASCADE)\n\n def __str__(self):\n return self.text\n\n\nclass Rating(models.Model):\n rating = models.PositiveIntegerField()\n profile = models.ForeignKey('accounts.Profile', related_name='ratings',\n on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='ratings', on_delete=\n models.CASCADE)\n\n def __str__(self):\n return (\n f'{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}'\n )\n\n\nclass UserSavedPaper(models.Model):\n profile = models.ForeignKey('accounts.Profile', related_name=\n 'saved_papers', on_delete=models.CASCADE)\n paper = 
models.ForeignKey('Paper', related_name='saved_papers',\n on_delete=models.CASCADE)\n comment = models.CharField(max_length=500, blank=True)\n\n def __str__(self):\n return (\n f'user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}'\n )\n", "step-3": "<mask token>\n\n\nclass Paper(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Comment(models.Model):\n text = models.CharField(max_length=500)\n time = models.DateTimeField(default=timezone.now)\n profile = models.ForeignKey('accounts.Profile', related_name='comments',\n on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='comments', on_delete=\n models.CASCADE)\n\n def __str__(self):\n return self.text\n\n\nclass Rating(models.Model):\n rating = models.PositiveIntegerField()\n profile = models.ForeignKey('accounts.Profile', related_name='ratings',\n on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='ratings', on_delete=\n models.CASCADE)\n\n def __str__(self):\n return (\n f'{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}'\n )\n\n\nclass UserSavedPaper(models.Model):\n profile = models.ForeignKey('accounts.Profile', related_name=\n 'saved_papers', on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='saved_papers',\n on_delete=models.CASCADE)\n comment = models.CharField(max_length=500, blank=True)\n\n def __str__(self):\n return (\n f'user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}'\n )\n", "step-4": "from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n<mask token>\n\n\nclass Paper(models.Model):\n title = models.CharField(max_length=200)\n authors = models.CharField(max_length=200)\n abstract = 
models.CharField(max_length=2000, blank=True)\n journal = models.CharField(max_length=80, blank=True)\n date_published = models.DateField(blank=True)\n doi = models.CharField(max_length=32, blank=True)\n pdflink = models.CharField(max_length=80, blank=True)\n avg_rating = models.FloatField(default=0)\n num_ratings = models.PositiveIntegerField(default=0)\n commented_by_users = models.ManyToManyField('accounts.Profile',\n related_name='comments_made', through='Comment', blank=True)\n rated_by_users = models.ManyToManyField('accounts.Profile',\n related_name='ratings_given', through='Rating', blank=True)\n saved_by_users = models.ManyToManyField('accounts.Profile',\n related_name='papers_saved', through='UserSavedPaper', blank=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n text = models.CharField(max_length=500)\n time = models.DateTimeField(default=timezone.now)\n profile = models.ForeignKey('accounts.Profile', related_name='comments',\n on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='comments', on_delete=\n models.CASCADE)\n\n def __str__(self):\n return self.text\n\n\nclass Rating(models.Model):\n rating = models.PositiveIntegerField()\n profile = models.ForeignKey('accounts.Profile', related_name='ratings',\n on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='ratings', on_delete=\n models.CASCADE)\n\n def __str__(self):\n return (\n f'{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}'\n )\n\n\nclass UserSavedPaper(models.Model):\n profile = models.ForeignKey('accounts.Profile', related_name=\n 'saved_papers', on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='saved_papers',\n on_delete=models.CASCADE)\n comment = models.CharField(max_length=500, blank=True)\n\n def __str__(self):\n return (\n f'user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}'\n )\n", "step-5": "from django.db 
import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\n\"\"\"\nUsing the django shell:\n$ python manage.py shell\n\nfrom django.contrib.auth.models import User\nfrom accounts.models import Profile\nfrom papers.models import Paper, Comment, Rating, UserSavedPaper\n\nusers = User.objects.all()\nprofiles = Profile.objects.all()\npapers = Paper.objects.all()\ncomments = Comment.objects.all()\nratings = Rating.objects.all()\nusps = UserSavedPaper.objects.all()\n\ncomments.create(text='this is an awesome paper!', profile=profiles[0], paper=papers[0])\n\"\"\"\n\n# Reversing migrations\n# https://docs.djangoproject.com/en/3.0/topics/migrations/#reversing-migrations\n# - Ex) $ python manage.py migrate papers zero <- reverses all migrations for app \"papers\", see all migrations with \"$ python manage.py showmigrations\"\n# -> python manage.py makemigrations -> python manage.py migrate\n\n\n# https://docs.djangoproject.com/en/3.0/ref/models/fields/\nclass Paper(models.Model):\n title = models.CharField(max_length=200) # About data storage space when specifying a max_length: https://stackoverflow.com/questions/30663791/do-setting-the-max-length-to-a-very-large-value-consume-extra-space\n authors = models.CharField(max_length=200)\n abstract = models.CharField(max_length=2000, blank=True)\n journal = models.CharField(max_length=80, blank=True) \n date_published = models.DateField(blank=True) # https://www.django-rest-framework.org/api-guide/fields/#datefield\n doi = models.CharField(max_length=32, blank=True)\n pdflink = models.CharField(max_length=80, blank=True)\n avg_rating = models.FloatField(default=0)\n num_ratings = models.PositiveIntegerField(default=0)\n\n # Useful example for many-to-many in django: https://www.revsys.com/tidbits/tips-using-djangos-manytomanyfield/\n # - TO DO: Get rid of these fields below in the Paper model? 
The Comment/Rating/UserSavedPaper tables can exist without them being here!?\n commented_by_users = models.ManyToManyField(\n 'accounts.Profile',\n related_name='comments_made',\n through='Comment',\n blank=True\n )\n\n rated_by_users = models.ManyToManyField(\n 'accounts.Profile',\n related_name='ratings_given',\n through='Rating',\n blank=True\n )\n \n saved_by_users = models.ManyToManyField(\n 'accounts.Profile',\n related_name=\"papers_saved\",\n through='UserSavedPaper',\n blank=True\n )\n\n def __str__(self):\n return self.title\n\n\n# Custom \"through\" models: https://docs.djangoproject.com/en/3.0/ref/models/fields/#django.db.models.ManyToManyField.through_fields\nclass Comment(models.Model):\n text = models.CharField(max_length=500)\n #rating = models.PositiveIntegerField(blank=True) # should rating be given simultaneously with posting a comment?\n time = models.DateTimeField(default=timezone.now) # - TO DO: Look into and decide format for the timestamping\n profile = models.ForeignKey('accounts.Profile', related_name='comments', on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='comments', on_delete=models.CASCADE)\n\n def __str__(self):\n return self.text\n\n\n# No support for composite primary key, e.g. (profile_id, paper_id) in django? https://stackoverflow.com/questions/15440593/tell-djangos-model-to-use-as-primary-key-a-set-of-foreign-keys\n# - https://code.djangoproject.com/wiki/MultipleColumnPrimaryKeys\n# - possible to enforce it using SQL commands, using something other than the Django ORM, e.g. 
SQLAlchemy)\n# - there are validators that can be used with a Serializer to enforce \"unique together\" - https://www.django-rest-framework.org/api-guide/validators/#uniquetogethervalidator\nclass Rating(models.Model):\n rating = models.PositiveIntegerField()\n profile = models.ForeignKey('accounts.Profile', related_name='ratings', on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='ratings', on_delete=models.CASCADE)\n\n def __str__(self):\n return f\"{self.profile.user.username} gave {self.paper.title} a rating of {self.rating}\"\n\n\n# \nclass UserSavedPaper(models.Model): \n profile = models.ForeignKey('accounts.Profile', related_name='saved_papers', on_delete=models.CASCADE)\n paper = models.ForeignKey('Paper', related_name='saved_papers', on_delete=models.CASCADE)\n comment = models.CharField(max_length=500, blank=True)\n\n def __str__(self):\n return f\"user {self.profile.user.username} saved paper {self.paper.title} - comment: {self.comment}\"\n", "step-ids": [ 8, 9, 10, 13, 14 ] }
[ 8, 9, 10, 13, 14 ]
# -*- coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-10-28 17:50 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='EMR', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('emergency', models.CharField(default='', max_length=10)), ], ), migrations.CreateModel( name='EMRNote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)), ('comments', models.CharField(default='', max_length=500)), ('emr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='EMRTrackedMetric', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)), ('label', models.CharField(default='', max_length=200)), ('comments', models.CharField(default='', max_length=500)), ('emr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='EMRVitals', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)), ('restingBPM', models.IntegerField(default=0)), ('bloodPressure', models.CharField(default='', max_length=10)), ('height', models.FloatField(default=0)), ('weight', models.FloatField(default=0)), ('age', models.IntegerField(default=0)), ('comments', models.CharField(default='', max_length=1000)), ('emr', models.ForeignKey(blank=True, 
null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')), ], options={ 'abstract': False, }, ), ]
normal
{ "blob_id": "b0064a5cd494d5ad232f27c63a4df2c56a4c6a66", "index": 5241, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='EMR', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('emergency', models.CharField(default='',\n max_length=10))]), migrations.CreateModel(name='EMRNote', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('dateCreated', models.\n DateTimeField(default=django.utils.timezone.now)), ('comments',\n models.CharField(default='', max_length=500)), ('emr', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='emr.EMR'))], options={'abstract': False}),\n migrations.CreateModel(name='EMRTrackedMetric', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('dateCreated', models.DateTimeField(\n default=django.utils.timezone.now)), ('label', models.CharField(\n default='', max_length=200)), ('comments', models.CharField(default\n ='', max_length=500)), ('emr', models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR'))],\n options={'abstract': False}), migrations.CreateModel(name=\n 'EMRVitals', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), (\n 'dateCreated', models.DateTimeField(default=django.utils.timezone.\n now)), ('restingBPM', models.IntegerField(default=0)), (\n 'bloodPressure', models.CharField(default='', max_length=10)), (\n 'height', models.FloatField(default=0)), ('weight', models.\n FloatField(default=0)), ('age', models.IntegerField(default=0)), (\n 'comments', 
models.CharField(default='', max_length=1000)), ('emr',\n models.ForeignKey(blank=True, null=True, on_delete=django.db.models\n .deletion.CASCADE, to='emr.EMR'))], options={'abstract': False})]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='EMR', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('emergency', models.CharField(default='',\n max_length=10))]), migrations.CreateModel(name='EMRNote', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('dateCreated', models.\n DateTimeField(default=django.utils.timezone.now)), ('comments',\n models.CharField(default='', max_length=500)), ('emr', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='emr.EMR'))], options={'abstract': False}),\n migrations.CreateModel(name='EMRTrackedMetric', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('dateCreated', models.DateTimeField(\n default=django.utils.timezone.now)), ('label', models.CharField(\n default='', max_length=200)), ('comments', models.CharField(default\n ='', max_length=500)), ('emr', models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR'))],\n options={'abstract': False}), migrations.CreateModel(name=\n 'EMRVitals', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), (\n 'dateCreated', models.DateTimeField(default=django.utils.timezone.\n now)), ('restingBPM', models.IntegerField(default=0)), (\n 'bloodPressure', models.CharField(default='', max_length=10)), (\n 'height', 
models.FloatField(default=0)), ('weight', models.\n FloatField(default=0)), ('age', models.IntegerField(default=0)), (\n 'comments', models.CharField(default='', max_length=1000)), ('emr',\n models.ForeignKey(blank=True, null=True, on_delete=django.db.models\n .deletion.CASCADE, to='emr.EMR'))], options={'abstract': False})]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-10-28 17:50\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='EMR',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('emergency', models.CharField(default='', max_length=10)),\n ],\n ),\n migrations.CreateModel(\n name='EMRNote',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)),\n ('comments', models.CharField(default='', max_length=500)),\n ('emr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='EMRTrackedMetric',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)),\n ('label', models.CharField(default='', max_length=200)),\n ('comments', models.CharField(default='', max_length=500)),\n ('emr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='EMRVitals',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')),\n ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)),\n ('restingBPM', models.IntegerField(default=0)),\n ('bloodPressure', models.CharField(default='', max_length=10)),\n ('height', models.FloatField(default=0)),\n ('weight', models.FloatField(default=0)),\n ('age', models.IntegerField(default=0)),\n ('comments', models.CharField(default='', max_length=1000)),\n ('emr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Favorits(QDialog, Ui_DialogFavorit): <|reserved_special_token_0|> def __init__(self): super(Favorits, self).__init__() self.setupUi(self) self.buttonBox.button(QDialogButtonBox.Save).setText('Сохранить') self.buttonBox.button(QDialogButtonBox.Cancel).setText('Отмена') self.path = 'setting.json' self.setStyleSheet(open('static/style.qss').read()) self.list_fav() self.plus_pb.setIcon(QIcon(':/icons/icons/plus.png')) self.minus_pb.setIcon(QIcon(':/icons/icons/minus.png')) self.plus_pb.clicked.connect(self.addfav) self.minus_pb.clicked.connect(self.delfav) def list_fav(self): try: self.data = json.load(open(self.path)) for i in self.data['favorit']: self.favlist_listWidget.addItem(i) except FileNotFoundError: print('File with setting not found') except KeyError: self.data['favorit'] = [] json.dump(self.data, open(self.path, 'w')) self.list_fav() <|reserved_special_token_0|> def delfav(self): buf = self.favlist_listWidget.currentItem().text() self.data['favorit'].remove(buf) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Favorits(QDialog, Ui_DialogFavorit): <|reserved_special_token_0|> def __init__(self): super(Favorits, self).__init__() self.setupUi(self) self.buttonBox.button(QDialogButtonBox.Save).setText('Сохранить') self.buttonBox.button(QDialogButtonBox.Cancel).setText('Отмена') self.path = 'setting.json' self.setStyleSheet(open('static/style.qss').read()) self.list_fav() self.plus_pb.setIcon(QIcon(':/icons/icons/plus.png')) self.minus_pb.setIcon(QIcon(':/icons/icons/minus.png')) self.plus_pb.clicked.connect(self.addfav) self.minus_pb.clicked.connect(self.delfav) def list_fav(self): try: self.data = json.load(open(self.path)) for i in self.data['favorit']: self.favlist_listWidget.addItem(i) except FileNotFoundError: print('File with setting not found') except KeyError: self.data['favorit'] = [] 
json.dump(self.data, open(self.path, 'w')) self.list_fav() def addfav(self): name = def_url.Input_stream() if name.exec_(): link = name.url_stream_le.text() reg = 'http[s]?://' if re.match(reg, link) is not None: self.data['favorit'].append(link) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() def delfav(self): buf = self.favlist_listWidget.currentItem().text() self.data['favorit'].remove(buf) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Favorits(QDialog, Ui_DialogFavorit): """docstring for Favorits""" def __init__(self): super(Favorits, self).__init__() self.setupUi(self) self.buttonBox.button(QDialogButtonBox.Save).setText('Сохранить') self.buttonBox.button(QDialogButtonBox.Cancel).setText('Отмена') self.path = 'setting.json' self.setStyleSheet(open('static/style.qss').read()) self.list_fav() self.plus_pb.setIcon(QIcon(':/icons/icons/plus.png')) self.minus_pb.setIcon(QIcon(':/icons/icons/minus.png')) self.plus_pb.clicked.connect(self.addfav) self.minus_pb.clicked.connect(self.delfav) def list_fav(self): try: self.data = json.load(open(self.path)) for i in self.data['favorit']: self.favlist_listWidget.addItem(i) except FileNotFoundError: print('File with setting not found') except KeyError: self.data['favorit'] = [] json.dump(self.data, open(self.path, 'w')) self.list_fav() def addfav(self): name = def_url.Input_stream() if name.exec_(): link = name.url_stream_le.text() reg = 'http[s]?://' if re.match(reg, link) is not None: self.data['favorit'].append(link) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() def delfav(self): buf = self.favlist_listWidget.currentItem().text() self.data['favorit'].remove(buf) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() if __name__ == '__main__': app = QApplication([]) 
w = Favorits() w.show() app.exec_() <|reserved_special_token_1|> from PyQt5.QtWidgets import * from PyQt5.QtCore import * from PyQt5.QtGui import * from widgets.favorits.favorit_win import Ui_DialogFavorit import json import re from widgets.input_link import def_url class Favorits(QDialog, Ui_DialogFavorit): """docstring for Favorits""" def __init__(self): super(Favorits, self).__init__() self.setupUi(self) self.buttonBox.button(QDialogButtonBox.Save).setText('Сохранить') self.buttonBox.button(QDialogButtonBox.Cancel).setText('Отмена') self.path = 'setting.json' self.setStyleSheet(open('static/style.qss').read()) self.list_fav() self.plus_pb.setIcon(QIcon(':/icons/icons/plus.png')) self.minus_pb.setIcon(QIcon(':/icons/icons/minus.png')) self.plus_pb.clicked.connect(self.addfav) self.minus_pb.clicked.connect(self.delfav) def list_fav(self): try: self.data = json.load(open(self.path)) for i in self.data['favorit']: self.favlist_listWidget.addItem(i) except FileNotFoundError: print('File with setting not found') except KeyError: self.data['favorit'] = [] json.dump(self.data, open(self.path, 'w')) self.list_fav() def addfav(self): name = def_url.Input_stream() if name.exec_(): link = name.url_stream_le.text() reg = 'http[s]?://' if re.match(reg, link) is not None: self.data['favorit'].append(link) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() def delfav(self): buf = self.favlist_listWidget.currentItem().text() self.data['favorit'].remove(buf) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() if __name__ == '__main__': app = QApplication([]) w = Favorits() w.show() app.exec_() <|reserved_special_token_1|> #!/usr/bin/env python # -*- coding: utf-8 -*- # @Date : 2016-03-15 16:39:32 # @Author : Your Name (you@example.org) # @Link : http://example.org # @Version : $Id$ from PyQt5.QtWidgets import * from PyQt5.QtCore import * from PyQt5.QtGui import * from widgets.favorits.favorit_win 
import Ui_DialogFavorit import json import re from widgets.input_link import def_url #from favorit_win import Ui_DialogFavorit class Favorits(QDialog, Ui_DialogFavorit): """docstring for Favorits""" def __init__(self): super(Favorits, self).__init__() self.setupUi(self) self.buttonBox.button(QDialogButtonBox.Save).setText("Сохранить") self.buttonBox.button(QDialogButtonBox.Cancel).setText("Отмена") self.path = 'setting.json' self.setStyleSheet(open('static/style.qss').read()) self.list_fav() self.plus_pb.setIcon(QIcon(":/icons/icons/plus.png")) self.minus_pb.setIcon(QIcon(":/icons/icons/minus.png")) self.plus_pb.clicked.connect(self.addfav) self.minus_pb.clicked.connect(self.delfav) def list_fav(self): try: self.data = json.load(open(self.path)) for i in self.data['favorit']: self.favlist_listWidget.addItem(i) except FileNotFoundError: print("File with setting not found") except KeyError: self.data['favorit'] = [] json.dump(self.data, open(self.path, 'w')) self.list_fav() def addfav(self): name = def_url.Input_stream() if name.exec_(): link = name.url_stream_le.text() reg = "http[s]?://" if re.match(reg, link) is not None: self.data['favorit'].append(link) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() def delfav(self): buf = self.favlist_listWidget.currentItem().text() self.data['favorit'].remove(buf) json.dump(self.data, open(self.path, 'w')) self.favlist_listWidget.clear() self.list_fav() if __name__ == '__main__': app = QApplication([]) w = Favorits() w.show() app.exec_()
flexible
{ "blob_id": "14023785983f493af57189b3d96254efef2e33ae", "index": 8180, "step-1": "<mask token>\n\n\nclass Favorits(QDialog, Ui_DialogFavorit):\n <mask token>\n\n def __init__(self):\n super(Favorits, self).__init__()\n self.setupUi(self)\n self.buttonBox.button(QDialogButtonBox.Save).setText('Сохранить')\n self.buttonBox.button(QDialogButtonBox.Cancel).setText('Отмена')\n self.path = 'setting.json'\n self.setStyleSheet(open('static/style.qss').read())\n self.list_fav()\n self.plus_pb.setIcon(QIcon(':/icons/icons/plus.png'))\n self.minus_pb.setIcon(QIcon(':/icons/icons/minus.png'))\n self.plus_pb.clicked.connect(self.addfav)\n self.minus_pb.clicked.connect(self.delfav)\n\n def list_fav(self):\n try:\n self.data = json.load(open(self.path))\n for i in self.data['favorit']:\n self.favlist_listWidget.addItem(i)\n except FileNotFoundError:\n print('File with setting not found')\n except KeyError:\n self.data['favorit'] = []\n json.dump(self.data, open(self.path, 'w'))\n self.list_fav()\n <mask token>\n\n def delfav(self):\n buf = self.favlist_listWidget.currentItem().text()\n self.data['favorit'].remove(buf)\n json.dump(self.data, open(self.path, 'w'))\n self.favlist_listWidget.clear()\n self.list_fav()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Favorits(QDialog, Ui_DialogFavorit):\n <mask token>\n\n def __init__(self):\n super(Favorits, self).__init__()\n self.setupUi(self)\n self.buttonBox.button(QDialogButtonBox.Save).setText('Сохранить')\n self.buttonBox.button(QDialogButtonBox.Cancel).setText('Отмена')\n self.path = 'setting.json'\n self.setStyleSheet(open('static/style.qss').read())\n self.list_fav()\n self.plus_pb.setIcon(QIcon(':/icons/icons/plus.png'))\n self.minus_pb.setIcon(QIcon(':/icons/icons/minus.png'))\n self.plus_pb.clicked.connect(self.addfav)\n self.minus_pb.clicked.connect(self.delfav)\n\n def list_fav(self):\n try:\n self.data = json.load(open(self.path))\n for i in self.data['favorit']:\n self.favlist_listWidget.addItem(i)\n except 
FileNotFoundError:\n print('File with setting not found')\n except KeyError:\n self.data['favorit'] = []\n json.dump(self.data, open(self.path, 'w'))\n self.list_fav()\n\n def addfav(self):\n name = def_url.Input_stream()\n if name.exec_():\n link = name.url_stream_le.text()\n reg = 'http[s]?://'\n if re.match(reg, link) is not None:\n self.data['favorit'].append(link)\n json.dump(self.data, open(self.path, 'w'))\n self.favlist_listWidget.clear()\n self.list_fav()\n\n def delfav(self):\n buf = self.favlist_listWidget.currentItem().text()\n self.data['favorit'].remove(buf)\n json.dump(self.data, open(self.path, 'w'))\n self.favlist_listWidget.clear()\n self.list_fav()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Favorits(QDialog, Ui_DialogFavorit):\n \"\"\"docstring for Favorits\"\"\"\n\n def __init__(self):\n super(Favorits, self).__init__()\n self.setupUi(self)\n self.buttonBox.button(QDialogButtonBox.Save).setText('Сохранить')\n self.buttonBox.button(QDialogButtonBox.Cancel).setText('Отмена')\n self.path = 'setting.json'\n self.setStyleSheet(open('static/style.qss').read())\n self.list_fav()\n self.plus_pb.setIcon(QIcon(':/icons/icons/plus.png'))\n self.minus_pb.setIcon(QIcon(':/icons/icons/minus.png'))\n self.plus_pb.clicked.connect(self.addfav)\n self.minus_pb.clicked.connect(self.delfav)\n\n def list_fav(self):\n try:\n self.data = json.load(open(self.path))\n for i in self.data['favorit']:\n self.favlist_listWidget.addItem(i)\n except FileNotFoundError:\n print('File with setting not found')\n except KeyError:\n self.data['favorit'] = []\n json.dump(self.data, open(self.path, 'w'))\n self.list_fav()\n\n def addfav(self):\n name = def_url.Input_stream()\n if name.exec_():\n link = name.url_stream_le.text()\n reg = 'http[s]?://'\n if re.match(reg, link) is not None:\n self.data['favorit'].append(link)\n json.dump(self.data, open(self.path, 'w'))\n self.favlist_listWidget.clear()\n self.list_fav()\n\n def delfav(self):\n buf = 
self.favlist_listWidget.currentItem().text()\n self.data['favorit'].remove(buf)\n json.dump(self.data, open(self.path, 'w'))\n self.favlist_listWidget.clear()\n self.list_fav()\n\n\nif __name__ == '__main__':\n app = QApplication([])\n w = Favorits()\n w.show()\n app.exec_()\n", "step-4": "from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom widgets.favorits.favorit_win import Ui_DialogFavorit\nimport json\nimport re\nfrom widgets.input_link import def_url\n\n\nclass Favorits(QDialog, Ui_DialogFavorit):\n \"\"\"docstring for Favorits\"\"\"\n\n def __init__(self):\n super(Favorits, self).__init__()\n self.setupUi(self)\n self.buttonBox.button(QDialogButtonBox.Save).setText('Сохранить')\n self.buttonBox.button(QDialogButtonBox.Cancel).setText('Отмена')\n self.path = 'setting.json'\n self.setStyleSheet(open('static/style.qss').read())\n self.list_fav()\n self.plus_pb.setIcon(QIcon(':/icons/icons/plus.png'))\n self.minus_pb.setIcon(QIcon(':/icons/icons/minus.png'))\n self.plus_pb.clicked.connect(self.addfav)\n self.minus_pb.clicked.connect(self.delfav)\n\n def list_fav(self):\n try:\n self.data = json.load(open(self.path))\n for i in self.data['favorit']:\n self.favlist_listWidget.addItem(i)\n except FileNotFoundError:\n print('File with setting not found')\n except KeyError:\n self.data['favorit'] = []\n json.dump(self.data, open(self.path, 'w'))\n self.list_fav()\n\n def addfav(self):\n name = def_url.Input_stream()\n if name.exec_():\n link = name.url_stream_le.text()\n reg = 'http[s]?://'\n if re.match(reg, link) is not None:\n self.data['favorit'].append(link)\n json.dump(self.data, open(self.path, 'w'))\n self.favlist_listWidget.clear()\n self.list_fav()\n\n def delfav(self):\n buf = self.favlist_listWidget.currentItem().text()\n self.data['favorit'].remove(buf)\n json.dump(self.data, open(self.path, 'w'))\n self.favlist_listWidget.clear()\n self.list_fav()\n\n\nif __name__ == '__main__':\n app = QApplication([])\n w = 
Favorits()\n w.show()\n app.exec_()\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2016-03-15 16:39:32\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom widgets.favorits.favorit_win import Ui_DialogFavorit\nimport json\nimport re\nfrom widgets.input_link import def_url\n#from favorit_win import Ui_DialogFavorit\n\n\nclass Favorits(QDialog, Ui_DialogFavorit):\n \"\"\"docstring for Favorits\"\"\"\n\n def __init__(self):\n super(Favorits, self).__init__()\n self.setupUi(self)\n self.buttonBox.button(QDialogButtonBox.Save).setText(\"Сохранить\")\n self.buttonBox.button(QDialogButtonBox.Cancel).setText(\"Отмена\")\n self.path = 'setting.json'\n self.setStyleSheet(open('static/style.qss').read())\n self.list_fav()\n self.plus_pb.setIcon(QIcon(\":/icons/icons/plus.png\"))\n self.minus_pb.setIcon(QIcon(\":/icons/icons/minus.png\"))\n self.plus_pb.clicked.connect(self.addfav)\n self.minus_pb.clicked.connect(self.delfav)\n\n def list_fav(self):\n try:\n self.data = json.load(open(self.path))\n for i in self.data['favorit']:\n self.favlist_listWidget.addItem(i)\n except FileNotFoundError:\n print(\"File with setting not found\")\n except KeyError:\n self.data['favorit'] = []\n json.dump(self.data, open(self.path, 'w'))\n self.list_fav()\n\n def addfav(self):\n name = def_url.Input_stream()\n if name.exec_():\n link = name.url_stream_le.text()\n reg = \"http[s]?://\"\n if re.match(reg, link) is not None:\n self.data['favorit'].append(link)\n json.dump(self.data, open(self.path, 'w'))\n\n self.favlist_listWidget.clear()\n self.list_fav()\n\n def delfav(self):\n buf = self.favlist_listWidget.currentItem().text()\n self.data['favorit'].remove(buf)\n json.dump(self.data, open(self.path, 'w'))\n self.favlist_listWidget.clear()\n self.list_fav()\n\n\nif __name__ == '__main__':\n app = QApplication([])\n w = Favorits()\n 
w.show()\n app.exec_()\n", "step-ids": [ 4, 5, 7, 8, 9 ] }
[ 4, 5, 7, 8, 9 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> received_event = Event() leave_rooms_event = Event() exit_event = Event() output_message_queue = AGQueue() input_message_queue = AGQueue() matrix_to_aio_queue = AGQueue() aio_to_matrix_queue = AGQueue() sync_to_matrix_queue = Queue() SERVER_URL = 'https://transport.transport01.raiden.network' <|reserved_special_token_1|> from gevent.event import Event from gevent.queue import Queue from ping_pong_chat.aio_queue import AGQueue received_event = Event() leave_rooms_event = Event() exit_event = Event() output_message_queue = AGQueue() input_message_queue = AGQueue() matrix_to_aio_queue = AGQueue() aio_to_matrix_queue = AGQueue() sync_to_matrix_queue = Queue() SERVER_URL = 'https://transport.transport01.raiden.network' <|reserved_special_token_1|> from gevent.event import Event from gevent.queue import Queue from ping_pong_chat.aio_queue import AGQueue received_event = Event() leave_rooms_event = Event() exit_event = Event() output_message_queue = AGQueue() input_message_queue = AGQueue() matrix_to_aio_queue = AGQueue() aio_to_matrix_queue = AGQueue() sync_to_matrix_queue = Queue() SERVER_URL = "https://transport.transport01.raiden.network"
flexible
{ "blob_id": "af1a6c6009b21962228fbe737f27c22bf9460762", "index": 729, "step-1": "<mask token>\n", "step-2": "<mask token>\nreceived_event = Event()\nleave_rooms_event = Event()\nexit_event = Event()\noutput_message_queue = AGQueue()\ninput_message_queue = AGQueue()\nmatrix_to_aio_queue = AGQueue()\naio_to_matrix_queue = AGQueue()\nsync_to_matrix_queue = Queue()\nSERVER_URL = 'https://transport.transport01.raiden.network'\n", "step-3": "from gevent.event import Event\nfrom gevent.queue import Queue\nfrom ping_pong_chat.aio_queue import AGQueue\nreceived_event = Event()\nleave_rooms_event = Event()\nexit_event = Event()\noutput_message_queue = AGQueue()\ninput_message_queue = AGQueue()\nmatrix_to_aio_queue = AGQueue()\naio_to_matrix_queue = AGQueue()\nsync_to_matrix_queue = Queue()\nSERVER_URL = 'https://transport.transport01.raiden.network'\n", "step-4": "from gevent.event import Event\nfrom gevent.queue import Queue\nfrom ping_pong_chat.aio_queue import AGQueue\n\nreceived_event = Event()\nleave_rooms_event = Event()\nexit_event = Event()\noutput_message_queue = AGQueue()\ninput_message_queue = AGQueue()\n\nmatrix_to_aio_queue = AGQueue()\naio_to_matrix_queue = AGQueue()\nsync_to_matrix_queue = Queue()\n\nSERVER_URL = \"https://transport.transport01.raiden.network\"\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#header import matplotlib.pyplot as pmf import random p = 0.5 # Probablility of success for original system n = 18 # Number of trials Y = [] # Contains binomial RVs b = [0] * (n+1) # List of n + 1 zeroes N = 100 # Number of experiments performed for j in range(N): # Bernoulli random variable for i in range(n): r = random.uniform(0,1) if r < p: x = 1 else: x = 0 Y.append(x) outcome = sum(Y) # Number of successes from 0 to n b[outcome] = b[outcome] + 1 # Record of successes for bar plot Y.clear() for i in range(n+1): b[i] = b[i]/N # Probabilities p = 0 cv = int(input('Enter a choice for the CV.')) for i in range(cv, 19): p = p + b[i] print('For a critical value of', cv, 'the probability of rejecting the old system in favor of a new system that is no better than is', p,'.') #cv = 13, 1/20 or the 5% rule
normal
{ "blob_id": "9a1b268386b4652bf50af0365892ef7338329727", "index": 9631, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor j in range(N):\n for i in range(n):\n r = random.uniform(0, 1)\n if r < p:\n x = 1\n else:\n x = 0\n Y.append(x)\n outcome = sum(Y)\n b[outcome] = b[outcome] + 1\n Y.clear()\nfor i in range(n + 1):\n b[i] = b[i] / N\n p = 0\n<mask token>\nfor i in range(cv, 19):\n p = p + b[i]\nprint('For a critical value of', cv,\n 'the probability of rejecting the old system in favor of a new system that is no better than is'\n , p, '.')\n", "step-3": "<mask token>\np = 0.5\nn = 18\nY = []\nb = [0] * (n + 1)\nN = 100\nfor j in range(N):\n for i in range(n):\n r = random.uniform(0, 1)\n if r < p:\n x = 1\n else:\n x = 0\n Y.append(x)\n outcome = sum(Y)\n b[outcome] = b[outcome] + 1\n Y.clear()\nfor i in range(n + 1):\n b[i] = b[i] / N\n p = 0\ncv = int(input('Enter a choice for the CV.'))\nfor i in range(cv, 19):\n p = p + b[i]\nprint('For a critical value of', cv,\n 'the probability of rejecting the old system in favor of a new system that is no better than is'\n , p, '.')\n", "step-4": "import matplotlib.pyplot as pmf\nimport random\np = 0.5\nn = 18\nY = []\nb = [0] * (n + 1)\nN = 100\nfor j in range(N):\n for i in range(n):\n r = random.uniform(0, 1)\n if r < p:\n x = 1\n else:\n x = 0\n Y.append(x)\n outcome = sum(Y)\n b[outcome] = b[outcome] + 1\n Y.clear()\nfor i in range(n + 1):\n b[i] = b[i] / N\n p = 0\ncv = int(input('Enter a choice for the CV.'))\nfor i in range(cv, 19):\n p = p + b[i]\nprint('For a critical value of', cv,\n 'the probability of rejecting the old system in favor of a new system that is no better than is'\n , p, '.')\n", "step-5": "#header\n\nimport matplotlib.pyplot as pmf\nimport random\n\np = 0.5 # Probablility of success for original system\nn = 18 # Number of trials\nY = [] # Contains binomial RVs\nb = [0] * (n+1) # List of n + 1 zeroes\nN = 100 # Number of experiments performed\n\nfor j in range(N):\n \n # Bernoulli random 
variable\n for i in range(n):\n \n r = random.uniform(0,1)\n if r < p:\n x = 1\n else:\n x = 0\n Y.append(x)\n outcome = sum(Y) # Number of successes from 0 to n\n b[outcome] = b[outcome] + 1 # Record of successes for bar plot\n Y.clear()\n \n \nfor i in range(n+1):\n b[i] = b[i]/N # Probabilities\n p = 0\n\ncv = int(input('Enter a choice for the CV.'))\n\nfor i in range(cv, 19):\n p = p + b[i]\n \nprint('For a critical value of', cv, 'the probability of rejecting the old system in favor of a new system that is no better than is', p,'.')\n#cv = 13, 1/20 or the 5% rule", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
## ## Originally created by https://www.reddit.com/user/AlekseyP ## Seen at: https://www.reddit.com/r/technology/comments/43fi39/i_set_up_my_raspberry_pi_to_automatically_tweet ## #!/usr/bin/python import os import sys import csv import datetime import time import twitter #Configuration # Twitter ACCESS_TOKEN="" ACCESS_TOKEN_SECRET="" CONSUMER_KEY="" CONSUMER_SECRET="" # Minimum network speed min_net_speed = 10 # Speedtest client absolute path speedtest_path = "/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/speedtest-cli" csv_output_file_path = "/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/" def test(): #run speedtest-cli print 'running test' a = os.popen("python %s --simple"%(speedtest_path)).read() print 'ran' #split the 3 line result (ping,down,up) lines = a.split('\n') print a ts = time.time() date =datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') #if speedtest could not connect set the speeds to 0 if "Cannot" in a: p = 100 d = 0 u = 0 #extract the values for ping down and up values else: p = lines[0][6:11] d = lines[1][10:14] u = lines[2][8:12] print date,p, d, u #save the data to file for local network plotting out_file = open(csv_output_file_path + 'data.csv', 'a') writer = csv.writer(out_file) writer.writerow((ts*1000,p,d,u)) out_file.close() my_auth = twitter.OAuth(ACCESS_TOKEN,ACCESS_TOKEN_SECRET,CONSUMER_KEY,CONSUMER_SECRET) twit = twitter.Twitter(auth=my_auth) #try to tweet if speedtest couldnt even connet. Probably wont work if the internet is down if "Cannot" in a: try: tweet="Hey @Comcast @ComcastCares why is my internet down? I pay for 150down\\10up in Washington DC? 
#comcastoutage #comcast" ## twit.statuses.update(status=tweet) print tweet except: pass # tweet if down speed is less than whatever I set elif eval(d)<min_net_speed: print "trying to tweet" try: # i know there must be a better way than to do (str(int(eval()))) tweet="Hey @Comcast why is my internet speed " + str(int(eval(d))) + "down\\" + str(int(eval(u))) + "up when I pay for 150down\\10up in Washington DC? @ComcastCares @xfinity #comcast #speedtest" ## twit.statuses.update(status=tweet) print tweet except Exception,e: print str(e) pass return if __name__ == '__main__': test() print 'completed'
normal
{ "blob_id": "6492f1eda79fd3116058f29647dc5f09e903f637", "index": 7274, "step-1": "##\n## Originally created by https://www.reddit.com/user/AlekseyP\n## Seen at: https://www.reddit.com/r/technology/comments/43fi39/i_set_up_my_raspberry_pi_to_automatically_tweet\n##\n\n#!/usr/bin/python\nimport os\nimport sys\nimport csv\nimport datetime\nimport time\nimport twitter\n\n#Configuration\n# Twitter\nACCESS_TOKEN=\"\"\nACCESS_TOKEN_SECRET=\"\"\nCONSUMER_KEY=\"\"\nCONSUMER_SECRET=\"\"\n# Minimum network speed\nmin_net_speed = 10\n# Speedtest client absolute path\nspeedtest_path = \"/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/speedtest-cli\"\ncsv_output_file_path = \"/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/\"\n\ndef test():\n\n #run speedtest-cli\n print 'running test'\n a = os.popen(\"python %s --simple\"%(speedtest_path)).read()\n print 'ran'\n #split the 3 line result (ping,down,up)\n lines = a.split('\\n')\n print a\n ts = time.time()\n date =datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n #if speedtest could not connect set the speeds to 0\n if \"Cannot\" in a:\n p = 100\n d = 0\n u = 0\n #extract the values for ping down and up values\n else:\n p = lines[0][6:11]\n d = lines[1][10:14]\n u = lines[2][8:12]\n print date,p, d, u\n #save the data to file for local network plotting\n out_file = open(csv_output_file_path + 'data.csv', 'a')\n writer = csv.writer(out_file)\n writer.writerow((ts*1000,p,d,u))\n out_file.close()\n\n my_auth = twitter.OAuth(ACCESS_TOKEN,ACCESS_TOKEN_SECRET,CONSUMER_KEY,CONSUMER_SECRET)\n twit = twitter.Twitter(auth=my_auth)\n\n #try to tweet if speedtest couldnt even connet. Probably wont work if the internet is down\n if \"Cannot\" in a:\n try:\n tweet=\"Hey @Comcast @ComcastCares why is my internet down? I pay for 150down\\\\10up in Washington DC? 
#comcastoutage #comcast\"\n ## twit.statuses.update(status=tweet)\n\t\t\t print tweet\n except:\n pass\n\n # tweet if down speed is less than whatever I set\n elif eval(d)<min_net_speed:\n print \"trying to tweet\"\n try:\n # i know there must be a better way than to do (str(int(eval())))\n tweet=\"Hey @Comcast why is my internet speed \" + str(int(eval(d))) + \"down\\\\\" + str(int(eval(u))) + \"up when I pay for 150down\\\\10up in Washington DC? @ComcastCares @xfinity #comcast #speedtest\"\n ## twit.statuses.update(status=tweet)\n\t\t\t print tweet\n except Exception,e:\n print str(e)\n pass\n return\n\nif __name__ == '__main__':\n test()\n print 'completed'\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> def get_text_from_image(imageName): img = preprocess(imageName) result = tes.image_to_string(img) return result <|reserved_special_token_0|> def find_receipt_box(image): """ Finds a contour around the receipt in the given image. Returns the bounding box and the binary image """ gray = cv.medianBlur(image, 15, 0) _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU) k = np.ones((25, 25)) thresh = cv.erode(thresh, k, iterations=1) thresh = cv.dilate(thresh, k, iterations=1) contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) contours = sorted(contours[0], key=cv.contourArea, reverse=True) contour = contours[0] rect = cv.minAreaRect(contour) box = cv.boxPoints(rect) box = np.int0(box) return box def perspective_transform(contour): """Produces the transformation matrix and the new size for perspective correction""" ord_rect = np.float32(order_rect(contour)) tl, tr, br, bl = ord_rect dist_top = np.linalg.norm(tl - tr) dist_btm = np.linalg.norm(bl - br) width = max(dist_btm, dist_top) dist_left = np.linalg.norm(tl - tr) dist_right = np.linalg.norm(tr - br) height = max(dist_left, dist_right) dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1 ], [0, height - 1]], dtype=ord_rect.dtype) M = cv.getPerspectiveTransform(ord_rect, dest_corners) return M, width, height def order_rect(pts): """ orders a rectangle in the order top-left, top-right, bottom-right, bottom-left """ new = np.zeros((4, 2), dtype='int64') s = pts.sum(axis=1) new[0] = pts[np.argmin(s)] new[2] = pts[np.argmax(s)] diff = np.diff(pts, axis=1) new[1] = pts[np.argmin(diff)] new[3] = pts[np.argmax(diff)] return new def apply_perspective_correction(image, M, width, height): """Crops the contour and applies perspective correction""" warped = cv.warpPerspective(image, M, (width, height)) return warped <|reserved_special_token_1|> <|reserved_special_token_0|> print(text) def get_text_from_image(imageName): img = 
preprocess(imageName) result = tes.image_to_string(img) return result def preprocess(image_name): image = cv.imread(image_name) gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) receiptBox = find_receipt_box(gray) M, w, h = perspective_transform(receiptBox) receiptImg = apply_perspective_correction(gray, M, w, h) receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv. ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10) return receiptImg def find_receipt_box(image): """ Finds a contour around the receipt in the given image. Returns the bounding box and the binary image """ gray = cv.medianBlur(image, 15, 0) _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU) k = np.ones((25, 25)) thresh = cv.erode(thresh, k, iterations=1) thresh = cv.dilate(thresh, k, iterations=1) contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) contours = sorted(contours[0], key=cv.contourArea, reverse=True) contour = contours[0] rect = cv.minAreaRect(contour) box = cv.boxPoints(rect) box = np.int0(box) return box def perspective_transform(contour): """Produces the transformation matrix and the new size for perspective correction""" ord_rect = np.float32(order_rect(contour)) tl, tr, br, bl = ord_rect dist_top = np.linalg.norm(tl - tr) dist_btm = np.linalg.norm(bl - br) width = max(dist_btm, dist_top) dist_left = np.linalg.norm(tl - tr) dist_right = np.linalg.norm(tr - br) height = max(dist_left, dist_right) dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1 ], [0, height - 1]], dtype=ord_rect.dtype) M = cv.getPerspectiveTransform(ord_rect, dest_corners) return M, width, height def order_rect(pts): """ orders a rectangle in the order top-left, top-right, bottom-right, bottom-left """ new = np.zeros((4, 2), dtype='int64') s = pts.sum(axis=1) new[0] = pts[np.argmin(s)] new[2] = pts[np.argmax(s)] diff = np.diff(pts, axis=1) new[1] = pts[np.argmin(diff)] new[3] = pts[np.argmax(diff)] return new def apply_perspective_correction(image, M, 
width, height): """Crops the contour and applies perspective correction""" warped = cv.warpPerspective(image, M, (width, height)) return warped <|reserved_special_token_1|> <|reserved_special_token_0|> text = get_text_from_image('resizedReceipt.jpg') print(text) def get_text_from_image(imageName): img = preprocess(imageName) result = tes.image_to_string(img) return result def preprocess(image_name): image = cv.imread(image_name) gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) receiptBox = find_receipt_box(gray) M, w, h = perspective_transform(receiptBox) receiptImg = apply_perspective_correction(gray, M, w, h) receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv. ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10) return receiptImg def find_receipt_box(image): """ Finds a contour around the receipt in the given image. Returns the bounding box and the binary image """ gray = cv.medianBlur(image, 15, 0) _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU) k = np.ones((25, 25)) thresh = cv.erode(thresh, k, iterations=1) thresh = cv.dilate(thresh, k, iterations=1) contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) contours = sorted(contours[0], key=cv.contourArea, reverse=True) contour = contours[0] rect = cv.minAreaRect(contour) box = cv.boxPoints(rect) box = np.int0(box) return box def perspective_transform(contour): """Produces the transformation matrix and the new size for perspective correction""" ord_rect = np.float32(order_rect(contour)) tl, tr, br, bl = ord_rect dist_top = np.linalg.norm(tl - tr) dist_btm = np.linalg.norm(bl - br) width = max(dist_btm, dist_top) dist_left = np.linalg.norm(tl - tr) dist_right = np.linalg.norm(tr - br) height = max(dist_left, dist_right) dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1 ], [0, height - 1]], dtype=ord_rect.dtype) M = cv.getPerspectiveTransform(ord_rect, dest_corners) return M, width, height def order_rect(pts): """ orders a rectangle in the order 
top-left, top-right, bottom-right, bottom-left """ new = np.zeros((4, 2), dtype='int64') s = pts.sum(axis=1) new[0] = pts[np.argmin(s)] new[2] = pts[np.argmax(s)] diff = np.diff(pts, axis=1) new[1] = pts[np.argmin(diff)] new[3] = pts[np.argmax(diff)] return new def apply_perspective_correction(image, M, width, height): """Crops the contour and applies perspective correction""" warped = cv.warpPerspective(image, M, (width, height)) return warped <|reserved_special_token_1|> import cv2 as cv import numpy as np import pytesseract as tes text = get_text_from_image('resizedReceipt.jpg') print(text) def get_text_from_image(imageName): img = preprocess(imageName) result = tes.image_to_string(img) return result def preprocess(image_name): image = cv.imread(image_name) gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) receiptBox = find_receipt_box(gray) M, w, h = perspective_transform(receiptBox) receiptImg = apply_perspective_correction(gray, M, w, h) receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv. ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10) return receiptImg def find_receipt_box(image): """ Finds a contour around the receipt in the given image. 
Returns the bounding box and the binary image """ gray = cv.medianBlur(image, 15, 0) _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU) k = np.ones((25, 25)) thresh = cv.erode(thresh, k, iterations=1) thresh = cv.dilate(thresh, k, iterations=1) contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) contours = sorted(contours[0], key=cv.contourArea, reverse=True) contour = contours[0] rect = cv.minAreaRect(contour) box = cv.boxPoints(rect) box = np.int0(box) return box def perspective_transform(contour): """Produces the transformation matrix and the new size for perspective correction""" ord_rect = np.float32(order_rect(contour)) tl, tr, br, bl = ord_rect dist_top = np.linalg.norm(tl - tr) dist_btm = np.linalg.norm(bl - br) width = max(dist_btm, dist_top) dist_left = np.linalg.norm(tl - tr) dist_right = np.linalg.norm(tr - br) height = max(dist_left, dist_right) dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1 ], [0, height - 1]], dtype=ord_rect.dtype) M = cv.getPerspectiveTransform(ord_rect, dest_corners) return M, width, height def order_rect(pts): """ orders a rectangle in the order top-left, top-right, bottom-right, bottom-left """ new = np.zeros((4, 2), dtype='int64') s = pts.sum(axis=1) new[0] = pts[np.argmin(s)] new[2] = pts[np.argmax(s)] diff = np.diff(pts, axis=1) new[1] = pts[np.argmin(diff)] new[3] = pts[np.argmax(diff)] return new def apply_perspective_correction(image, M, width, height): """Crops the contour and applies perspective correction""" warped = cv.warpPerspective(image, M, (width, height)) return warped <|reserved_special_token_1|> import cv2 as cv import numpy as np import pytesseract as tes text = get_text_from_image("resizedReceipt.jpg") print(text) def get_text_from_image(imageName): img = preprocess(imageName) result = tes.image_to_string(img) return result def preprocess(image_name): image = cv.imread(image_name) gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) receiptBox = 
find_receipt_box(gray) M, w, h = perspective_transform(receiptBox) receiptImg = apply_perspective_correction(gray, M, w, h) receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10) return receiptImg def find_receipt_box(image): """ Finds a contour around the receipt in the given image. Returns the bounding box and the binary image """ # gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) gray = cv.medianBlur(image, 15, 0) _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU) k = np.ones((25, 25)) thresh = cv.erode(thresh, k, iterations=1) thresh = cv.dilate(thresh, k, iterations=1) contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) contours = sorted(contours[0], key=cv.contourArea, reverse=True) contour = contours[0] rect = cv.minAreaRect(contour) box = cv.boxPoints(rect) box = np.int0(box) return box def perspective_transform(contour): """Produces the transformation matrix and the new size for perspective correction""" ord_rect = np.float32(order_rect(contour)) (tl, tr, br, bl) = ord_rect dist_top = np.linalg.norm(tl - tr) dist_btm = np.linalg.norm(bl - br) width = max(dist_btm, dist_top) dist_left = np.linalg.norm(tl - tr) dist_right = np.linalg.norm(tr - br) height = max(dist_left, dist_right) dest_corners = np.array([ [0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1] ], dtype=ord_rect.dtype) M = cv.getPerspectiveTransform(ord_rect, dest_corners) return M, width, height def order_rect(pts): """ orders a rectangle in the order top-left, top-right, bottom-right, bottom-left """ new = np.zeros((4, 2), dtype="int64") s = pts.sum(axis=1) new[0] = pts[np.argmin(s)] new[2] = pts[np.argmax(s)] diff = np.diff(pts, axis=1) new[1] = pts[np.argmin(diff)] new[3] = pts[np.argmax(diff)] return new def apply_perspective_correction(image, M, width, height): """Crops the contour and applies perspective correction""" warped = cv.warpPerspective(image, M, (width, height)) return 
warped
flexible
{ "blob_id": "e480136aca96e45cc8a7ca34c1a9d09b96a5a4da", "index": 4152, "step-1": "<mask token>\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\n<mask token>\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective 
correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n", "step-2": "<mask token>\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, 
top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n", "step-3": "<mask token>\ntext = get_text_from_image('resizedReceipt.jpg')\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = 
np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n", "step-4": "import cv2 as cv\nimport numpy as np\nimport pytesseract as tes\ntext = get_text_from_image('resizedReceipt.jpg')\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = 
cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n", "step-5": "import cv2 as cv\nimport numpy as np\nimport pytesseract as tes\n\n\ntext = get_text_from_image(\"resizedReceipt.jpg\")\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = 
cv.adaptiveThreshold(receiptImg, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n # gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n (tl, tr, br, bl) = ord_rect\n\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n\n dest_corners = np.array([\n [0, 0],\n [width - 1, 0],\n [width - 1, height - 1],\n [0, height - 1]\n ], dtype=ord_rect.dtype)\n\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype=\"int64\")\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, 
(width, height))\n return warped\n", "step-ids": [ 5, 7, 8, 9, 10 ] }
[ 5, 7, 8, 9, 10 ]
from packages import data as DATA from packages import plot as PLOT from packages import universal as UNIVERSAL from packages import currency_pair as CP import matplotlib.pyplot as plt import mpl_finance as mpf from packages import db as DB import CONSTANTS import datetime from matplotlib.pylab import date2num from matplotlib.widgets import Cursor pgmanager=DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL) tablename='klines_full_vol_50' rows=pgmanager.select('select * from '+tablename + ' where timestamp>1577808000+86400*5 order by timestamp limit 300') a=1 alist = [] vols_bid = [] vols_ask = [] diff_bid_2_ask = [] diff_bid_2_ask_in_past_2_epochs = [] diff_bid_2_ask_in_past_3_epochs = [] diff_bid_2_ask_in_past_5_epochs = [] diff_bid_2_ask_in_past_10_epochs = [] diff_bid_2_ask_in_past_20_epochs = [] avg_buys=[] avg_sells=[] avg_buy_diff_sell=[] avg_amounts=[] dates = [] cnt = 0 date = date2num(datetime.datetime.fromtimestamp(rows[0][1])) for cnt in range(20, len(rows)): row_previous2=rows[cnt-2] row_previous1 = rows[cnt - 1] row = rows[cnt] open=row[2] high=row[3] low=row[4] close=row[5] vol=row[6] vol_buy,vol_sell=row[7:9] avg_buy, avg_sell, avg_amount_per_trade=row[-3:] date = date + 1 data = (date, open, high, low, close) alist.append(data) vols_bid.append(-vol_buy) vols_ask.append(vol_sell) diff_bid_2_ask.append(vol_buy-vol_sell) diff_bid_2_ask_in_past_2_epochs.append( vol_buy + row_previous1[7] - vol_sell-row_previous1[8]) diff_bid_2_ask_in_past_3_epochs.append( vol_buy + row_previous1[7] +row_previous2[7] - vol_sell-row_previous1[8]-row_previous2[8]) avg_buy_diff_sell.append(avg_buy-avg_sell) avg_amounts.append(avg_amount_per_trade*100) dates.append(date) # fig, ax = plt.subplots(figsize=(32, 18)) # fig.subplots_adjust(bottom=0.5) # mpf.candlestick_ohlc(ax, alist, width=0.5, colorup='g', colordown='r', alpha=1.0) # plt.grid(True) # # 设置日期刻度旋转的角度 # plt.xticks(rotation=30) # plt.title('wanda yuanxian 17') # plt.xlabel('Date') # plt.ylabel('Price') # # x轴的刻度为日期 # 
ax.xaxis_date() fig, axes = plt.subplots(3, sharex=True, figsize=(64, 30)) mpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r') axes[0].set_title('BTC') axes[0].set_ylabel('价格') axes[0].grid(True) axes[0].xaxis_date() # axes[1].plot(dates, avg_buy_diff_sell,c='red',linewidth=0.5) # axes[1].plot(dates, avg_amounts,c='green', linewidth=0.5) # axes[1].grid(True) axes[1].plot(dates, avg_buy_diff_sell, c='orange') axes[1].plot(dates, avg_amounts, c='blue') axes[1].set_ylabel('成交量') axes[1].grid(True) axes[2].plot(dates, diff_bid_2_ask, c='green') axes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange') axes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue') axes[2].set_ylabel('成交量') axes[2].grid(True) axes[2].set_ylabel('买卖均价') axes[2].grid(True) plt.show()
normal
{ "blob_id": "9aaaa744780dbd32b14e09a34976a2a0a3ce34f7", "index": 7864, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\n<mask token>\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n", "step-3": "<mask token>\npgmanager = DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename = 'klines_full_vol_50'\nrows = pgmanager.select('select * from ' + tablename +\n ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na = 1\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = 
[]\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys = []\navg_sells = []\navg_buy_diff_sell = []\navg_amounts = []\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n", "step-4": "from packages import data as DATA\nfrom packages import plot as PLOT\nfrom packages import universal as UNIVERSAL\nfrom packages import currency_pair as CP\nimport matplotlib.pyplot as plt\nimport mpl_finance as mpf\nfrom packages import db as DB\nimport CONSTANTS\nimport 
datetime\nfrom matplotlib.pylab import date2num\nfrom matplotlib.widgets import Cursor\npgmanager = DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename = 'klines_full_vol_50'\nrows = pgmanager.select('select * from ' + tablename +\n ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na = 1\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = []\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys = []\navg_sells = []\navg_buy_diff_sell = []\navg_amounts = []\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, 
diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n", "step-5": "from packages import data as DATA\nfrom packages import plot as PLOT\nfrom packages import universal as UNIVERSAL\nfrom packages import currency_pair as CP\nimport matplotlib.pyplot as plt\nimport mpl_finance as mpf\nfrom packages import db as DB\nimport CONSTANTS\nimport datetime\nfrom matplotlib.pylab import date2num\nfrom matplotlib.widgets import Cursor\n\npgmanager=DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename='klines_full_vol_50'\n\nrows=pgmanager.select('select * from '+tablename + ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na=1\n\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = []\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys=[]\navg_sells=[]\navg_buy_diff_sell=[]\navg_amounts=[]\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\n\nfor cnt in range(20, len(rows)):\n row_previous2=rows[cnt-2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open=row[2]\n high=row[3]\n low=row[4]\n close=row[5]\n vol=row[6]\n vol_buy,vol_sell=row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade=row[-3:]\n date = date + 1\n data = (date, open, high, low, close)\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy-vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(\n vol_buy + row_previous1[7] - vol_sell-row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(\n vol_buy + row_previous1[7] +row_previous2[7] - vol_sell-row_previous1[8]-row_previous2[8])\n avg_buy_diff_sell.append(avg_buy-avg_sell)\n avg_amounts.append(avg_amount_per_trade*100)\n 
dates.append(date)\n\n# fig, ax = plt.subplots(figsize=(32, 18))\n# fig.subplots_adjust(bottom=0.5)\n# mpf.candlestick_ohlc(ax, alist, width=0.5, colorup='g', colordown='r', alpha=1.0)\n# plt.grid(True)\n# # 设置日期刻度旋转的角度\n# plt.xticks(rotation=30)\n# plt.title('wanda yuanxian 17')\n# plt.xlabel('Date')\n# plt.ylabel('Price')\n# # x轴的刻度为日期\n# ax.xaxis_date()\n\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\n\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\n\n# axes[1].plot(dates, avg_buy_diff_sell,c='red',linewidth=0.5)\n# axes[1].plot(dates, avg_amounts,c='green', linewidth=0.5)\n# axes[1].grid(True)\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\n\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\n\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\n\nplt.show()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os import sqlite3 import datetime directory = 'C:\PyHelp' if not os.path.exists(directory): os.makedirs(directory) rand_facts = '''• Exception is used as a base class for all exceptions. It's strongly recommended (but not yet required) that user exceptions are derived from this class too. • SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message. • StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is). • KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use "catch all" try-except statements. • ImportError(StandardError) is raised when Python fails to import a module. • EnvironmentError is used as a base class for exceptions that can be caused by the interpreter's environment (that is, they're usually not caused by bugs in the program). • IOError(EnvironmentError) is used to flag I/O-related errors. • OSError(EnvironmentError) is used to flag errors by the os module. • WindowsError(OSError) is used to flag Windows-specific errors from the os module. • NameError(StandardError) is raised when Python fails to find a global or local name. • UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead. • AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name. • SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error. • IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. 
This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead. • TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead. • TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type. • AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is). • LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn't contain a given index or key. • IndexError(LookupError) is raised by sequence objects when the given index doesn't exist. • KeyError(LookupError) is raised by dictionary objects when the given key doesn't exist. • ArithmeticError(StandardError) is used as a base class for math-related exceptions. • OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type). • ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero. • FloatingPointError(ArithmeticError) is raised when a floating point operation fails. • ValueError(StandardError) is raised if an argument has the right type, but an invalid value. • UnicodeError(ValueError) is raised for type problems related to the Unicode string type. This is only used in 2.0 and later. • RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc. • NotImplementedError(RuntimeError) can be used to flag functions that hasn't been implemented yet, or methods that should be overridden. • SystemError(StandardError) is raised if the interpreter messes up, and knows about it. 
The exception value contains a more detailed description (usually something cryptic, like "eval_code2: NULL globals" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that's just me. • MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens. • NoneType The type of None. • TypeType The type of type objects (such as returned by type()). • IntType The type of integers (e.g. 1). • LongType The type of long integers (e.g. 1L). • FloatType The type of floating point numbers (e.g. 1.0). • ComplexType The type of complex numbers (e.g. 1.0j). • StringType The type of character strings (e.g. ’Spam’). • UnicodeType The type of Unicode character strings (e.g. u’Spam’). • TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). • ListType The type of lists (e.g. [0, 1, 2, 3]). • DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). • DictionaryType An alternate name for DictType. • FunctionType The type of user-defined functions and lambdas. • LambdaType An alternate name for FunctionType. • CodeType The type for code objects such as returned by compile(). • ClassType type of user-defined classes. • InstanceType The type of instances of user-defined classes. • MethodType The type of methods of user-defined class instances. • UnboundMethod Type An alternate name for MethodType. • BuiltinFunction Type The type of built-in functions like len() or sys.exit(). • BuiltinMethod TypeAn alternate name for BuiltinFunction. • ModuleType The type of modules. • FileType The type of open file objects such as sys.stdout. • XRangeType The type of range objects returned by xrange(). • SliceType The type of objects returned by slice(). • EllipsisType The type of Ellipsis. 
• TracebackType The type of traceback objects such as found in sys.exc traceback. • FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. • BufferType The type of buffer objects created by the buffer() function. • string.capitalize()Return a copy of the string with only its first character capitalized. • string.center(width) Return centered in a string of length width. Padding is done using spaces. • string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. • string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . • string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. • string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. • string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. • string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. • string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. • string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. 
• string.isdigit()Return true if there are only digit characters, false otherwise. • string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. • string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise. • string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. • string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. • string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. • string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.lower() Return a copy of the string converted to lowercase. • string.lstrip() Return a copy of the string with leading whitespace removed. • string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. • string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. • string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. • string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). 
• string.rstrip() Return a copy of the string with trailing whitespace removed. • string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. • string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true. • string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. • string.strip() Return a copy of the string with leading and trailing whitespace removed. • string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. • string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. • string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. • string.upper() Return a copy of the string converted to uppercase. • file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. • file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. • file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. 
• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! • file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. • file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\0’) if they occurred in the input. • file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. • file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . 
• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). • file.tell() Return the file’s current position, like stdio’s ftell(). • file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). • file.write(str) Write a string to the file. There is no return value. Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. • file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object. • file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. • file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. 
• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects. • abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. • apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. • buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument). • callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method. • chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). 
The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. • cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. • coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). • complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. • delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. • dir([object]) Without arguments, return the list of names in the current local symbol table. • divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. 
With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). • eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. Example: >>> x = 1 >>> print eval(’x+1’) 2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). • execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. 
The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. • filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. • float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned. • getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. • globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). • hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) 
• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). • hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. • id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) • input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users. • int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to • string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. 
If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 • intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). • isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. • issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. • len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). • list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. 
For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3]. • locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. • long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). • map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. • max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. • min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments. • oct(x) Convert an integer number (of any size) to an octal string. 
The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. • ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. • pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed. • range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). • reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). 
If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. • reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument). • repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). • round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0). • setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123. • slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). 
They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. • str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. • tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). • type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance: >>> import types >>> if type(x) == types.StringType: print "It’s a string" unichr(i) Return the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. . • unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . • vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. 
The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 • xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). • zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. ''' op='C:\PyHelp\\randinfo.txt' file_exists = os.path.isfile(op) if not file_exists: x = open(op,"w") x.write(rand_facts)
normal
{ "blob_id": "a2c93fd632a637d47f05e0a4fda851b465d03a31", "index": 4674, "step-1": "<mask token>\n", "step-2": "<mask token>\nif not os.path.exists(directory):\n os.makedirs(directory)\n<mask token>\nif not file_exists:\n x = open(op, 'w')\n x.write(rand_facts)\n", "step-3": "<mask token>\ndirectory = 'C:\\\\PyHelp'\nif not os.path.exists(directory):\n os.makedirs(directory)\nrand_facts = (\n '• Exception is used as a base class for all exceptions. It\\'s strongly recommended (but not yet required) that user exceptions are derived from this class too.\\n• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.\\n• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).\\n• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use \"catch all\" try-except statements.\\n• ImportError(StandardError) is raised when Python fails to import a module.\\n• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter\\'s environment (that is, they\\'re usually not caused by bugs in the program).\\n• IOError(EnvironmentError) is used to flag I/O-related errors.\\n• OSError(EnvironmentError) is used to flag errors by the os module.\\n• WindowsError(OSError) is used to flag Windows-specific errors from the os module.\\n• NameError(StandardError) is raised when Python fails to find a global or local name.\\n• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. 
This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.\\n• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.\\n• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.\\n• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\\n• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\\n• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.\\n• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).\\n• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn\\'t contain a given index or key.\\n• IndexError(LookupError) is raised by sequence objects when the given index doesn\\'t exist.\\n• KeyError(LookupError) is raised by dictionary objects when the given key doesn\\'t exist.\\n• ArithmeticError(StandardError) is used as a base class for math-related exceptions.\\n• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).\\n• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.\\n• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.\\n• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.\\n• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. 
This is only used in 2.0 and later.\\n• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.\\n• NotImplementedError(RuntimeError) can be used to flag functions that hasn\\'t been implemented yet, or methods that should be overridden.\\n• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like\\n\"eval_code2: NULL globals\" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that\\'s just me.\\n• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.\\n• NoneType The type of None.\\n• TypeType The type of type objects (such as returned by type()). \\n• IntType The type of integers (e.g. 1).\\n• LongType The type of long integers (e.g. 1L).\\n• FloatType The type of floating point numbers (e.g. 1.0).\\n• ComplexType The type of complex numbers (e.g. 1.0j).\\n• StringType The type of character strings (e.g. ’Spam’). \\n• UnicodeType The type of Unicode character strings (e.g. u’Spam’). \\n• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). \\n• ListType The type of lists (e.g. [0, 1, 2, 3]). \\n• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). \\n• DictionaryType An alternate name for DictType. \\n• FunctionType The type of user-defined functions and lambdas. \\n• LambdaType An alternate name for FunctionType. \\n• CodeType The type for code objects such as returned by compile(). \\n• ClassType type of user-defined classes. \\n• InstanceType The type of instances of user-defined classes. 
\\n• MethodType The type of methods of user-defined class instances. \\n• UnboundMethod Type An alternate name for MethodType. \\n• BuiltinFunction Type The type of built-in functions like len() or sys.exit(). \\n• BuiltinMethod TypeAn alternate name for BuiltinFunction. \\n• ModuleType The type of modules. \\n• FileType The type of open file objects such as sys.stdout. \\n• XRangeType The type of range objects returned by xrange(). \\n• SliceType The type of objects returned by slice().\\n• EllipsisType The type of Ellipsis. \\n• TracebackType The type of traceback objects such as found in sys.exc traceback. \\n• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. \\n• BufferType The type of buffer objects created by the buffer() function.\\n• string.capitalize()Return a copy of the string with only its first character capitalized. \\n• string.center(width) Return centered in a string of length width. Padding is done using spaces. \\n• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. \\n• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . \\n• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. \\n• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. 
\\n• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. \\n• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. \\n• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. \\n• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. \\n• string.isdigit()Return true if there are only digit characters, false otherwise.\\n• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. \\n• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.\\n• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. \\n• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. \\n• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. \\n• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). \\n• string.lower() Return a copy of the string converted to lowercase. \\n• string.lstrip() Return a copy of the string with leading whitespace removed.\\n• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. 
If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. \\n• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.\\n• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. \\n• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. \\n• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. \\n• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.\\n• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. \\n• string.strip() Return a copy of the string with leading and trailing whitespace removed.\\n• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. \\n• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. 
\\n• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. \\n• string.upper() Return a copy of the string converted to uppercase.\\n• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. \\n• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. \\n• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. \\n• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! \\n• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. \\n• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). 
If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\\x00’) if they occurred in the input. \\n• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. \\n• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . \\n• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). \\n• file.tell() Return the file’s current position, like stdio’s ftell(). \\n• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). \\n• file.write(str) Write a string to the file. There is no return value. 
Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. \\n• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object. \\n• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. \\n• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. \\n• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.\\n• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. \\n• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. 
\\n• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument). \\n• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.\\n• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. \\n• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. \\n• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). 
\\n• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. \\n• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. \\n• dir([object]) Without arguments, return the list of names in the current local symbol table. \\n• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). \\n• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. 
Example: \\n>>> x = 1 \\n>>> print eval(’x+1’) \\n2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). \\n• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. \\n• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. \\n• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). 
Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.\\n• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. \\n• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). \\n• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) \\n• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). \\n• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. \\n• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. 
Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) \\n• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.\\n• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to \\n• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 \\n• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. 
Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). \\n• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. \\n• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. \\n• len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). \\n• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].\\n• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. \\n• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. 
Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). \\n• map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. \\n• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. \\n• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.\\n• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.\\n• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. \\n• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. 
With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.\\n• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). \\n• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. \\n• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).\\n• repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. 
For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). \\n• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0).\\n• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.\\n• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. \\n• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. \\n• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). 
\\n• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance: \\n>>> import types \\n>>> if type(x) == types.StringType: print \"It’s a string\" unichr(i) \\nReturn the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .\\n• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . \\n• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 \\n• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). \\n• zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. 
The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. \\n\\n'\n )\nop = 'C:\\\\PyHelp\\\\randinfo.txt'\nfile_exists = os.path.isfile(op)\nif not file_exists:\n x = open(op, 'w')\n x.write(rand_facts)\n", "step-4": "import os\nimport sqlite3\nimport datetime\ndirectory = 'C:\\\\PyHelp'\nif not os.path.exists(directory):\n os.makedirs(directory)\nrand_facts = (\n '• Exception is used as a base class for all exceptions. It\\'s strongly recommended (but not yet required) that user exceptions are derived from this class too.\\n• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.\\n• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).\\n• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use \"catch all\" try-except statements.\\n• ImportError(StandardError) is raised when Python fails to import a module.\\n• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter\\'s environment (that is, they\\'re usually not caused by bugs in the program).\\n• IOError(EnvironmentError) is used to flag I/O-related errors.\\n• OSError(EnvironmentError) is used to flag errors by the os module.\\n• WindowsError(OSError) is used to flag Windows-specific errors from the os module.\\n• NameError(StandardError) is raised when Python fails to find a global or local name.\\n• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. 
This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.\\n• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.\\n• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.\\n• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\\n• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\\n• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.\\n• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).\\n• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn\\'t contain a given index or key.\\n• IndexError(LookupError) is raised by sequence objects when the given index doesn\\'t exist.\\n• KeyError(LookupError) is raised by dictionary objects when the given key doesn\\'t exist.\\n• ArithmeticError(StandardError) is used as a base class for math-related exceptions.\\n• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).\\n• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.\\n• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.\\n• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.\\n• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. 
This is only used in 2.0 and later.\\n• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.\\n• NotImplementedError(RuntimeError) can be used to flag functions that hasn\\'t been implemented yet, or methods that should be overridden.\\n• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like\\n\"eval_code2: NULL globals\" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that\\'s just me.\\n• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.\\n• NoneType The type of None.\\n• TypeType The type of type objects (such as returned by type()). \\n• IntType The type of integers (e.g. 1).\\n• LongType The type of long integers (e.g. 1L).\\n• FloatType The type of floating point numbers (e.g. 1.0).\\n• ComplexType The type of complex numbers (e.g. 1.0j).\\n• StringType The type of character strings (e.g. ’Spam’). \\n• UnicodeType The type of Unicode character strings (e.g. u’Spam’). \\n• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). \\n• ListType The type of lists (e.g. [0, 1, 2, 3]). \\n• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). \\n• DictionaryType An alternate name for DictType. \\n• FunctionType The type of user-defined functions and lambdas. \\n• LambdaType An alternate name for FunctionType. \\n• CodeType The type for code objects such as returned by compile(). \\n• ClassType type of user-defined classes. \\n• InstanceType The type of instances of user-defined classes. 
\\n• MethodType The type of methods of user-defined class instances. \\n• UnboundMethod Type An alternate name for MethodType. \\n• BuiltinFunction Type The type of built-in functions like len() or sys.exit(). \\n• BuiltinMethod TypeAn alternate name for BuiltinFunction. \\n• ModuleType The type of modules. \\n• FileType The type of open file objects such as sys.stdout. \\n• XRangeType The type of range objects returned by xrange(). \\n• SliceType The type of objects returned by slice().\\n• EllipsisType The type of Ellipsis. \\n• TracebackType The type of traceback objects such as found in sys.exc traceback. \\n• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. \\n• BufferType The type of buffer objects created by the buffer() function.\\n• string.capitalize()Return a copy of the string with only its first character capitalized. \\n• string.center(width) Return centered in a string of length width. Padding is done using spaces. \\n• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. \\n• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . \\n• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. \\n• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. 
\\n• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. \\n• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. \\n• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. \\n• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. \\n• string.isdigit()Return true if there are only digit characters, false otherwise.\\n• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. \\n• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.\\n• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. \\n• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. \\n• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. \\n• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). \\n• string.lower() Return a copy of the string converted to lowercase. \\n• string.lstrip() Return a copy of the string with leading whitespace removed.\\n• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. 
If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. \\n• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.\\n• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. \\n• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. \\n• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. \\n• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.\\n• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. \\n• string.strip() Return a copy of the string with leading and trailing whitespace removed.\\n• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. \\n• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. 
\\n• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. \\n• string.upper() Return a copy of the string converted to uppercase.\\n• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. \\n• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. \\n• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. \\n• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! \\n• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. \\n• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). 
If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\\x00’) if they occurred in the input. \\n• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. \\n• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . \\n• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). \\n• file.tell() Return the file’s current position, like stdio’s ftell(). \\n• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). \\n• file.write(str) Write a string to the file. There is no return value. 
Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. \\n• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object. \\n• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. \\n• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. \\n• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.\\n• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. \\n• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. 
\\n• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument). \\n• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.\\n• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. \\n• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. \\n• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). 
\\n• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. \\n• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. \\n• dir([object]) Without arguments, return the list of names in the current local symbol table. \\n• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). \\n• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. 
Example: \\n>>> x = 1 \\n>>> print eval(’x+1’) \\n2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). \\n• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. \\n• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. \\n• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). 
Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.\\n• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. \\n• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). \\n• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) \\n• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). \\n• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. \\n• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. 
Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) \\n• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.\\n• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to \\n• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 \\n• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. 
Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). \\n• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. \\n• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. \\n• len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). \\n• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].\\n• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. \\n• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. 
Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). \\n• map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. \\n• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. \\n• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.\\n• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.\\n• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. \\n• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. 
With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.\\n• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). \\n• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. \\n• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).\\n• repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. 
For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). \\n• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0).\\n• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.\\n• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. \\n• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. \\n• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). 
\\n• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance: \\n>>> import types \\n>>> if type(x) == types.StringType: print \"It’s a string\" unichr(i) \\nReturn the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .\\n• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . \\n• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 \\n• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). \\n• zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. 
The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. \\n\\n'\n )\nop = 'C:\\\\PyHelp\\\\randinfo.txt'\nfile_exists = os.path.isfile(op)\nif not file_exists:\n x = open(op, 'w')\n x.write(rand_facts)\n", "step-5": "import os\r\nimport sqlite3\r\nimport datetime\r\ndirectory = 'C:\\PyHelp'\r\n\r\nif not os.path.exists(directory):\r\n os.makedirs(directory)\r\n\r\nrand_facts = '''• Exception is used as a base class for all exceptions. It's strongly recommended (but not yet required) that user exceptions are derived from this class too.\r\n• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.\r\n• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).\r\n• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use \"catch all\" try-except statements.\r\n• ImportError(StandardError) is raised when Python fails to import a module.\r\n• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter's environment (that is, they're usually not caused by bugs in the program).\r\n• IOError(EnvironmentError) is used to flag I/O-related errors.\r\n• OSError(EnvironmentError) is used to flag errors by the os module.\r\n• WindowsError(OSError) is used to flag Windows-specific errors from the os module.\r\n• NameError(StandardError) is raised when Python fails to find a global or local name.\r\n• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. 
This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.\r\n• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.\r\n• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.\r\n• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\r\n• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\r\n• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.\r\n• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).\r\n• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn't contain a given index or key.\r\n• IndexError(LookupError) is raised by sequence objects when the given index doesn't exist.\r\n• KeyError(LookupError) is raised by dictionary objects when the given key doesn't exist.\r\n• ArithmeticError(StandardError) is used as a base class for math-related exceptions.\r\n• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).\r\n• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.\r\n• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.\r\n• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.\r\n• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. 
This is only used in 2.0 and later.\r\n• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.\r\n• NotImplementedError(RuntimeError) can be used to flag functions that hasn't been implemented yet, or methods that should be overridden.\r\n• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like\r\n\"eval_code2: NULL globals\" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that's just me.\r\n• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.\r\n• NoneType The type of None.\r\n• TypeType The type of type objects (such as returned by type()). \r\n• IntType The type of integers (e.g. 1).\r\n• LongType The type of long integers (e.g. 1L).\r\n• FloatType The type of floating point numbers (e.g. 1.0).\r\n• ComplexType The type of complex numbers (e.g. 1.0j).\r\n• StringType The type of character strings (e.g. ’Spam’). \r\n• UnicodeType The type of Unicode character strings (e.g. u’Spam’). \r\n• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). \r\n• ListType The type of lists (e.g. [0, 1, 2, 3]). \r\n• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). \r\n• DictionaryType An alternate name for DictType. \r\n• FunctionType The type of user-defined functions and lambdas. \r\n• LambdaType An alternate name for FunctionType. \r\n• CodeType The type for code objects such as returned by compile(). \r\n• ClassType type of user-defined classes. \r\n• InstanceType The type of instances of user-defined classes. 
\r\n• MethodType The type of methods of user-defined class instances. \r\n• UnboundMethod Type An alternate name for MethodType. \r\n• BuiltinFunction Type The type of built-in functions like len() or sys.exit(). \r\n• BuiltinMethod TypeAn alternate name for BuiltinFunction. \r\n• ModuleType The type of modules. \r\n• FileType The type of open file objects such as sys.stdout. \r\n• XRangeType The type of range objects returned by xrange(). \r\n• SliceType The type of objects returned by slice().\r\n• EllipsisType The type of Ellipsis. \r\n• TracebackType The type of traceback objects such as found in sys.exc traceback. \r\n• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. \r\n• BufferType The type of buffer objects created by the buffer() function.\r\n• string.capitalize()Return a copy of the string with only its first character capitalized. \r\n• string.center(width) Return centered in a string of length width. Padding is done using spaces. \r\n• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. \r\n• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . \r\n• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. \r\n• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. 
\r\n• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. \r\n• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. \r\n• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. \r\n• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. \r\n• string.isdigit()Return true if there are only digit characters, false otherwise.\r\n• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. \r\n• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.\r\n• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. \r\n• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. \r\n• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. \r\n• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). \r\n• string.lower() Return a copy of the string converted to lowercase. \r\n• string.lstrip() Return a copy of the string with leading whitespace removed.\r\n• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. 
If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. \r\n• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.\r\n• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. \r\n• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. \r\n• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. \r\n• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.\r\n• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. \r\n• string.strip() Return a copy of the string with leading and trailing whitespace removed.\r\n• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. \r\n• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. 
\r\n• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. \r\n• string.upper() Return a copy of the string converted to uppercase.\r\n• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. \r\n• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. \r\n• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. \r\n• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! \r\n• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. \r\n• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). 
If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\\0’) if they occurred in the input. \r\n• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. \r\n• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . \r\n• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). \r\n• file.tell() Return the file’s current position, like stdio’s ftell(). \r\n• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). \r\n• file.write(str) Write a string to the file. There is no return value. 
Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. \r\n• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object. \r\n• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. \r\n• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. \r\n• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.\r\n• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. \r\n• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. 
\r\n• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument). \r\n• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.\r\n• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. \r\n• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. \r\n• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). 
\r\n• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. \r\n• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. \r\n• dir([object]) Without arguments, return the list of names in the current local symbol table. \r\n• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). \r\n• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. 
Example: \r\n>>> x = 1 \r\n>>> print eval(’x+1’) \r\n2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). \r\n• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. \r\n• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. \r\n• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). 
Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.\r\n• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. \r\n• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). \r\n• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) \r\n• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). \r\n• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. \r\n• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. 
Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) \r\n• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.\r\n• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to \r\n• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 \r\n• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. 
Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). \r\n• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. \r\n• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. \r\n• len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). \r\n• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].\r\n• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. \r\n• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. 
Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). \r\n• map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. \r\n• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. \r\n• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.\r\n• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.\r\n• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. 
\r\n• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.\r\n• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). \r\n• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. \r\n• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).\r\n• repr(object) Return a string containing a printable representation of an object. 
This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). \r\n• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0).\r\n• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.\r\n• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. \r\n• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. \r\n• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. 
If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). \r\n• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance: \r\n>>> import types \r\n>>> if type(x) == types.StringType: print \"It’s a string\" unichr(i) \r\nReturn the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .\r\n• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . \r\n• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 \r\n• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). \r\n• zip(seq1, ...) 
This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. \r\n\r\n'''\r\n\r\nop='C:\\PyHelp\\\\randinfo.txt'\r\nfile_exists = os.path.isfile(op) \r\n \r\nif not file_exists:\r\n \r\n x = open(op,\"w\")\r\n x.write(rand_facts)\r\n\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> year = datetime.datetime.now().year project = 'python201' copyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath' author = 'Geoffrey Lentner, Ashwin Srinath' version = '0.0.1' release = '0.0.1' extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.githubpages', 'sphinx.ext.autodoc', 'IPython.sphinxext.ipython_directive', 'IPython.sphinxext.ipython_console_highlighting'] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' language = None exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] pygments_style = 'sphinx' html_theme = 'pydata_sphinx_theme' html_logo = '_static/logo.png' html_favicon = '_static/favicon.ico' html_static_path = [''] html_theme_options = {'external_links': [], 'github_url': 'https://github.com/glentner/python201'} latex_elements = {} latex_documents = [(master_doc, 'python-201.tex', 'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual')] man_pages = [('manpage', 'cumprod', 'Compute cumulative product of a sequence of numbers.', 'Geoffrey Lentner <glentner@purdue.edu>.', '1')] texinfo_documents = [(master_doc, 'python-201', 'python-201 Documentation', author, 'python-201', 'One line description of project.', 'Miscellaneous')] intersphinx_mapping = {'https://docs.python.org/3/': None} rst_epilog = f""" .. |release| replace:: {release} .. 
|copyright| replace:: {copyright} """ <|reserved_special_token_1|> import datetime year = datetime.datetime.now().year project = 'python201' copyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath' author = 'Geoffrey Lentner, Ashwin Srinath' version = '0.0.1' release = '0.0.1' extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.githubpages', 'sphinx.ext.autodoc', 'IPython.sphinxext.ipython_directive', 'IPython.sphinxext.ipython_console_highlighting'] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' language = None exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] pygments_style = 'sphinx' html_theme = 'pydata_sphinx_theme' html_logo = '_static/logo.png' html_favicon = '_static/favicon.ico' html_static_path = [''] html_theme_options = {'external_links': [], 'github_url': 'https://github.com/glentner/python201'} latex_elements = {} latex_documents = [(master_doc, 'python-201.tex', 'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual')] man_pages = [('manpage', 'cumprod', 'Compute cumulative product of a sequence of numbers.', 'Geoffrey Lentner <glentner@purdue.edu>.', '1')] texinfo_documents = [(master_doc, 'python-201', 'python-201 Documentation', author, 'python-201', 'One line description of project.', 'Miscellaneous')] intersphinx_mapping = {'https://docs.python.org/3/': None} rst_epilog = f""" .. |release| replace:: {release} .. |copyright| replace:: {copyright} """ <|reserved_special_token_1|> # SPDX-FileCopyrightText: 2019-2021 Python201 Contributors # SPDX-License-Identifier: MIT # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. 
For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys import datetime # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- year = datetime.datetime.now().year project = 'python201' copyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath' author = 'Geoffrey Lentner, Ashwin Srinath' version = '0.0.1' release = '0.0.1' # -- General configuration --------------------------------------------------- extensions = [ 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.githubpages', 'sphinx.ext.autodoc', 'IPython.sphinxext.ipython_directive', 'IPython.sphinxext.ipython_console_highlighting', ] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- html_theme = 'pydata_sphinx_theme' html_logo = '_static/logo.png' html_favicon = '_static/favicon.ico' html_static_path = [''] html_theme_options = { 'external_links': [], 'github_url': 'https://github.com/glentner/python201', } # -- Options for LaTeX output ------------------------------------------------ latex_elements = {} latex_documents = [ (master_doc, 'python-201.tex', 'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual'), ] # -- Options for manual page output ------------------------------------------ # manual pages options man_pages = [( 'manpage', 'cumprod', 'Compute cumulative product of a sequence of numbers.', 'Geoffrey Lentner <glentner@purdue.edu>.', '1' ), ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'python-201', 'python-201 Documentation', author, 'python-201', 'One line description of project.', 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- intersphinx_mapping = {'https://docs.python.org/3/': None} # export variables with epilogue rst_epilog = f""" .. |release| replace:: {release} .. |copyright| replace:: {copyright} """
flexible
{ "blob_id": "1ead23c6ea4e66b24e60598ae20606e24fa41482", "index": 1024, "step-1": "<mask token>\n", "step-2": "<mask token>\nyear = datetime.datetime.now().year\nproject = 'python201'\ncopyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'\nauthor = 'Geoffrey Lentner, Ashwin Srinath'\nversion = '0.0.1'\nrelease = '0.0.1'\nextensions = ['sphinx.ext.intersphinx', 'sphinx.ext.mathjax',\n 'sphinx.ext.githubpages', 'sphinx.ext.autodoc',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nlanguage = None\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\npygments_style = 'sphinx'\nhtml_theme = 'pydata_sphinx_theme'\nhtml_logo = '_static/logo.png'\nhtml_favicon = '_static/favicon.ico'\nhtml_static_path = ['']\nhtml_theme_options = {'external_links': [], 'github_url':\n 'https://github.com/glentner/python201'}\nlatex_elements = {}\nlatex_documents = [(master_doc, 'python-201.tex',\n 'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual')]\nman_pages = [('manpage', 'cumprod',\n 'Compute cumulative product of a sequence of numbers.',\n 'Geoffrey Lentner <glentner@purdue.edu>.', '1')]\ntexinfo_documents = [(master_doc, 'python-201', 'python-201 Documentation',\n author, 'python-201', 'One line description of project.', 'Miscellaneous')]\nintersphinx_mapping = {'https://docs.python.org/3/': None}\nrst_epilog = f\"\"\"\n.. |release| replace:: {release}\n.. 
|copyright| replace:: {copyright}\n\"\"\"\n", "step-3": "import datetime\nyear = datetime.datetime.now().year\nproject = 'python201'\ncopyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'\nauthor = 'Geoffrey Lentner, Ashwin Srinath'\nversion = '0.0.1'\nrelease = '0.0.1'\nextensions = ['sphinx.ext.intersphinx', 'sphinx.ext.mathjax',\n 'sphinx.ext.githubpages', 'sphinx.ext.autodoc',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nlanguage = None\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\npygments_style = 'sphinx'\nhtml_theme = 'pydata_sphinx_theme'\nhtml_logo = '_static/logo.png'\nhtml_favicon = '_static/favicon.ico'\nhtml_static_path = ['']\nhtml_theme_options = {'external_links': [], 'github_url':\n 'https://github.com/glentner/python201'}\nlatex_elements = {}\nlatex_documents = [(master_doc, 'python-201.tex',\n 'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual')]\nman_pages = [('manpage', 'cumprod',\n 'Compute cumulative product of a sequence of numbers.',\n 'Geoffrey Lentner <glentner@purdue.edu>.', '1')]\ntexinfo_documents = [(master_doc, 'python-201', 'python-201 Documentation',\n author, 'python-201', 'One line description of project.', 'Miscellaneous')]\nintersphinx_mapping = {'https://docs.python.org/3/': None}\nrst_epilog = f\"\"\"\n.. |release| replace:: {release}\n.. |copyright| replace:: {copyright}\n\"\"\"\n", "step-4": "# SPDX-FileCopyrightText: 2019-2021 Python201 Contributors\n# SPDX-License-Identifier: MIT\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. 
For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\nimport datetime\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nyear = datetime.datetime.now().year\nproject = 'python201'\ncopyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'\nauthor = 'Geoffrey Lentner, Ashwin Srinath'\n\nversion = '0.0.1'\nrelease = '0.0.1'\n\n\n# -- General configuration ---------------------------------------------------\n\nextensions = [\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting',\n]\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output -------------------------------------------------\n\nhtml_theme = 'pydata_sphinx_theme'\nhtml_logo = '_static/logo.png'\nhtml_favicon = '_static/favicon.ico'\nhtml_static_path = ['']\nhtml_theme_options = {\n 'external_links': [],\n 'github_url': 'https://github.com/glentner/python201',\n}\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {}\nlatex_documents = 
[\n (master_doc, 'python-201.tex', 'python-201 Documentation',\n 'Geoffrey Lentner, Ashwin Srinath', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# manual pages options\nman_pages = [(\n 'manpage',\n 'cumprod',\n 'Compute cumulative product of a sequence of numbers.',\n 'Geoffrey Lentner <glentner@purdue.edu>.',\n '1'\n),\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'python-201', 'python-201 Documentation',\n author, 'python-201', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\nintersphinx_mapping = {'https://docs.python.org/3/': None}\n\n# export variables with epilogue\nrst_epilog = f\"\"\"\n.. |release| replace:: {release}\n.. |copyright| replace:: {copyright}\n\"\"\"\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class ColorPoint(object): <|reserved_special_token_0|> <|reserved_special_token_0|> def __getitem__(self, item) ->float: """ >>> cp = ColorPoint(Color('#880073'), Color('white'), '') >>> cp[0] # hue 0.8590686274509803 >>> cp[1] # saturation 1.0 >>> cp[2] # luminance 0.26666666666666666 """ return self.source.hsl[item] def __repr__(self) ->str: return 'ColorPoint({!r} => {!r})'.format(self.source, self.target) class ColorMatch(object): def __init__(self) ->None: self.tree = kdtree.create(dimensions=3) def add(self, source: Color, target: Color, ansi: AnsiCodeType) ->None: point = ColorPoint(source, target, ansi) self.tree.add(point) def match(self, color: Color) ->ColorPoint: """ >>> cm = ColorMatch() >>> cm.add(Color('red'), Color('white'), '') >>> cm.add(Color('blue'), Color('white'), '') >>> cm.match(Color('yellow')) ColorPoint(<Color red> => <Color white>) """ results = self.tree.search_nn(color.hsl) if not results: raise KeyError('No match found for color: {}'.format(color)) return results[0].data <|reserved_special_token_1|> <|reserved_special_token_0|> class ColorPoint(object): def __init__(self, source: Color, target: Color, ansi: AnsiCodeType ) ->None: """ Map source color to target color, stores target ansi color ans a single int, a sequence of RGB as ints or markup string. 
""" self.source = source self.target = target self.ansi = ansi <|reserved_special_token_0|> def __getitem__(self, item) ->float: """ >>> cp = ColorPoint(Color('#880073'), Color('white'), '') >>> cp[0] # hue 0.8590686274509803 >>> cp[1] # saturation 1.0 >>> cp[2] # luminance 0.26666666666666666 """ return self.source.hsl[item] def __repr__(self) ->str: return 'ColorPoint({!r} => {!r})'.format(self.source, self.target) class ColorMatch(object): def __init__(self) ->None: self.tree = kdtree.create(dimensions=3) def add(self, source: Color, target: Color, ansi: AnsiCodeType) ->None: point = ColorPoint(source, target, ansi) self.tree.add(point) def match(self, color: Color) ->ColorPoint: """ >>> cm = ColorMatch() >>> cm.add(Color('red'), Color('white'), '') >>> cm.add(Color('blue'), Color('white'), '') >>> cm.match(Color('yellow')) ColorPoint(<Color red> => <Color white>) """ results = self.tree.search_nn(color.hsl) if not results: raise KeyError('No match found for color: {}'.format(color)) return results[0].data <|reserved_special_token_1|> <|reserved_special_token_0|> class ColorPoint(object): def __init__(self, source: Color, target: Color, ansi: AnsiCodeType ) ->None: """ Map source color to target color, stores target ansi color ans a single int, a sequence of RGB as ints or markup string. 
""" self.source = source self.target = target self.ansi = ansi def __len__(self) ->int: """ >>> cp = ColorPoint(Color('black'), Color('white'), '') >>> len(cp) == 3 True """ return 3 def __getitem__(self, item) ->float: """ >>> cp = ColorPoint(Color('#880073'), Color('white'), '') >>> cp[0] # hue 0.8590686274509803 >>> cp[1] # saturation 1.0 >>> cp[2] # luminance 0.26666666666666666 """ return self.source.hsl[item] def __repr__(self) ->str: return 'ColorPoint({!r} => {!r})'.format(self.source, self.target) class ColorMatch(object): def __init__(self) ->None: self.tree = kdtree.create(dimensions=3) def add(self, source: Color, target: Color, ansi: AnsiCodeType) ->None: point = ColorPoint(source, target, ansi) self.tree.add(point) def match(self, color: Color) ->ColorPoint: """ >>> cm = ColorMatch() >>> cm.add(Color('red'), Color('white'), '') >>> cm.add(Color('blue'), Color('white'), '') >>> cm.match(Color('yellow')) ColorPoint(<Color red> => <Color white>) """ results = self.tree.search_nn(color.hsl) if not results: raise KeyError('No match found for color: {}'.format(color)) return results[0].data <|reserved_special_token_1|> <|reserved_special_token_0|> AnsiCodeType = Union[str, int, Tuple[int, int, int]] class ColorPoint(object): def __init__(self, source: Color, target: Color, ansi: AnsiCodeType ) ->None: """ Map source color to target color, stores target ansi color ans a single int, a sequence of RGB as ints or markup string. 
""" self.source = source self.target = target self.ansi = ansi def __len__(self) ->int: """ >>> cp = ColorPoint(Color('black'), Color('white'), '') >>> len(cp) == 3 True """ return 3 def __getitem__(self, item) ->float: """ >>> cp = ColorPoint(Color('#880073'), Color('white'), '') >>> cp[0] # hue 0.8590686274509803 >>> cp[1] # saturation 1.0 >>> cp[2] # luminance 0.26666666666666666 """ return self.source.hsl[item] def __repr__(self) ->str: return 'ColorPoint({!r} => {!r})'.format(self.source, self.target) class ColorMatch(object): def __init__(self) ->None: self.tree = kdtree.create(dimensions=3) def add(self, source: Color, target: Color, ansi: AnsiCodeType) ->None: point = ColorPoint(source, target, ansi) self.tree.add(point) def match(self, color: Color) ->ColorPoint: """ >>> cm = ColorMatch() >>> cm.add(Color('red'), Color('white'), '') >>> cm.add(Color('blue'), Color('white'), '') >>> cm.match(Color('yellow')) ColorPoint(<Color red> => <Color white>) """ results = self.tree.search_nn(color.hsl) if not results: raise KeyError('No match found for color: {}'.format(color)) return results[0].data <|reserved_special_token_1|> from typing import Sequence, Union, Tuple import kdtree from colour import Color AnsiCodeType = Union[str, int, Tuple[int, int, int]] class ColorPoint(object): def __init__(self, source: Color, target: Color, ansi: AnsiCodeType) -> None: """ Map source color to target color, stores target ansi color ans a single int, a sequence of RGB as ints or markup string. 
""" self.source = source self.target = target self.ansi = ansi def __len__(self) -> int: """ >>> cp = ColorPoint(Color('black'), Color('white'), '') >>> len(cp) == 3 True """ return 3 def __getitem__(self, item) -> float: """ >>> cp = ColorPoint(Color('#880073'), Color('white'), '') >>> cp[0] # hue 0.8590686274509803 >>> cp[1] # saturation 1.0 >>> cp[2] # luminance 0.26666666666666666 """ return self.source.hsl[item] def __repr__(self) -> str: return 'ColorPoint({!r} => {!r})'.format(self.source, self.target) class ColorMatch(object): def __init__(self) -> None: self.tree = kdtree.create(dimensions=3) def add(self, source: Color, target: Color, ansi: AnsiCodeType) -> None: point = ColorPoint(source, target, ansi) self.tree.add(point) def match(self, color: Color) -> ColorPoint: """ >>> cm = ColorMatch() >>> cm.add(Color('red'), Color('white'), '') >>> cm.add(Color('blue'), Color('white'), '') >>> cm.match(Color('yellow')) ColorPoint(<Color red> => <Color white>) """ results = self.tree.search_nn(color.hsl) if not results: raise KeyError('No match found for color: {}'.format(color)) return results[0].data
flexible
{ "blob_id": "e239c2089fc6d4ab646c490b6e3de8953cec5634", "index": 8093, "step-1": "<mask token>\n\n\nclass ColorPoint(object):\n <mask token>\n <mask token>\n\n def __getitem__(self, item) ->float:\n \"\"\"\n >>> cp = ColorPoint(Color('#880073'), Color('white'), '')\n >>> cp[0] # hue\n 0.8590686274509803\n >>> cp[1] # saturation\n 1.0\n >>> cp[2] # luminance\n 0.26666666666666666\n \"\"\"\n return self.source.hsl[item]\n\n def __repr__(self) ->str:\n return 'ColorPoint({!r} => {!r})'.format(self.source, self.target)\n\n\nclass ColorMatch(object):\n\n def __init__(self) ->None:\n self.tree = kdtree.create(dimensions=3)\n\n def add(self, source: Color, target: Color, ansi: AnsiCodeType) ->None:\n point = ColorPoint(source, target, ansi)\n self.tree.add(point)\n\n def match(self, color: Color) ->ColorPoint:\n \"\"\"\n >>> cm = ColorMatch()\n >>> cm.add(Color('red'), Color('white'), '')\n >>> cm.add(Color('blue'), Color('white'), '')\n >>> cm.match(Color('yellow'))\n ColorPoint(<Color red> => <Color white>)\n \"\"\"\n results = self.tree.search_nn(color.hsl)\n if not results:\n raise KeyError('No match found for color: {}'.format(color))\n return results[0].data\n", "step-2": "<mask token>\n\n\nclass ColorPoint(object):\n\n def __init__(self, source: Color, target: Color, ansi: AnsiCodeType\n ) ->None:\n \"\"\"\n Map source color to target color, stores target\n ansi color ans a single int, a sequence of RGB as ints\n or markup string.\n \"\"\"\n self.source = source\n self.target = target\n self.ansi = ansi\n <mask token>\n\n def __getitem__(self, item) ->float:\n \"\"\"\n >>> cp = ColorPoint(Color('#880073'), Color('white'), '')\n >>> cp[0] # hue\n 0.8590686274509803\n >>> cp[1] # saturation\n 1.0\n >>> cp[2] # luminance\n 0.26666666666666666\n \"\"\"\n return self.source.hsl[item]\n\n def __repr__(self) ->str:\n return 'ColorPoint({!r} => {!r})'.format(self.source, self.target)\n\n\nclass ColorMatch(object):\n\n def __init__(self) ->None:\n self.tree = 
kdtree.create(dimensions=3)\n\n def add(self, source: Color, target: Color, ansi: AnsiCodeType) ->None:\n point = ColorPoint(source, target, ansi)\n self.tree.add(point)\n\n def match(self, color: Color) ->ColorPoint:\n \"\"\"\n >>> cm = ColorMatch()\n >>> cm.add(Color('red'), Color('white'), '')\n >>> cm.add(Color('blue'), Color('white'), '')\n >>> cm.match(Color('yellow'))\n ColorPoint(<Color red> => <Color white>)\n \"\"\"\n results = self.tree.search_nn(color.hsl)\n if not results:\n raise KeyError('No match found for color: {}'.format(color))\n return results[0].data\n", "step-3": "<mask token>\n\n\nclass ColorPoint(object):\n\n def __init__(self, source: Color, target: Color, ansi: AnsiCodeType\n ) ->None:\n \"\"\"\n Map source color to target color, stores target\n ansi color ans a single int, a sequence of RGB as ints\n or markup string.\n \"\"\"\n self.source = source\n self.target = target\n self.ansi = ansi\n\n def __len__(self) ->int:\n \"\"\"\n >>> cp = ColorPoint(Color('black'), Color('white'), '')\n >>> len(cp) == 3\n True\n \"\"\"\n return 3\n\n def __getitem__(self, item) ->float:\n \"\"\"\n >>> cp = ColorPoint(Color('#880073'), Color('white'), '')\n >>> cp[0] # hue\n 0.8590686274509803\n >>> cp[1] # saturation\n 1.0\n >>> cp[2] # luminance\n 0.26666666666666666\n \"\"\"\n return self.source.hsl[item]\n\n def __repr__(self) ->str:\n return 'ColorPoint({!r} => {!r})'.format(self.source, self.target)\n\n\nclass ColorMatch(object):\n\n def __init__(self) ->None:\n self.tree = kdtree.create(dimensions=3)\n\n def add(self, source: Color, target: Color, ansi: AnsiCodeType) ->None:\n point = ColorPoint(source, target, ansi)\n self.tree.add(point)\n\n def match(self, color: Color) ->ColorPoint:\n \"\"\"\n >>> cm = ColorMatch()\n >>> cm.add(Color('red'), Color('white'), '')\n >>> cm.add(Color('blue'), Color('white'), '')\n >>> cm.match(Color('yellow'))\n ColorPoint(<Color red> => <Color white>)\n \"\"\"\n results = self.tree.search_nn(color.hsl)\n if not 
results:\n raise KeyError('No match found for color: {}'.format(color))\n return results[0].data\n", "step-4": "<mask token>\nAnsiCodeType = Union[str, int, Tuple[int, int, int]]\n\n\nclass ColorPoint(object):\n\n def __init__(self, source: Color, target: Color, ansi: AnsiCodeType\n ) ->None:\n \"\"\"\n Map source color to target color, stores target\n ansi color ans a single int, a sequence of RGB as ints\n or markup string.\n \"\"\"\n self.source = source\n self.target = target\n self.ansi = ansi\n\n def __len__(self) ->int:\n \"\"\"\n >>> cp = ColorPoint(Color('black'), Color('white'), '')\n >>> len(cp) == 3\n True\n \"\"\"\n return 3\n\n def __getitem__(self, item) ->float:\n \"\"\"\n >>> cp = ColorPoint(Color('#880073'), Color('white'), '')\n >>> cp[0] # hue\n 0.8590686274509803\n >>> cp[1] # saturation\n 1.0\n >>> cp[2] # luminance\n 0.26666666666666666\n \"\"\"\n return self.source.hsl[item]\n\n def __repr__(self) ->str:\n return 'ColorPoint({!r} => {!r})'.format(self.source, self.target)\n\n\nclass ColorMatch(object):\n\n def __init__(self) ->None:\n self.tree = kdtree.create(dimensions=3)\n\n def add(self, source: Color, target: Color, ansi: AnsiCodeType) ->None:\n point = ColorPoint(source, target, ansi)\n self.tree.add(point)\n\n def match(self, color: Color) ->ColorPoint:\n \"\"\"\n >>> cm = ColorMatch()\n >>> cm.add(Color('red'), Color('white'), '')\n >>> cm.add(Color('blue'), Color('white'), '')\n >>> cm.match(Color('yellow'))\n ColorPoint(<Color red> => <Color white>)\n \"\"\"\n results = self.tree.search_nn(color.hsl)\n if not results:\n raise KeyError('No match found for color: {}'.format(color))\n return results[0].data\n", "step-5": "from typing import Sequence, Union, Tuple\n\nimport kdtree\n\nfrom colour import Color\n\nAnsiCodeType = Union[str, int, Tuple[int, int, int]]\n\n\nclass ColorPoint(object):\n def __init__(self, source: Color, target: Color,\n ansi: AnsiCodeType) -> None:\n \"\"\"\n Map source color to target color, stores target\n 
ansi color ans a single int, a sequence of RGB as ints\n or markup string.\n \"\"\"\n self.source = source\n self.target = target\n self.ansi = ansi\n\n def __len__(self) -> int:\n \"\"\"\n >>> cp = ColorPoint(Color('black'), Color('white'), '')\n >>> len(cp) == 3\n True\n \"\"\"\n return 3\n\n def __getitem__(self, item) -> float:\n \"\"\"\n >>> cp = ColorPoint(Color('#880073'), Color('white'), '')\n >>> cp[0] # hue\n 0.8590686274509803\n >>> cp[1] # saturation\n 1.0\n >>> cp[2] # luminance\n 0.26666666666666666\n \"\"\"\n return self.source.hsl[item]\n\n def __repr__(self) -> str:\n return 'ColorPoint({!r} => {!r})'.format(self.source, self.target)\n\n\nclass ColorMatch(object):\n def __init__(self) -> None:\n self.tree = kdtree.create(dimensions=3)\n\n def add(self, source: Color, target: Color, ansi: AnsiCodeType) -> None:\n point = ColorPoint(source, target, ansi)\n self.tree.add(point)\n\n def match(self, color: Color) -> ColorPoint:\n \"\"\"\n >>> cm = ColorMatch()\n >>> cm.add(Color('red'), Color('white'), '')\n >>> cm.add(Color('blue'), Color('white'), '')\n >>> cm.match(Color('yellow'))\n ColorPoint(<Color red> => <Color white>)\n \"\"\"\n results = self.tree.search_nn(color.hsl)\n if not results:\n raise KeyError('No match found for color: {}'.format(color))\n return results[0].data\n\n\n", "step-ids": [ 7, 8, 9, 10, 12 ] }
[ 7, 8, 9, 10, 12 ]
<|reserved_special_token_0|> @ddt.ddt class TestAddress(unittest.TestCase): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def test_02_check_address(self): url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list' data = {'session': self.session} response = Address.check_address(url, data) addr_list = Address.get_value(response, 'data') sql = f'select * from ecs_user_address where user_id = {self.user_id}' sql_addr = self.op_database.get_all(sql) self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败') <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @ddt.ddt class TestAddress(unittest.TestCase): def setUp(self) ->None: login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin' login_data = {'name': 'tester', 'password': '123456'} login = Login(url=login_url) self.session = login.get_session(login_data) self.user_id = int(GetKeyword.get_keyword(self.session, 'uid')) self.op_database = OpDatabase() @classmethod def setUpClass(cls) ->None: op_database = OpDatabase() op_database.clear_mysql() @classmethod def tearDownClass(cls) ->None: op_database = OpDatabase() op_database.clear_mysql() @ddt.data(*test_data1) def test_01_add_address(self, data): sql = f'select * from ecs_user_address where user_id = {self.user_id}' before = self.op_database.get_all(sql) add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add' add_data = {'address': {'default_address': 0, 'consignee': f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode': f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0, 'email': f"{data['email']}", 'address': f"{data['detail']}", 'province': '', 'district': '', 'mobile': ''}, 'session': self. 
session} Address.add_address(url=add_url, data=add_data) after = self.op_database.get_all(sql) result = len(after) - len(before) self.assertEqual(data['expect'], result, msg='断言失败') def test_02_check_address(self): url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list' data = {'session': self.session} response = Address.check_address(url, data) addr_list = Address.get_value(response, 'data') sql = f'select * from ecs_user_address where user_id = {self.user_id}' sql_addr = self.op_database.get_all(sql) self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败') @ddt.data(*test_data2) def test_03_modify_address(self, data): sql = ( f'select address_id from ecs_user_address where user_id = {self.user_id}' ) id_list = self.op_database.get_all(sql) url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update' modify_data = {'address': {'default_address': 0, 'consignee': f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode': f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0, 'email': f"{data['email']}", 'address': f"{data['detail']}", 'province': '0', 'district': '0', 'mobile': f"{data['mobile']}" }, 'address_id': id_list[0]['address_id'], 'session': self.session} response = Address.modify_address(url, modify_data) succeed = Address.get_value(response, 'succeed') self.assertEqual(data['expect'], succeed, msg='断言失败') def test_04_delete_address(self): sql = ( f'select address_id from ecs_user_address where user_id = {self.user_id}' ) id_list = self.op_database.get_all(sql) url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete' delete_data = {'address_id': id_list[0]['address_id'], 'session': self.session} response = Address.delete_address(url, delete_data) succeed = Address.get_value(response, 'succeed') sql = ( f"select * from ecs_user_address where address_id = {id_list[0]['address_id']}" ) info = self.op_database.get_one(sql) result = False if info != None else True self.assertEqual(result, succeed, msg='断言失败') <|reserved_special_token_0|> 
<|reserved_special_token_1|> <|reserved_special_token_0|> @ddt.ddt class TestAddress(unittest.TestCase): def setUp(self) ->None: login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin' login_data = {'name': 'tester', 'password': '123456'} login = Login(url=login_url) self.session = login.get_session(login_data) self.user_id = int(GetKeyword.get_keyword(self.session, 'uid')) self.op_database = OpDatabase() @classmethod def setUpClass(cls) ->None: op_database = OpDatabase() op_database.clear_mysql() @classmethod def tearDownClass(cls) ->None: op_database = OpDatabase() op_database.clear_mysql() @ddt.data(*test_data1) def test_01_add_address(self, data): sql = f'select * from ecs_user_address where user_id = {self.user_id}' before = self.op_database.get_all(sql) add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add' add_data = {'address': {'default_address': 0, 'consignee': f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode': f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0, 'email': f"{data['email']}", 'address': f"{data['detail']}", 'province': '', 'district': '', 'mobile': ''}, 'session': self. 
session} Address.add_address(url=add_url, data=add_data) after = self.op_database.get_all(sql) result = len(after) - len(before) self.assertEqual(data['expect'], result, msg='断言失败') def test_02_check_address(self): url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list' data = {'session': self.session} response = Address.check_address(url, data) addr_list = Address.get_value(response, 'data') sql = f'select * from ecs_user_address where user_id = {self.user_id}' sql_addr = self.op_database.get_all(sql) self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败') @ddt.data(*test_data2) def test_03_modify_address(self, data): sql = ( f'select address_id from ecs_user_address where user_id = {self.user_id}' ) id_list = self.op_database.get_all(sql) url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update' modify_data = {'address': {'default_address': 0, 'consignee': f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode': f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0, 'email': f"{data['email']}", 'address': f"{data['detail']}", 'province': '0', 'district': '0', 'mobile': f"{data['mobile']}" }, 'address_id': id_list[0]['address_id'], 'session': self.session} response = Address.modify_address(url, modify_data) succeed = Address.get_value(response, 'succeed') self.assertEqual(data['expect'], succeed, msg='断言失败') def test_04_delete_address(self): sql = ( f'select address_id from ecs_user_address where user_id = {self.user_id}' ) id_list = self.op_database.get_all(sql) url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete' delete_data = {'address_id': id_list[0]['address_id'], 'session': self.session} response = Address.delete_address(url, delete_data) succeed = Address.get_value(response, 'succeed') sql = ( f"select * from ecs_user_address where address_id = {id_list[0]['address_id']}" ) info = self.op_database.get_one(sql) result = False if info != None else True self.assertEqual(result, succeed, msg='断言失败') if __name__ == '__main__': 
unittest.main() <|reserved_special_token_1|> <|reserved_special_token_0|> op_excel = OperationExcel() add_file = ( 'D:\\pyCharm\\Demo\\pycode\\Requests\\20191109\\课堂练习\\ECShop_interface\\data\\add_address.xlsx' ) modify_file = ( 'D:\\pyCharm\\Demo\\pycode\\Requests\\20191109\\课堂练习\\ECShop_interface\\data\\modify_address.xlsx' ) test_data1 = op_excel.get_data(add_file) test_data2 = op_excel.get_data(modify_file) @ddt.ddt class TestAddress(unittest.TestCase): def setUp(self) ->None: login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin' login_data = {'name': 'tester', 'password': '123456'} login = Login(url=login_url) self.session = login.get_session(login_data) self.user_id = int(GetKeyword.get_keyword(self.session, 'uid')) self.op_database = OpDatabase() @classmethod def setUpClass(cls) ->None: op_database = OpDatabase() op_database.clear_mysql() @classmethod def tearDownClass(cls) ->None: op_database = OpDatabase() op_database.clear_mysql() @ddt.data(*test_data1) def test_01_add_address(self, data): sql = f'select * from ecs_user_address where user_id = {self.user_id}' before = self.op_database.get_all(sql) add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add' add_data = {'address': {'default_address': 0, 'consignee': f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode': f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0, 'email': f"{data['email']}", 'address': f"{data['detail']}", 'province': '', 'district': '', 'mobile': ''}, 'session': self. 
session} Address.add_address(url=add_url, data=add_data) after = self.op_database.get_all(sql) result = len(after) - len(before) self.assertEqual(data['expect'], result, msg='断言失败') def test_02_check_address(self): url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list' data = {'session': self.session} response = Address.check_address(url, data) addr_list = Address.get_value(response, 'data') sql = f'select * from ecs_user_address where user_id = {self.user_id}' sql_addr = self.op_database.get_all(sql) self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败') @ddt.data(*test_data2) def test_03_modify_address(self, data): sql = ( f'select address_id from ecs_user_address where user_id = {self.user_id}' ) id_list = self.op_database.get_all(sql) url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update' modify_data = {'address': {'default_address': 0, 'consignee': f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode': f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0, 'email': f"{data['email']}", 'address': f"{data['detail']}", 'province': '0', 'district': '0', 'mobile': f"{data['mobile']}" }, 'address_id': id_list[0]['address_id'], 'session': self.session} response = Address.modify_address(url, modify_data) succeed = Address.get_value(response, 'succeed') self.assertEqual(data['expect'], succeed, msg='断言失败') def test_04_delete_address(self): sql = ( f'select address_id from ecs_user_address where user_id = {self.user_id}' ) id_list = self.op_database.get_all(sql) url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete' delete_data = {'address_id': id_list[0]['address_id'], 'session': self.session} response = Address.delete_address(url, delete_data) succeed = Address.get_value(response, 'succeed') sql = ( f"select * from ecs_user_address where address_id = {id_list[0]['address_id']}" ) info = self.op_database.get_one(sql) result = False if info != None else True self.assertEqual(result, succeed, msg='断言失败') if __name__ == '__main__': 
unittest.main() <|reserved_special_token_1|> from common.get_keyword import GetKeyword from common.operation_Excel import OperationExcel from common.op_database import OpDatabase from interface.login import Login from interface.address import Address import unittest import ddt # 测试数据 op_excel = OperationExcel() add_file = r'D:\pyCharm\Demo\pycode\Requests\20191109\课堂练习\ECShop_interface\data\add_address.xlsx' modify_file = r'D:\pyCharm\Demo\pycode\Requests\20191109\课堂练习\ECShop_interface\data\modify_address.xlsx' test_data1 = op_excel.get_data(add_file) test_data2 = op_excel.get_data(modify_file) @ddt.ddt class TestAddress(unittest.TestCase): # 编写test fixture def setUp(self) -> None: # 登录数据 login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin' login_data = {"name": "tester", "password": "123456"} # 实例化登录对象 login = Login(url=login_url) self.session = login.get_session(login_data) self.user_id = int(GetKeyword.get_keyword(self.session, 'uid')) # 实例化数据操作对象 self.op_database = OpDatabase() @classmethod def setUpClass(cls) -> None: # 清空数据信息 op_database = OpDatabase() op_database.clear_mysql() @classmethod def tearDownClass(cls) -> None: # 清空数据信息 op_database = OpDatabase() op_database.clear_mysql() # 编写test case # 添加收货地址 @ddt.data(*test_data1) def test_01_add_address(self, data): # SQL语句 sql = f'select * from ecs_user_address where user_id = {self.user_id}' # 获取收货地址表中用户地址数 before = self.op_database.get_all(sql) # 添加收货地址数据 add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add' add_data = { "address": {"default_address": 0, "consignee": f"{data['consignee']}", "tel": f"{data['tel']}", "zipcode": f"{data['postcode']}", "country": "1", "city": "271", "id": 0, "email": f"{data['email']}", "address": f"{data['detail']}", "province": "", "district": "", "mobile": ""}, "session": self.session } # 添加收货地址 Address.add_address(url=add_url, data=add_data) # 获取收货地址表中用户地址数 after = self.op_database.get_all(sql) result = len(after) - len(before) # 实际结果 # 断言 
self.assertEqual(data['expect'], result, msg='断言失败') # 查看收货地址 def test_02_check_address(self): # 查看收货地址数据 url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list' data = {"session": self.session} # 查看收货地址 response = Address.check_address(url, data) # 获取返回数据中data的值 addr_list = Address.get_value(response, 'data') # 实际结果 # SQL语句 sql = f'select * from ecs_user_address where user_id = {self.user_id}' # 获取收货地址表中用户地址数 sql_addr = self.op_database.get_all(sql) # 期望结果 # 断言 self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败') # 修改收货地址 @ddt.data(*test_data2) def test_03_modify_address(self, data): # 读取收货地址表中的地址的address_id sql = f'select address_id from ecs_user_address where user_id = {self.user_id}' id_list = self.op_database.get_all(sql) # 修改收货地址数据 url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update' modify_data = { "address": {"default_address": 0, "consignee": f"{data['consignee']}", "tel": f"{data['tel']}", "zipcode": f"{data['postcode']}", "country": "1", "city": "271", "id": 0, "email": f"{data['email']}", "address": f"{data['detail']}", "province": "0", "district": "0", "mobile": f"{data['mobile']}"}, "address_id": id_list[0]['address_id'], "session": self.session } # 修改收货地址 response = Address.modify_address(url, modify_data) # 获取返回数据中的succeed succeed = Address.get_value(response, 'succeed') # 断言----缺少数据库验证代码 self.assertEqual(data['expect'], succeed, msg='断言失败') # 删除收货地址 def test_04_delete_address(self): # 读取收货地址表中的地址的address_id sql = f'select address_id from ecs_user_address where user_id = {self.user_id}' id_list = self.op_database.get_all(sql) # 删除收货地址数据 url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete' delete_data = {"address_id": id_list[0]['address_id'], "session": self.session} # 删除收货地址 response = Address.delete_address(url, delete_data) # 获取返回数据中succeed succeed = Address.get_value(response, 'succeed') # 实际结果 # 查询收货地址表中该地址的信息 sql = f"select * from ecs_user_address where address_id = {id_list[0]['address_id']}" info = 
self.op_database.get_one(sql) result = False if info != None else True # 期望结果 # 断言 self.assertEqual(result, succeed, msg='断言失败') if __name__ == '__main__': unittest.main()
flexible
{ "blob_id": "0f0b3eea9dc397d32e81749304041abaf6651e94", "index": 1873, "step-1": "<mask token>\n\n\n@ddt.ddt\nclass TestAddress(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_02_check_address(self):\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'\n data = {'session': self.session}\n response = Address.check_address(url, data)\n addr_list = Address.get_value(response, 'data')\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n sql_addr = self.op_database.get_all(sql)\n self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@ddt.ddt\nclass TestAddress(unittest.TestCase):\n\n def setUp(self) ->None:\n login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin'\n login_data = {'name': 'tester', 'password': '123456'}\n login = Login(url=login_url)\n self.session = login.get_session(login_data)\n self.user_id = int(GetKeyword.get_keyword(self.session, 'uid'))\n self.op_database = OpDatabase()\n\n @classmethod\n def setUpClass(cls) ->None:\n op_database = OpDatabase()\n op_database.clear_mysql()\n\n @classmethod\n def tearDownClass(cls) ->None:\n op_database = OpDatabase()\n op_database.clear_mysql()\n\n @ddt.data(*test_data1)\n def test_01_add_address(self, data):\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n before = self.op_database.get_all(sql)\n add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add'\n add_data = {'address': {'default_address': 0, 'consignee':\n f\"{data['consignee']}\", 'tel': f\"{data['tel']}\", 'zipcode':\n f\"{data['postcode']}\", 'country': '1', 'city': '271', 'id': 0,\n 'email': f\"{data['email']}\", 'address': f\"{data['detail']}\",\n 'province': '', 'district': '', 'mobile': ''}, 'session': self.\n session}\n Address.add_address(url=add_url, data=add_data)\n after = self.op_database.get_all(sql)\n result = len(after) - len(before)\n 
self.assertEqual(data['expect'], result, msg='断言失败')\n\n def test_02_check_address(self):\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'\n data = {'session': self.session}\n response = Address.check_address(url, data)\n addr_list = Address.get_value(response, 'data')\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n sql_addr = self.op_database.get_all(sql)\n self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')\n\n @ddt.data(*test_data2)\n def test_03_modify_address(self, data):\n sql = (\n f'select address_id from ecs_user_address where user_id = {self.user_id}'\n )\n id_list = self.op_database.get_all(sql)\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update'\n modify_data = {'address': {'default_address': 0, 'consignee':\n f\"{data['consignee']}\", 'tel': f\"{data['tel']}\", 'zipcode':\n f\"{data['postcode']}\", 'country': '1', 'city': '271', 'id': 0,\n 'email': f\"{data['email']}\", 'address': f\"{data['detail']}\",\n 'province': '0', 'district': '0', 'mobile': f\"{data['mobile']}\"\n }, 'address_id': id_list[0]['address_id'], 'session': self.session}\n response = Address.modify_address(url, modify_data)\n succeed = Address.get_value(response, 'succeed')\n self.assertEqual(data['expect'], succeed, msg='断言失败')\n\n def test_04_delete_address(self):\n sql = (\n f'select address_id from ecs_user_address where user_id = {self.user_id}'\n )\n id_list = self.op_database.get_all(sql)\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete'\n delete_data = {'address_id': id_list[0]['address_id'], 'session':\n self.session}\n response = Address.delete_address(url, delete_data)\n succeed = Address.get_value(response, 'succeed')\n sql = (\n f\"select * from ecs_user_address where address_id = {id_list[0]['address_id']}\"\n )\n info = self.op_database.get_one(sql)\n result = False if info != None else True\n self.assertEqual(result, succeed, msg='断言失败')\n\n\n<mask token>\n", "step-3": "<mask 
token>\n\n\n@ddt.ddt\nclass TestAddress(unittest.TestCase):\n\n def setUp(self) ->None:\n login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin'\n login_data = {'name': 'tester', 'password': '123456'}\n login = Login(url=login_url)\n self.session = login.get_session(login_data)\n self.user_id = int(GetKeyword.get_keyword(self.session, 'uid'))\n self.op_database = OpDatabase()\n\n @classmethod\n def setUpClass(cls) ->None:\n op_database = OpDatabase()\n op_database.clear_mysql()\n\n @classmethod\n def tearDownClass(cls) ->None:\n op_database = OpDatabase()\n op_database.clear_mysql()\n\n @ddt.data(*test_data1)\n def test_01_add_address(self, data):\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n before = self.op_database.get_all(sql)\n add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add'\n add_data = {'address': {'default_address': 0, 'consignee':\n f\"{data['consignee']}\", 'tel': f\"{data['tel']}\", 'zipcode':\n f\"{data['postcode']}\", 'country': '1', 'city': '271', 'id': 0,\n 'email': f\"{data['email']}\", 'address': f\"{data['detail']}\",\n 'province': '', 'district': '', 'mobile': ''}, 'session': self.\n session}\n Address.add_address(url=add_url, data=add_data)\n after = self.op_database.get_all(sql)\n result = len(after) - len(before)\n self.assertEqual(data['expect'], result, msg='断言失败')\n\n def test_02_check_address(self):\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'\n data = {'session': self.session}\n response = Address.check_address(url, data)\n addr_list = Address.get_value(response, 'data')\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n sql_addr = self.op_database.get_all(sql)\n self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')\n\n @ddt.data(*test_data2)\n def test_03_modify_address(self, data):\n sql = (\n f'select address_id from ecs_user_address where user_id = {self.user_id}'\n )\n id_list = self.op_database.get_all(sql)\n url = 
'http://ecshop.itsoso.cn/ECMobile/?url=/address/update'\n modify_data = {'address': {'default_address': 0, 'consignee':\n f\"{data['consignee']}\", 'tel': f\"{data['tel']}\", 'zipcode':\n f\"{data['postcode']}\", 'country': '1', 'city': '271', 'id': 0,\n 'email': f\"{data['email']}\", 'address': f\"{data['detail']}\",\n 'province': '0', 'district': '0', 'mobile': f\"{data['mobile']}\"\n }, 'address_id': id_list[0]['address_id'], 'session': self.session}\n response = Address.modify_address(url, modify_data)\n succeed = Address.get_value(response, 'succeed')\n self.assertEqual(data['expect'], succeed, msg='断言失败')\n\n def test_04_delete_address(self):\n sql = (\n f'select address_id from ecs_user_address where user_id = {self.user_id}'\n )\n id_list = self.op_database.get_all(sql)\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete'\n delete_data = {'address_id': id_list[0]['address_id'], 'session':\n self.session}\n response = Address.delete_address(url, delete_data)\n succeed = Address.get_value(response, 'succeed')\n sql = (\n f\"select * from ecs_user_address where address_id = {id_list[0]['address_id']}\"\n )\n info = self.op_database.get_one(sql)\n result = False if info != None else True\n self.assertEqual(result, succeed, msg='断言失败')\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "<mask token>\nop_excel = OperationExcel()\nadd_file = (\n 'D:\\\\pyCharm\\\\Demo\\\\pycode\\\\Requests\\\\20191109\\\\课堂练习\\\\ECShop_interface\\\\data\\\\add_address.xlsx'\n )\nmodify_file = (\n 'D:\\\\pyCharm\\\\Demo\\\\pycode\\\\Requests\\\\20191109\\\\课堂练习\\\\ECShop_interface\\\\data\\\\modify_address.xlsx'\n )\ntest_data1 = op_excel.get_data(add_file)\ntest_data2 = op_excel.get_data(modify_file)\n\n\n@ddt.ddt\nclass TestAddress(unittest.TestCase):\n\n def setUp(self) ->None:\n login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin'\n login_data = {'name': 'tester', 'password': '123456'}\n login = Login(url=login_url)\n self.session = 
login.get_session(login_data)\n self.user_id = int(GetKeyword.get_keyword(self.session, 'uid'))\n self.op_database = OpDatabase()\n\n @classmethod\n def setUpClass(cls) ->None:\n op_database = OpDatabase()\n op_database.clear_mysql()\n\n @classmethod\n def tearDownClass(cls) ->None:\n op_database = OpDatabase()\n op_database.clear_mysql()\n\n @ddt.data(*test_data1)\n def test_01_add_address(self, data):\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n before = self.op_database.get_all(sql)\n add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add'\n add_data = {'address': {'default_address': 0, 'consignee':\n f\"{data['consignee']}\", 'tel': f\"{data['tel']}\", 'zipcode':\n f\"{data['postcode']}\", 'country': '1', 'city': '271', 'id': 0,\n 'email': f\"{data['email']}\", 'address': f\"{data['detail']}\",\n 'province': '', 'district': '', 'mobile': ''}, 'session': self.\n session}\n Address.add_address(url=add_url, data=add_data)\n after = self.op_database.get_all(sql)\n result = len(after) - len(before)\n self.assertEqual(data['expect'], result, msg='断言失败')\n\n def test_02_check_address(self):\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'\n data = {'session': self.session}\n response = Address.check_address(url, data)\n addr_list = Address.get_value(response, 'data')\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n sql_addr = self.op_database.get_all(sql)\n self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')\n\n @ddt.data(*test_data2)\n def test_03_modify_address(self, data):\n sql = (\n f'select address_id from ecs_user_address where user_id = {self.user_id}'\n )\n id_list = self.op_database.get_all(sql)\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update'\n modify_data = {'address': {'default_address': 0, 'consignee':\n f\"{data['consignee']}\", 'tel': f\"{data['tel']}\", 'zipcode':\n f\"{data['postcode']}\", 'country': '1', 'city': '271', 'id': 0,\n 'email': 
f\"{data['email']}\", 'address': f\"{data['detail']}\",\n 'province': '0', 'district': '0', 'mobile': f\"{data['mobile']}\"\n }, 'address_id': id_list[0]['address_id'], 'session': self.session}\n response = Address.modify_address(url, modify_data)\n succeed = Address.get_value(response, 'succeed')\n self.assertEqual(data['expect'], succeed, msg='断言失败')\n\n def test_04_delete_address(self):\n sql = (\n f'select address_id from ecs_user_address where user_id = {self.user_id}'\n )\n id_list = self.op_database.get_all(sql)\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete'\n delete_data = {'address_id': id_list[0]['address_id'], 'session':\n self.session}\n response = Address.delete_address(url, delete_data)\n succeed = Address.get_value(response, 'succeed')\n sql = (\n f\"select * from ecs_user_address where address_id = {id_list[0]['address_id']}\"\n )\n info = self.op_database.get_one(sql)\n result = False if info != None else True\n self.assertEqual(result, succeed, msg='断言失败')\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "from common.get_keyword import GetKeyword\nfrom common.operation_Excel import OperationExcel\nfrom common.op_database import OpDatabase\nfrom interface.login import Login\nfrom interface.address import Address\nimport unittest\nimport ddt\n\n# 测试数据\nop_excel = OperationExcel()\nadd_file = r'D:\\pyCharm\\Demo\\pycode\\Requests\\20191109\\课堂练习\\ECShop_interface\\data\\add_address.xlsx'\nmodify_file = r'D:\\pyCharm\\Demo\\pycode\\Requests\\20191109\\课堂练习\\ECShop_interface\\data\\modify_address.xlsx'\ntest_data1 = op_excel.get_data(add_file)\ntest_data2 = op_excel.get_data(modify_file)\n\n\n@ddt.ddt\nclass TestAddress(unittest.TestCase):\n # 编写test fixture\n def setUp(self) -> None:\n # 登录数据\n login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin'\n login_data = {\"name\": \"tester\", \"password\": \"123456\"}\n # 实例化登录对象\n login = Login(url=login_url)\n self.session = login.get_session(login_data)\n 
self.user_id = int(GetKeyword.get_keyword(self.session, 'uid'))\n # 实例化数据操作对象\n self.op_database = OpDatabase()\n\n @classmethod\n def setUpClass(cls) -> None:\n # 清空数据信息\n op_database = OpDatabase()\n op_database.clear_mysql()\n\n @classmethod\n def tearDownClass(cls) -> None:\n # 清空数据信息\n op_database = OpDatabase()\n op_database.clear_mysql()\n\n # 编写test case\n # 添加收货地址\n @ddt.data(*test_data1)\n def test_01_add_address(self, data):\n # SQL语句\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n # 获取收货地址表中用户地址数\n before = self.op_database.get_all(sql)\n # 添加收货地址数据\n add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add'\n add_data = {\n \"address\": {\"default_address\": 0, \"consignee\": f\"{data['consignee']}\", \"tel\": f\"{data['tel']}\",\n \"zipcode\": f\"{data['postcode']}\", \"country\": \"1\", \"city\": \"271\", \"id\": 0,\n \"email\": f\"{data['email']}\", \"address\": f\"{data['detail']}\",\n \"province\": \"\", \"district\": \"\", \"mobile\": \"\"}, \"session\": self.session\n }\n # 添加收货地址\n Address.add_address(url=add_url, data=add_data)\n # 获取收货地址表中用户地址数\n after = self.op_database.get_all(sql)\n result = len(after) - len(before) # 实际结果\n # 断言\n self.assertEqual(data['expect'], result, msg='断言失败')\n\n # 查看收货地址\n def test_02_check_address(self):\n # 查看收货地址数据\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'\n data = {\"session\": self.session}\n # 查看收货地址\n response = Address.check_address(url, data)\n # 获取返回数据中data的值\n addr_list = Address.get_value(response, 'data') # 实际结果\n # SQL语句\n sql = f'select * from ecs_user_address where user_id = {self.user_id}'\n # 获取收货地址表中用户地址数\n sql_addr = self.op_database.get_all(sql) # 期望结果\n # 断言\n self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')\n\n # 修改收货地址\n @ddt.data(*test_data2)\n def test_03_modify_address(self, data):\n # 读取收货地址表中的地址的address_id\n sql = f'select address_id from ecs_user_address where user_id = {self.user_id}'\n id_list = 
self.op_database.get_all(sql)\n # 修改收货地址数据\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update'\n modify_data = {\n \"address\": {\"default_address\": 0, \"consignee\": f\"{data['consignee']}\", \"tel\": f\"{data['tel']}\",\n \"zipcode\": f\"{data['postcode']}\", \"country\": \"1\", \"city\": \"271\", \"id\": 0, \"email\": f\"{data['email']}\",\n \"address\": f\"{data['detail']}\", \"province\": \"0\", \"district\": \"0\", \"mobile\": f\"{data['mobile']}\"},\n \"address_id\": id_list[0]['address_id'], \"session\": self.session\n }\n # 修改收货地址\n response = Address.modify_address(url, modify_data)\n # 获取返回数据中的succeed\n succeed = Address.get_value(response, 'succeed')\n # 断言----缺少数据库验证代码\n self.assertEqual(data['expect'], succeed, msg='断言失败')\n\n # 删除收货地址\n def test_04_delete_address(self):\n # 读取收货地址表中的地址的address_id\n sql = f'select address_id from ecs_user_address where user_id = {self.user_id}'\n id_list = self.op_database.get_all(sql)\n # 删除收货地址数据\n url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete'\n delete_data = {\"address_id\": id_list[0]['address_id'], \"session\": self.session}\n # 删除收货地址\n response = Address.delete_address(url, delete_data)\n # 获取返回数据中succeed\n succeed = Address.get_value(response, 'succeed') # 实际结果\n # 查询收货地址表中该地址的信息\n sql = f\"select * from ecs_user_address where address_id = {id_list[0]['address_id']}\"\n info = self.op_database.get_one(sql)\n result = False if info != None else True # 期望结果\n # 断言\n self.assertEqual(result, succeed, msg='断言失败')\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-ids": [ 2, 8, 9, 10, 12 ] }
[ 2, 8, 9, 10, 12 ]
<|reserved_special_token_0|> def write_csv(filename, data_list): """ 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中 :param filename: 生成的csv文件名 :param data_list: [{}, {}. {}, {} ...] :return: None """ with open(filename, 'w') as f: dict_writer = csv.DictWriter(f, data_list[0].keys()) dict_writer.writeheader() dict_writer.writerows(data_list) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def read_json(filename): """ 读取json格式的文件 :param filename: json文件的文件名 :return: [{}, {}, {}, {}, {},{} ......] """ return json.loads(open(filename).read()) def write_csv(filename, data_list): """ 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中 :param filename: 生成的csv文件名 :param data_list: [{}, {}. {}, {} ...] :return: None """ with open(filename, 'w') as f: dict_writer = csv.DictWriter(f, data_list[0].keys()) dict_writer.writeheader() dict_writer.writerows(data_list) def write_csv2(filename, content_list): """ 与 write_csv 类似 :param filename: :param content_list: :return: """ with open(filename, 'w') as f: csv_writer = csv.writer(f) head_list = content_list[0].keys() data_list = [content.values() for content in content_list] csv_writer.writerow(head_list) csv_writer.writerows(data_list) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> reload(sys) sys.setdefaultencoding('utf-8') def read_json(filename): """ 读取json格式的文件 :param filename: json文件的文件名 :return: [{}, {}, {}, {}, {},{} ......] """ return json.loads(open(filename).read()) def write_csv(filename, data_list): """ 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中 :param filename: 生成的csv文件名 :param data_list: [{}, {}. {}, {} ...] 
:return: None """ with open(filename, 'w') as f: dict_writer = csv.DictWriter(f, data_list[0].keys()) dict_writer.writeheader() dict_writer.writerows(data_list) def write_csv2(filename, content_list): """ 与 write_csv 类似 :param filename: :param content_list: :return: """ with open(filename, 'w') as f: csv_writer = csv.writer(f) head_list = content_list[0].keys() data_list = [content.values() for content in content_list] csv_writer.writerow(head_list) csv_writer.writerows(data_list) if __name__ == '__main__': content_list = read_json('lagou_info_lin3.json') write_csv('lagou_info_lin3.csv', content_list) <|reserved_special_token_1|> import json import csv import sys reload(sys) sys.setdefaultencoding('utf-8') def read_json(filename): """ 读取json格式的文件 :param filename: json文件的文件名 :return: [{}, {}, {}, {}, {},{} ......] """ return json.loads(open(filename).read()) def write_csv(filename, data_list): """ 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中 :param filename: 生成的csv文件名 :param data_list: [{}, {}. {}, {} ...] :return: None """ with open(filename, 'w') as f: dict_writer = csv.DictWriter(f, data_list[0].keys()) dict_writer.writeheader() dict_writer.writerows(data_list) def write_csv2(filename, content_list): """ 与 write_csv 类似 :param filename: :param content_list: :return: """ with open(filename, 'w') as f: csv_writer = csv.writer(f) head_list = content_list[0].keys() data_list = [content.values() for content in content_list] csv_writer.writerow(head_list) csv_writer.writerows(data_list) if __name__ == '__main__': content_list = read_json('lagou_info_lin3.json') write_csv('lagou_info_lin3.csv', content_list) <|reserved_special_token_1|> # coding=utf-8 # __author__ = 'lyl' import json import csv import sys reload(sys) sys.setdefaultencoding('utf-8') def read_json(filename): """ 读取json格式的文件 :param filename: json文件的文件名 :return: [{}, {}, {}, {}, {},{} ......] """ return json.loads(open(filename).read()) def write_csv(filename, data_list): """ 将python对象 [{}, {}. {}, {} ...] 
写入到csv文件中 :param filename: 生成的csv文件名 :param data_list: [{}, {}. {}, {} ...] :return: None """ with open(filename,'w') as f: dict_writer = csv.DictWriter(f, data_list[0].keys()) dict_writer.writeheader() dict_writer.writerows(data_list) def write_csv2(filename, content_list): """ 与 write_csv 类似 :param filename: :param content_list: :return: """ with open(filename, 'w') as f: csv_writer = csv.writer(f) head_list = content_list[0].keys() data_list = [content.values() for content in content_list] csv_writer.writerow(head_list) csv_writer.writerows(data_list) if __name__ == "__main__": # 读出json数据内容 content_list = read_json('lagou_info_lin3.json') # 将数据写入到csv文件 write_csv( "lagou_info_lin3.csv", content_list)
flexible
{ "blob_id": "7531480f629c1b3d28210afac4ef84b06edcd420", "index": 3825, "step-1": "<mask token>\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\n<mask token>\n", "step-3": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. 
{}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\nif __name__ == '__main__':\n content_list = read_json('lagou_info_lin3.json')\n write_csv('lagou_info_lin3.csv', content_list)\n", "step-4": "import json\nimport csv\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. 
{}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\nif __name__ == '__main__':\n content_list = read_json('lagou_info_lin3.json')\n write_csv('lagou_info_lin3.csv', content_list)\n", "step-5": "# coding=utf-8\r\n# __author__ = 'lyl'\r\n\r\nimport json\r\nimport csv\r\n\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\n\r\n\r\ndef read_json(filename):\r\n \"\"\"\r\n 读取json格式的文件\r\n :param filename: json文件的文件名\r\n :return: [{}, {}, {}, {}, {},{} ......]\r\n \"\"\"\r\n return json.loads(open(filename).read())\r\n\r\ndef write_csv(filename, data_list):\r\n \"\"\"\r\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\r\n :param filename: 生成的csv文件名\r\n :param data_list: [{}, {}. 
{}, {} ...]\r\n :return: None\r\n \"\"\"\r\n with open(filename,'w') as f:\r\n dict_writer = csv.DictWriter(f, data_list[0].keys())\r\n dict_writer.writeheader()\r\n dict_writer.writerows(data_list)\r\n\r\n\r\ndef write_csv2(filename, content_list):\r\n \"\"\"\r\n 与 write_csv 类似\r\n :param filename:\r\n :param content_list:\r\n :return:\r\n \"\"\"\r\n with open(filename, 'w') as f:\r\n csv_writer = csv.writer(f)\r\n\r\n head_list = content_list[0].keys()\r\n data_list = [content.values() for content in content_list]\r\n csv_writer.writerow(head_list)\r\n csv_writer.writerows(data_list)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # 读出json数据内容\r\n content_list = read_json('lagou_info_lin3.json')\r\n # 将数据写入到csv文件\r\n write_csv( \"lagou_info_lin3.csv\", content_list)", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
<|reserved_special_token_0|> def sentiment(text): global url global headers body = {'text': text} try: r = requests.post(url, headers=headers, data=json.dumps(body)) dic = r.json() except Exception as e: print('分析失败') pass time.sleep(0.3) return dic['items'][0]['sentiment'] <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if response: print(response.json()) <|reserved_special_token_0|> print(url) <|reserved_special_token_0|> def sentiment(text): global url global headers body = {'text': text} try: r = requests.post(url, headers=headers, data=json.dumps(body)) dic = r.json() except Exception as e: print('分析失败') pass time.sleep(0.3) return dic['items'][0]['sentiment'] tqdm.pandas() <|reserved_special_token_0|> df_new_senti.sort_values(by='author', inplace=True) <|reserved_special_token_0|> df_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace', index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255), 'comment': VARCHAR(length=255), 'sentiment': FLOAT(12, 10)}) <|reserved_special_token_1|> <|reserved_special_token_0|> connect_info = ( 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4') engine = create_engine(connect_info) sql = """ select * from smzdm; """ df = pd.read_sql_query(sql, engine) df_new = df[df['comment'].str.len() >= 5] host = ( 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret' ) response = requests.get(host) if response: print(response.json()) access_token = response.json()['access_token'] url = ( 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token=' + access_token) print(url) headers = {'Content-Type': 'application/json'} def sentiment(text): global url global headers body = {'text': text} try: r = requests.post(url, headers=headers, data=json.dumps(body)) dic = r.json() except Exception as e: print('分析失败') pass time.sleep(0.3) return dic['items'][0]['sentiment'] tqdm.pandas() 
df_new_senti = df_new.copy() df_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment) df_new_senti.sort_values(by='author', inplace=True) df_new_senti['id'] = df_new_senti.index df_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace', index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255), 'comment': VARCHAR(length=255), 'sentiment': FLOAT(12, 10)}) <|reserved_special_token_1|> import requests import json import pandas as pd from sqlalchemy import create_engine from sqlalchemy.types import VARCHAR, INT, FLOAT, BIGINT import time from tqdm import tqdm connect_info = ( 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4') engine = create_engine(connect_info) sql = """ select * from smzdm; """ df = pd.read_sql_query(sql, engine) df_new = df[df['comment'].str.len() >= 5] host = ( 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret' ) response = requests.get(host) if response: print(response.json()) access_token = response.json()['access_token'] url = ( 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token=' + access_token) print(url) headers = {'Content-Type': 'application/json'} def sentiment(text): global url global headers body = {'text': text} try: r = requests.post(url, headers=headers, data=json.dumps(body)) dic = r.json() except Exception as e: print('分析失败') pass time.sleep(0.3) return dic['items'][0]['sentiment'] tqdm.pandas() df_new_senti = df_new.copy() df_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment) df_new_senti.sort_values(by='author', inplace=True) df_new_senti['id'] = df_new_senti.index df_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace', index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255), 'comment': VARCHAR(length=255), 'sentiment': FLOAT(12, 10)}) <|reserved_special_token_1|> import requests import json import pandas as pd from sqlalchemy import 
create_engine from sqlalchemy.types import VARCHAR,INT,FLOAT,BIGINT import time from tqdm import tqdm #数据库联接设置 connect_info = 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4' engine = create_engine(connect_info) sql = ''' select * from smzdm; ''' #从数据库中读取数据 df = pd.read_sql_query(sql, engine) #排除字数小于5的评论 df_new = df[df['comment'].str.len()>=5] #设置百度情感分析api host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret' response = requests.get(host) if response: print(response.json()) access_token = response.json()['access_token'] url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='+access_token print(url) headers={'Content-Type':'application/json'} #情感分析函数 def sentiment(text): global url global headers body={'text':text} try: r = requests.post(url,headers = headers,data=json.dumps(body)) dic=r.json() except Exception as e: print('分析失败') pass time.sleep(0.3)#设置分析频率,不设置引发QPS超限额错误 return dic['items'][0]['sentiment'] tqdm.pandas() df_new_senti = df_new.copy() df_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)#使用tqdm进度条 df_new_senti.sort_values(by='author',inplace=True) df_new_senti['id']=df_new_senti.index #保存到数据库 df_new_senti.to_sql(name = 'smzdm_senti',con = engine,if_exists = 'replace',index = False,dtype = {'id':BIGINT,'author': VARCHAR(length=255),'comment':VARCHAR(length=255),'sentiment':FLOAT(12,10)})
flexible
{ "blob_id": "a95e64877a1fc9f8109f1293b4ae9176f4f64647", "index": 3090, "step-1": "<mask token>\n\n\ndef sentiment(text):\n global url\n global headers\n body = {'text': text}\n try:\n r = requests.post(url, headers=headers, data=json.dumps(body))\n dic = r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)\n return dic['items'][0]['sentiment']\n\n\n<mask token>\n", "step-2": "<mask token>\nif response:\n print(response.json())\n<mask token>\nprint(url)\n<mask token>\n\n\ndef sentiment(text):\n global url\n global headers\n body = {'text': text}\n try:\n r = requests.post(url, headers=headers, data=json.dumps(body))\n dic = r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)\n return dic['items'][0]['sentiment']\n\n\ntqdm.pandas()\n<mask token>\ndf_new_senti.sort_values(by='author', inplace=True)\n<mask token>\ndf_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace',\n index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255),\n 'comment': VARCHAR(length=255), 'sentiment': FLOAT(12, 10)})\n", "step-3": "<mask token>\nconnect_info = (\n 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4')\nengine = create_engine(connect_info)\nsql = \"\"\"\n select * from smzdm;\n \"\"\"\ndf = pd.read_sql_query(sql, engine)\ndf_new = df[df['comment'].str.len() >= 5]\nhost = (\n 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret'\n )\nresponse = requests.get(host)\nif response:\n print(response.json())\naccess_token = response.json()['access_token']\nurl = (\n 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='\n + access_token)\nprint(url)\nheaders = {'Content-Type': 'application/json'}\n\n\ndef sentiment(text):\n global url\n global headers\n body = {'text': text}\n try:\n r = requests.post(url, headers=headers, data=json.dumps(body))\n dic = r.json()\n except Exception as e:\n print('分析失败')\n pass\n 
time.sleep(0.3)\n return dic['items'][0]['sentiment']\n\n\ntqdm.pandas()\ndf_new_senti = df_new.copy()\ndf_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)\ndf_new_senti.sort_values(by='author', inplace=True)\ndf_new_senti['id'] = df_new_senti.index\ndf_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace',\n index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255),\n 'comment': VARCHAR(length=255), 'sentiment': FLOAT(12, 10)})\n", "step-4": "import requests\nimport json\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.types import VARCHAR, INT, FLOAT, BIGINT\nimport time\nfrom tqdm import tqdm\nconnect_info = (\n 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4')\nengine = create_engine(connect_info)\nsql = \"\"\"\n select * from smzdm;\n \"\"\"\ndf = pd.read_sql_query(sql, engine)\ndf_new = df[df['comment'].str.len() >= 5]\nhost = (\n 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret'\n )\nresponse = requests.get(host)\nif response:\n print(response.json())\naccess_token = response.json()['access_token']\nurl = (\n 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='\n + access_token)\nprint(url)\nheaders = {'Content-Type': 'application/json'}\n\n\ndef sentiment(text):\n global url\n global headers\n body = {'text': text}\n try:\n r = requests.post(url, headers=headers, data=json.dumps(body))\n dic = r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)\n return dic['items'][0]['sentiment']\n\n\ntqdm.pandas()\ndf_new_senti = df_new.copy()\ndf_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)\ndf_new_senti.sort_values(by='author', inplace=True)\ndf_new_senti['id'] = df_new_senti.index\ndf_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace',\n index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255),\n 'comment': 
VARCHAR(length=255), 'sentiment': FLOAT(12, 10)})\n", "step-5": "import requests \nimport json\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.types import VARCHAR,INT,FLOAT,BIGINT\nimport time\nfrom tqdm import tqdm\n#数据库联接设置\nconnect_info = 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4'\nengine = create_engine(connect_info) \nsql = '''\n select * from smzdm;\n '''\n#从数据库中读取数据\ndf = pd.read_sql_query(sql, engine)\n#排除字数小于5的评论\ndf_new = df[df['comment'].str.len()>=5]\n#设置百度情感分析api\nhost = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret'\nresponse = requests.get(host)\nif response:\n print(response.json())\naccess_token = response.json()['access_token']\nurl = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='+access_token\nprint(url)\nheaders={'Content-Type':'application/json'}\n\n#情感分析函数\ndef sentiment(text):\n global url\n global headers\n body={'text':text}\n try:\n r = requests.post(url,headers = headers,data=json.dumps(body))\n dic=r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)#设置分析频率,不设置引发QPS超限额错误\n return dic['items'][0]['sentiment']\n\ntqdm.pandas()\ndf_new_senti = df_new.copy()\ndf_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)#使用tqdm进度条\ndf_new_senti.sort_values(by='author',inplace=True)\ndf_new_senti['id']=df_new_senti.index\n#保存到数据库\ndf_new_senti.to_sql(name = 'smzdm_senti',con = engine,if_exists = 'replace',index = False,dtype = {'id':BIGINT,'author': VARCHAR(length=255),'comment':VARCHAR(length=255),'sentiment':FLOAT(12,10)})", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
n, k = map(int, input().split()) k_list = [] for i in range(k): l, r = map(int, input().split()) k_list.append([l, r]) dp = [0] * (n + 1) dp[1] = 1 dpsum = [0] * (n + 1) dpsum[1] = 1 for i in range(1, n): dpsum[i] = dp[i] + dpsum[i - 1] for j in range(k): l, r = k_list[j] li = i + l ri = i + r + 1 if li <= n: dp[li] += dpsum[i] dp[li] = dp[li] % 998244353 if ri <= n: dp[ri] -= dpsum[i] dp[ri] = dp[ri] % 998244353 print(dp[n])
normal
{ "blob_id": "97720baab961d50ceae832d52350b9871c552c84", "index": 9071, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(k):\n l, r = map(int, input().split())\n k_list.append([l, r])\n<mask token>\nfor i in range(1, n):\n dpsum[i] = dp[i] + dpsum[i - 1]\n for j in range(k):\n l, r = k_list[j]\n li = i + l\n ri = i + r + 1\n if li <= n:\n dp[li] += dpsum[i]\n dp[li] = dp[li] % 998244353\n if ri <= n:\n dp[ri] -= dpsum[i]\n dp[ri] = dp[ri] % 998244353\nprint(dp[n])\n", "step-3": "n, k = map(int, input().split())\nk_list = []\nfor i in range(k):\n l, r = map(int, input().split())\n k_list.append([l, r])\ndp = [0] * (n + 1)\ndp[1] = 1\ndpsum = [0] * (n + 1)\ndpsum[1] = 1\nfor i in range(1, n):\n dpsum[i] = dp[i] + dpsum[i - 1]\n for j in range(k):\n l, r = k_list[j]\n li = i + l\n ri = i + r + 1\n if li <= n:\n dp[li] += dpsum[i]\n dp[li] = dp[li] % 998244353\n if ri <= n:\n dp[ri] -= dpsum[i]\n dp[ri] = dp[ri] % 998244353\nprint(dp[n])\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import pymysql db = pymysql.connect( "localhost", "root", "", "order_db", use_unicode=True, charset="utf8") cursor = db.cursor() sql = "DROP TABLE custdetail" cursor.execute(sql) db.close()
normal
{ "blob_id": "1aa2bff245322a34438cc836e23f430926dfac6c", "index": 3414, "step-1": "<mask token>\n", "step-2": "<mask token>\ncursor.execute(sql)\ndb.close()\n", "step-3": "<mask token>\ndb = pymysql.connect('localhost', 'root', '', 'order_db', use_unicode=True,\n charset='utf8')\ncursor = db.cursor()\nsql = 'DROP TABLE custdetail'\ncursor.execute(sql)\ndb.close()\n", "step-4": "import pymysql\ndb = pymysql.connect('localhost', 'root', '', 'order_db', use_unicode=True,\n charset='utf8')\ncursor = db.cursor()\nsql = 'DROP TABLE custdetail'\ncursor.execute(sql)\ndb.close()\n", "step-5": "import pymysql\ndb = pymysql.connect( \"localhost\", \"root\", \"\", \"order_db\",\n use_unicode=True, charset=\"utf8\") \ncursor = db.cursor()\nsql = \"DROP TABLE custdetail\"\ncursor.execute(sql)\ndb.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
def mysum(*c): print(sum([x for x in c])) mysum(1,2,3,4,0xB)
normal
{ "blob_id": "2c4fa92b28fa46a26f21ada8826474baac204e00", "index": 1234, "step-1": "<mask token>\n", "step-2": "def mysum(*c):\n print(sum([x for x in c]))\n\n\n<mask token>\n", "step-3": "def mysum(*c):\n print(sum([x for x in c]))\n\n\nmysum(1, 2, 3, 4, 11)\n", "step-4": "def mysum(*c):\n print(sum([x for x in c]))\n\nmysum(1,2,3,4,0xB)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> parser.add_argument('nex', help='path of the .nex file to be launched') parser.add_argument('file', help='autoexec.bas file to be generated') <|reserved_special_token_0|> contents += bytearray((0, 10)) contents += struct.pack('<H', len(command)) contents += command.encode('ASCII') <|reserved_special_token_0|> with open(args.file, 'wb') as f: f.write(contents) <|reserved_special_token_1|> <|reserved_special_token_0|> parser = argparse.ArgumentParser(description= 'Generate an autoexec.bas that launches a .nex file') parser.add_argument('nex', help='path of the .nex file to be launched') parser.add_argument('file', help='autoexec.bas file to be generated') args = parser.parse_args() command = '.nexload ' + args.nex + '\r' contents = bytearray(128) contents[0:8] = 'PLUS3DOS'.encode('ASCII') contents[8] = 26 contents[9:11] = [1, 0] contents += bytearray((0, 10)) contents += struct.pack('<H', len(command)) contents += command.encode('ASCII') programLength = len(contents) - 128 contents[15] = 0 contents[16:18] = struct.pack('<H', programLength) contents[18:20] = struct.pack('<H', 10) contents[20:22] = struct.pack('<H', programLength) contents[11:15] = struct.pack('<L', len(contents)) contents[127] = sum(contents[0:126]) & 255 with open(args.file, 'wb') as f: f.write(contents) <|reserved_special_token_1|> import argparse import struct import sys parser = argparse.ArgumentParser(description= 'Generate an autoexec.bas that launches a .nex file') parser.add_argument('nex', help='path of the .nex file to be launched') parser.add_argument('file', help='autoexec.bas file to be generated') args = parser.parse_args() command = '.nexload ' + args.nex + '\r' contents = bytearray(128) contents[0:8] = 'PLUS3DOS'.encode('ASCII') contents[8] = 26 contents[9:11] = [1, 0] contents += bytearray((0, 10)) contents += struct.pack('<H', len(command)) contents += command.encode('ASCII') programLength = 
len(contents) - 128 contents[15] = 0 contents[16:18] = struct.pack('<H', programLength) contents[18:20] = struct.pack('<H', 10) contents[20:22] = struct.pack('<H', programLength) contents[11:15] = struct.pack('<L', len(contents)) contents[127] = sum(contents[0:126]) & 255 with open(args.file, 'wb') as f: f.write(contents) <|reserved_special_token_1|> #!/usr/bin/env python3 # # nextskeleton - An assembler skeleton for the ZX Spectrum Next # # Copyright (C) 2020 Richard "Shred" Körber # https://github.com/shred/nextskeleton # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import argparse import struct import sys parser = argparse.ArgumentParser(description='Generate an autoexec.bas that launches a .nex file') parser.add_argument('nex', help='path of the .nex file to be launched') parser.add_argument('file', help='autoexec.bas file to be generated') args = parser.parse_args() command = '.nexload ' + args.nex + '\r' contents = bytearray(128) contents[0:8] = 'PLUS3DOS'.encode('ASCII') # +3DOS signature contents[8] = 0x1A contents[9:11] = [0x01, 0x00] # Issue and Version contents += bytearray((0x00, 0x0A)) # Line number 10 contents += struct.pack('<H', len(command)) # Line length contents += command.encode('ASCII') # BASIC line programLength = len(contents) - 128 # Length of the BASIC program contents[15] = 0x00 # DOS header: PROGRAM contents[16:18] = struct.pack('<H', programLength) # DOS header: length contents[18:20] = struct.pack('<H', 10) # DOS header: run at line 10 contents[20:22] = struct.pack('<H', programLength) # DOS header: offset to prog contents[11:15] = struct.pack('<L', len(contents)) # Set total length contents[127] = sum(contents[0:126]) & 0xFF # Compute checksum with open(args.file, 'wb') as f: f.write(contents)
flexible
{ "blob_id": "0744ec646e7b9303c67c25dff2997568c6171b91", "index": 108, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('nex', help='path of the .nex file to be launched')\nparser.add_argument('file', help='autoexec.bas file to be generated')\n<mask token>\ncontents += bytearray((0, 10))\ncontents += struct.pack('<H', len(command))\ncontents += command.encode('ASCII')\n<mask token>\nwith open(args.file, 'wb') as f:\n f.write(contents)\n", "step-3": "<mask token>\nparser = argparse.ArgumentParser(description=\n 'Generate an autoexec.bas that launches a .nex file')\nparser.add_argument('nex', help='path of the .nex file to be launched')\nparser.add_argument('file', help='autoexec.bas file to be generated')\nargs = parser.parse_args()\ncommand = '.nexload ' + args.nex + '\\r'\ncontents = bytearray(128)\ncontents[0:8] = 'PLUS3DOS'.encode('ASCII')\ncontents[8] = 26\ncontents[9:11] = [1, 0]\ncontents += bytearray((0, 10))\ncontents += struct.pack('<H', len(command))\ncontents += command.encode('ASCII')\nprogramLength = len(contents) - 128\ncontents[15] = 0\ncontents[16:18] = struct.pack('<H', programLength)\ncontents[18:20] = struct.pack('<H', 10)\ncontents[20:22] = struct.pack('<H', programLength)\ncontents[11:15] = struct.pack('<L', len(contents))\ncontents[127] = sum(contents[0:126]) & 255\nwith open(args.file, 'wb') as f:\n f.write(contents)\n", "step-4": "import argparse\nimport struct\nimport sys\nparser = argparse.ArgumentParser(description=\n 'Generate an autoexec.bas that launches a .nex file')\nparser.add_argument('nex', help='path of the .nex file to be launched')\nparser.add_argument('file', help='autoexec.bas file to be generated')\nargs = parser.parse_args()\ncommand = '.nexload ' + args.nex + '\\r'\ncontents = bytearray(128)\ncontents[0:8] = 'PLUS3DOS'.encode('ASCII')\ncontents[8] = 26\ncontents[9:11] = [1, 0]\ncontents += bytearray((0, 10))\ncontents += struct.pack('<H', len(command))\ncontents += 
command.encode('ASCII')\nprogramLength = len(contents) - 128\ncontents[15] = 0\ncontents[16:18] = struct.pack('<H', programLength)\ncontents[18:20] = struct.pack('<H', 10)\ncontents[20:22] = struct.pack('<H', programLength)\ncontents[11:15] = struct.pack('<L', len(contents))\ncontents[127] = sum(contents[0:126]) & 255\nwith open(args.file, 'wb') as f:\n f.write(contents)\n", "step-5": "#!/usr/bin/env python3\n#\n# nextskeleton - An assembler skeleton for the ZX Spectrum Next\n#\n# Copyright (C) 2020 Richard \"Shred\" Körber\n# https://github.com/shred/nextskeleton\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport struct\nimport sys\n\nparser = argparse.ArgumentParser(description='Generate an autoexec.bas that launches a .nex file')\nparser.add_argument('nex',\n help='path of the .nex file to be launched')\nparser.add_argument('file',\n help='autoexec.bas file to be generated')\nargs = parser.parse_args()\n\ncommand = '.nexload ' + args.nex + '\\r'\n\ncontents = bytearray(128)\ncontents[0:8] = 'PLUS3DOS'.encode('ASCII') # +3DOS signature\ncontents[8] = 0x1A\ncontents[9:11] = [0x01, 0x00] # Issue and Version\n\ncontents += bytearray((0x00, 0x0A)) # Line number 10\ncontents += struct.pack('<H', len(command)) # Line length\ncontents += command.encode('ASCII') # BASIC line\nprogramLength = len(contents) - 128 # Length of the BASIC program\n\ncontents[15] = 0x00 # DOS header: PROGRAM\ncontents[16:18] = struct.pack('<H', programLength) # DOS 
header: length\ncontents[18:20] = struct.pack('<H', 10) # DOS header: run at line 10\ncontents[20:22] = struct.pack('<H', programLength) # DOS header: offset to prog\ncontents[11:15] = struct.pack('<L', len(contents)) # Set total length\ncontents[127] = sum(contents[0:126]) & 0xFF # Compute checksum\n\nwith open(args.file, 'wb') as f:\n f.write(contents)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def factor(n): result = [] d = 2 while d * d <= n: if n % d == 0: result.append(d) n //= d else: d += 1 if n > 1: result.append(n) return result def get_coeff(period): c = randint(0, period) while gcd(c, period) != 1: c += 1 b = 2 a = None factor_result = factor(period) while b <= period: if all([(b % p == 0) for p in factor_result]): if period % 4 == 0: if b % 4 == 0: a = b + 1 break else: a = b + 1 break b += 1 return a, c, randint(2, period) def gen_linear_congruential(period): coeff_a, coeff_c, x0 = get_coeff(period) result = [x0] for i in range(1, period): result.append((coeff_a * result[i - 1] + coeff_c) % period) return result def LFG(init, lst, m, count): result = init.copy() for i in range(len(init), count): result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m) return result <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def factor(n): result = [] d = 2 while d * d <= n: if n % d == 0: result.append(d) n //= d else: d += 1 if n > 1: result.append(n) return result def get_coeff(period): c = randint(0, period) while gcd(c, period) != 1: c += 1 b = 2 a = None factor_result = factor(period) while b <= period: if all([(b % p == 0) for p in factor_result]): if period % 4 == 0: if b % 4 == 0: a = b + 1 break else: a = b + 1 break b += 1 return a, c, randint(2, period) def gen_linear_congruential(period): coeff_a, coeff_c, x0 = get_coeff(period) result = [x0] for i in range(1, period): result.append((coeff_a * result[i - 1] + coeff_c) % period) return result def LFG(init, lst, m, count): result = init.copy() for i in range(len(init), count): result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m) return result <|reserved_special_token_0|> if not delays: delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]]) k = delays[1] + 10 m = 8 print(f'delays = {delays}, k = {k}, m = {m}') else: delays = [int(item) for item in delays.split()] k = int(input('Длина 
начального заполнения: ')) m = int(input('Модуль: ')) <|reserved_special_token_0|> print(LFG(initial_filling, delays, m, 1000)) <|reserved_special_token_1|> <|reserved_special_token_0|> task = """6. Реализовать алгоритм построения ПСП методом Фиббоначи с запаздываниями. Обосновать выбор коэффициентов алгоритма. Для начального заполнения использовать стандартную линейную конгруэнтную ПСП с выбранным периодом. Реализовать возможность для пользователя вводить коэффициенты заранее.""" def factor(n): result = [] d = 2 while d * d <= n: if n % d == 0: result.append(d) n //= d else: d += 1 if n > 1: result.append(n) return result def get_coeff(period): c = randint(0, period) while gcd(c, period) != 1: c += 1 b = 2 a = None factor_result = factor(period) while b <= period: if all([(b % p == 0) for p in factor_result]): if period % 4 == 0: if b % 4 == 0: a = b + 1 break else: a = b + 1 break b += 1 return a, c, randint(2, period) def gen_linear_congruential(period): coeff_a, coeff_c, x0 = get_coeff(period) result = [x0] for i in range(1, period): result.append((coeff_a * result[i - 1] + coeff_c) % period) return result def LFG(init, lst, m, count): result = init.copy() for i in range(len(init), count): result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m) return result delays = input('Параметры запаздывания: ') if not delays: delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]]) k = delays[1] + 10 m = 8 print(f'delays = {delays}, k = {k}, m = {m}') else: delays = [int(item) for item in delays.split()] k = int(input('Длина начального заполнения: ')) m = int(input('Модуль: ')) initial_filling = gen_linear_congruential(k) print(LFG(initial_filling, delays, m, 1000)) <|reserved_special_token_1|> from math import gcd from random import randint, choice task = """6. Реализовать алгоритм построения ПСП методом Фиббоначи с запаздываниями. Обосновать выбор коэффициентов алгоритма. 
Для начального заполнения использовать стандартную линейную конгруэнтную ПСП с выбранным периодом. Реализовать возможность для пользователя вводить коэффициенты заранее.""" def factor(n): result = [] d = 2 while d * d <= n: if n % d == 0: result.append(d) n //= d else: d += 1 if n > 1: result.append(n) return result def get_coeff(period): c = randint(0, period) while gcd(c, period) != 1: c += 1 b = 2 a = None factor_result = factor(period) while b <= period: if all([(b % p == 0) for p in factor_result]): if period % 4 == 0: if b % 4 == 0: a = b + 1 break else: a = b + 1 break b += 1 return a, c, randint(2, period) def gen_linear_congruential(period): coeff_a, coeff_c, x0 = get_coeff(period) result = [x0] for i in range(1, period): result.append((coeff_a * result[i - 1] + coeff_c) % period) return result def LFG(init, lst, m, count): result = init.copy() for i in range(len(init), count): result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m) return result delays = input('Параметры запаздывания: ') if not delays: delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]]) k = delays[1] + 10 m = 8 print(f'delays = {delays}, k = {k}, m = {m}') else: delays = [int(item) for item in delays.split()] k = int(input('Длина начального заполнения: ')) m = int(input('Модуль: ')) initial_filling = gen_linear_congruential(k) print(LFG(initial_filling, delays, m, 1000)) <|reserved_special_token_1|> from math import gcd from random import randint, choice task = """6. Реализовать алгоритм построения ПСП методом Фиббоначи с запаздываниями. Обосновать выбор коэффициентов алгоритма. Для начального заполнения использовать стандартную линейную конгруэнтную ПСП с выбранным периодом. 
Реализовать возможность для пользователя вводить коэффициенты заранее.""" def factor(n): result = [] d = 2 while d * d <= n: if n % d == 0: result.append(d) n //= d else: d += 1 if n > 1: result.append(n) return result def get_coeff(period): c = randint(0, period) while gcd(c, period) != 1: c += 1 b = 2 a = None factor_result = factor(period) while b <= period: if all([b % p == 0 for p in factor_result]): if period % 4 == 0: if b % 4 == 0: a = b + 1 break else: a = b + 1 break b += 1 return a, c, randint(2, period) def gen_linear_congruential(period): coeff_a, coeff_c, x0 = get_coeff(period) result = [x0] for i in range(1, period): result.append((coeff_a * result[i - 1] + coeff_c) % period) return result def LFG(init, lst, m, count): result = init.copy() for i in range(len(init), count): result.append(sum([result[len(result) - j] for j in lst]) % (2 ** m)) return result delays = input("Параметры запаздывания: ") if not delays: # y = x^k + x^j + 1 must be primitive delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]]) k = delays[1] + 10 m = 8 print(f"delays = {delays}, k = {k}, m = {m}") else: delays = [int(item) for item in delays.split()] k = int(input("Длина начального заполнения: ")) m = int(input("Модуль: ")) initial_filling = gen_linear_congruential(k) print(LFG(initial_filling, delays, m, 1000))
flexible
{ "blob_id": "11e9d25c30c8c9945cfa3c234ffa1aab98d1869e", "index": 8023, "step-1": "<mask token>\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([(b % p == 0) for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m)\n return result\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([(b % p == 0) for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m)\n return result\n\n\n<mask token>\nif not delays:\n delays = 
choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])\n k = delays[1] + 10\n m = 8\n print(f'delays = {delays}, k = {k}, m = {m}')\nelse:\n delays = [int(item) for item in delays.split()]\n k = int(input('Длина начального заполнения: '))\n m = int(input('Модуль: '))\n<mask token>\nprint(LFG(initial_filling, delays, m, 1000))\n", "step-3": "<mask token>\ntask = \"\"\"6. Реализовать алгоритм построения ПСП методом Фиббоначи с\nзапаздываниями. Обосновать выбор коэффициентов алгоритма. Для\nначального заполнения использовать стандартную линейную конгруэнтную\nПСП с выбранным периодом. Реализовать возможность для пользователя\nвводить коэффициенты заранее.\"\"\"\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([(b % p == 0) for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m)\n return result\n\n\ndelays = input('Параметры запаздывания: ')\nif not delays:\n delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])\n k = delays[1] + 10\n m = 8\n print(f'delays = {delays}, k = {k}, m = {m}')\nelse:\n delays = [int(item) for item in delays.split()]\n k = int(input('Длина начального заполнения: '))\n m = int(input('Модуль: '))\ninitial_filling = gen_linear_congruential(k)\nprint(LFG(initial_filling, 
delays, m, 1000))\n", "step-4": "from math import gcd\nfrom random import randint, choice\ntask = \"\"\"6. Реализовать алгоритм построения ПСП методом Фиббоначи с\nзапаздываниями. Обосновать выбор коэффициентов алгоритма. Для\nначального заполнения использовать стандартную линейную конгруэнтную\nПСП с выбранным периодом. Реализовать возможность для пользователя\nвводить коэффициенты заранее.\"\"\"\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([(b % p == 0) for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m)\n return result\n\n\ndelays = input('Параметры запаздывания: ')\nif not delays:\n delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])\n k = delays[1] + 10\n m = 8\n print(f'delays = {delays}, k = {k}, m = {m}')\nelse:\n delays = [int(item) for item in delays.split()]\n k = int(input('Длина начального заполнения: '))\n m = int(input('Модуль: '))\ninitial_filling = gen_linear_congruential(k)\nprint(LFG(initial_filling, delays, m, 1000))\n", "step-5": "from math import gcd\nfrom random import randint, choice\n\ntask = \"\"\"6. Реализовать алгоритм построения ПСП методом Фиббоначи с\nзапаздываниями. Обосновать выбор коэффициентов алгоритма. 
Для\nначального заполнения использовать стандартную линейную конгруэнтную\nПСП с выбранным периодом. Реализовать возможность для пользователя\nвводить коэффициенты заранее.\"\"\"\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([b % p == 0 for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % (2 ** m))\n return result\n\n\ndelays = input(\"Параметры запаздывания: \")\nif not delays:\n # y = x^k + x^j + 1 must be primitive\n delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])\n k = delays[1] + 10\n m = 8\n print(f\"delays = {delays}, k = {k}, m = {m}\")\nelse:\n delays = [int(item) for item in delays.split()]\n k = int(input(\"Длина начального заполнения: \"))\n m = int(input(\"Модуль: \"))\ninitial_filling = gen_linear_congruential(k)\nprint(LFG(initial_filling, delays, m, 1000))\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def GenTests(api): yield api.test('basic') <|reserved_special_token_1|> <|reserved_special_token_0|> def RunSteps(api): try: api.step('test step', [{}]) except AssertionError as e: assert str(e) == "Type <type 'dict'> is not permitted. cmd is [{}]" def GenTests(api): yield api.test('basic') <|reserved_special_token_1|> DEPS = ['step'] def RunSteps(api): try: api.step('test step', [{}]) except AssertionError as e: assert str(e) == "Type <type 'dict'> is not permitted. cmd is [{}]" def GenTests(api): yield api.test('basic') <|reserved_special_token_1|> # Copyright 2017 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. DEPS = [ 'step', ] def RunSteps(api): try: api.step('test step', [{}]) except AssertionError as e: assert str(e) == 'Type <type \'dict\'> is not permitted. cmd is [{}]' def GenTests(api): yield api.test('basic')
flexible
{ "blob_id": "25d210144ef209fd5e4ff7e4e4c2e77fd7eb79ac", "index": 3480, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef GenTests(api):\n yield api.test('basic')\n", "step-3": "<mask token>\n\n\ndef RunSteps(api):\n try:\n api.step('test step', [{}])\n except AssertionError as e:\n assert str(e) == \"Type <type 'dict'> is not permitted. cmd is [{}]\"\n\n\ndef GenTests(api):\n yield api.test('basic')\n", "step-4": "DEPS = ['step']\n\n\ndef RunSteps(api):\n try:\n api.step('test step', [{}])\n except AssertionError as e:\n assert str(e) == \"Type <type 'dict'> is not permitted. cmd is [{}]\"\n\n\ndef GenTests(api):\n yield api.test('basic')\n", "step-5": "# Copyright 2017 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\nDEPS = [\n 'step',\n]\n\n\ndef RunSteps(api):\n try:\n api.step('test step', [{}])\n except AssertionError as e:\n assert str(e) == 'Type <type \\'dict\\'> is not permitted. cmd is [{}]'\n\n\ndef GenTests(api):\n yield api.test('basic')", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> pygame.init() <|reserved_special_token_0|> pygame.display.set_caption('Social Force Model - Crosswalk') <|reserved_special_token_0|> for line in open(WALLSFILE, newline='', encoding='utf-8-sig'): coords = line.split(',') wall = [] wall.append(float(coords[0])) wall.append(float(coords[1])) wall.append(float(coords[2])) wall.append(float(coords[3])) walls.append(wall) <|reserved_special_token_0|> for n in range(AGENTSNUM): group_id = int(n / MAXGROUPSIZE) subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE) if n % MAXGROUPSIZE == 0: agents.append([]) if n % MAXSUBGROUPSIZE == 0: agents[group_id].append([]) agent = Agent(n, group_id, subgroup_id) agents[group_id][subgroup_id].append(agent) <|reserved_special_token_0|> while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: mouseX, mouseY = pygame.mouse.get_pos() screen.fill(BACKGROUNDCOLOR) for wall in walls: startPos = np.array([wall[0], wall[1]]) endPos = np.array([wall[2], wall[3]]) startPx = startPos * 10 endPx = endPos * 10 pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx. 
astype(int)) for group in agents: for subgroup in group: for agent in subgroup: agent.direction = normalize(agent.dest - agent.pos) agent.desiredV = agent.desiredSpeed * agent.direction adapt = agent.adaptVel() peopleInter = 0.0 wallInter = 0.0 groupVis = 0.0 groupAtt = 0.0 ownGroupRep = 0.0 otherGroupRep = 0.0 for wall in walls: wallInter += agent.wallInteraction(wall) for groupj in agents: for subgroupj in groupj: for agentj in subgroupj: if agent.agentId != agentj.agentId: peopleInter += agent.peopleInteraction(agentj) agentGroup = [] for sub in group: for mem in sub: if mem.agentId != agent.agentId: agentGroup.append(mem) if len(agentGroup) > 0: groupVis = agent.groupVisual(agentGroup) groupAtt = agent.groupAttraction(agentGroup + [agent]) for agentj in agentGroup: ownGroupRep += agent.ownGroupRepulsion(agentj) groupInter = groupVis + groupAtt + ownGroupRep for gid, g in enumerate(agents): if gid != agent.groupId: otherGroup = [] for sub in g: otherGroup += sub otherGroupRep += agent.otherGroupRepulsion(otherGroup) subgroupForce = agent.subgroupForces(group) sumForce = adapt + wallInter + peopleInter + groupInter accl = sumForce / agent.mass agent.actualV = agent.actualV + accl * 0.5 agent.pos = agent.pos + agent.actualV * 0.5 if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent. 
Goal == 0): agent.Goal = 1 agent.timeOut = pygame.time.get_ticks() print('Agent ', agent.agentId, 'reached goal at ', agent.timeOut) for group in agents: for subgroup in group: for agent in subgroup: scPos = (agent.pos * 10).astype(int) endPos = ((agent.pos + agent.actualV) * 10).astype(int) endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int) pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE, AGENTTHICKNESS) pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3) pygame.draw.line(screen, agent.color, scPos, endPos, 2) pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2) pygame.display.flip() clock.tick(20) <|reserved_special_token_1|> <|reserved_special_token_0|> SCREENSIZE = [1200, 400] RESOLUTION = 180 AGENTSNUM = 12 GROUPSNUM = 2 MAXGROUPSIZE = 6 MAXSUBGROUPSIZE = 3 BACKGROUNDCOLOR = [255, 255, 255] LINECOLOR = [255, 0, 0] AGENTSIZE = 9 AGENTTHICKNESS = 3 WALLSFILE = 'walls.csv' pygame.init() screen = pygame.display.set_mode(SCREENSIZE) pygame.display.set_caption('Social Force Model - Crosswalk') clock = pygame.time.Clock() walls = [] for line in open(WALLSFILE, newline='', encoding='utf-8-sig'): coords = line.split(',') wall = [] wall.append(float(coords[0])) wall.append(float(coords[1])) wall.append(float(coords[2])) wall.append(float(coords[3])) walls.append(wall) agents = [] for n in range(AGENTSNUM): group_id = int(n / MAXGROUPSIZE) subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE) if n % MAXGROUPSIZE == 0: agents.append([]) if n % MAXSUBGROUPSIZE == 0: agents[group_id].append([]) agent = Agent(n, group_id, subgroup_id) agents[group_id][subgroup_id].append(agent) running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: mouseX, mouseY = pygame.mouse.get_pos() screen.fill(BACKGROUNDCOLOR) for wall in walls: startPos = np.array([wall[0], wall[1]]) endPos = np.array([wall[2], wall[3]]) startPx = startPos * 10 endPx = endPos * 10 
pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx. astype(int)) for group in agents: for subgroup in group: for agent in subgroup: agent.direction = normalize(agent.dest - agent.pos) agent.desiredV = agent.desiredSpeed * agent.direction adapt = agent.adaptVel() peopleInter = 0.0 wallInter = 0.0 groupVis = 0.0 groupAtt = 0.0 ownGroupRep = 0.0 otherGroupRep = 0.0 for wall in walls: wallInter += agent.wallInteraction(wall) for groupj in agents: for subgroupj in groupj: for agentj in subgroupj: if agent.agentId != agentj.agentId: peopleInter += agent.peopleInteraction(agentj) agentGroup = [] for sub in group: for mem in sub: if mem.agentId != agent.agentId: agentGroup.append(mem) if len(agentGroup) > 0: groupVis = agent.groupVisual(agentGroup) groupAtt = agent.groupAttraction(agentGroup + [agent]) for agentj in agentGroup: ownGroupRep += agent.ownGroupRepulsion(agentj) groupInter = groupVis + groupAtt + ownGroupRep for gid, g in enumerate(agents): if gid != agent.groupId: otherGroup = [] for sub in g: otherGroup += sub otherGroupRep += agent.otherGroupRepulsion(otherGroup) subgroupForce = agent.subgroupForces(group) sumForce = adapt + wallInter + peopleInter + groupInter accl = sumForce / agent.mass agent.actualV = agent.actualV + accl * 0.5 agent.pos = agent.pos + agent.actualV * 0.5 if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent. 
Goal == 0): agent.Goal = 1 agent.timeOut = pygame.time.get_ticks() print('Agent ', agent.agentId, 'reached goal at ', agent.timeOut) for group in agents: for subgroup in group: for agent in subgroup: scPos = (agent.pos * 10).astype(int) endPos = ((agent.pos + agent.actualV) * 10).astype(int) endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int) pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE, AGENTTHICKNESS) pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3) pygame.draw.line(screen, agent.color, scPos, endPos, 2) pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2) pygame.display.flip() clock.tick(20) <|reserved_special_token_1|> import pygame import pygame.draw import numpy as np from agent import * from tools import * SCREENSIZE = [1200, 400] RESOLUTION = 180 AGENTSNUM = 12 GROUPSNUM = 2 MAXGROUPSIZE = 6 MAXSUBGROUPSIZE = 3 BACKGROUNDCOLOR = [255, 255, 255] LINECOLOR = [255, 0, 0] AGENTSIZE = 9 AGENTTHICKNESS = 3 WALLSFILE = 'walls.csv' pygame.init() screen = pygame.display.set_mode(SCREENSIZE) pygame.display.set_caption('Social Force Model - Crosswalk') clock = pygame.time.Clock() walls = [] for line in open(WALLSFILE, newline='', encoding='utf-8-sig'): coords = line.split(',') wall = [] wall.append(float(coords[0])) wall.append(float(coords[1])) wall.append(float(coords[2])) wall.append(float(coords[3])) walls.append(wall) agents = [] for n in range(AGENTSNUM): group_id = int(n / MAXGROUPSIZE) subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE) if n % MAXGROUPSIZE == 0: agents.append([]) if n % MAXSUBGROUPSIZE == 0: agents[group_id].append([]) agent = Agent(n, group_id, subgroup_id) agents[group_id][subgroup_id].append(agent) running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: mouseX, mouseY = pygame.mouse.get_pos() screen.fill(BACKGROUNDCOLOR) for wall in walls: startPos = np.array([wall[0], wall[1]]) endPos = np.array([wall[2], 
wall[3]]) startPx = startPos * 10 endPx = endPos * 10 pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx. astype(int)) for group in agents: for subgroup in group: for agent in subgroup: agent.direction = normalize(agent.dest - agent.pos) agent.desiredV = agent.desiredSpeed * agent.direction adapt = agent.adaptVel() peopleInter = 0.0 wallInter = 0.0 groupVis = 0.0 groupAtt = 0.0 ownGroupRep = 0.0 otherGroupRep = 0.0 for wall in walls: wallInter += agent.wallInteraction(wall) for groupj in agents: for subgroupj in groupj: for agentj in subgroupj: if agent.agentId != agentj.agentId: peopleInter += agent.peopleInteraction(agentj) agentGroup = [] for sub in group: for mem in sub: if mem.agentId != agent.agentId: agentGroup.append(mem) if len(agentGroup) > 0: groupVis = agent.groupVisual(agentGroup) groupAtt = agent.groupAttraction(agentGroup + [agent]) for agentj in agentGroup: ownGroupRep += agent.ownGroupRepulsion(agentj) groupInter = groupVis + groupAtt + ownGroupRep for gid, g in enumerate(agents): if gid != agent.groupId: otherGroup = [] for sub in g: otherGroup += sub otherGroupRep += agent.otherGroupRepulsion(otherGroup) subgroupForce = agent.subgroupForces(group) sumForce = adapt + wallInter + peopleInter + groupInter accl = sumForce / agent.mass agent.actualV = agent.actualV + accl * 0.5 agent.pos = agent.pos + agent.actualV * 0.5 if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent. 
Goal == 0): agent.Goal = 1 agent.timeOut = pygame.time.get_ticks() print('Agent ', agent.agentId, 'reached goal at ', agent.timeOut) for group in agents: for subgroup in group: for agent in subgroup: scPos = (agent.pos * 10).astype(int) endPos = ((agent.pos + agent.actualV) * 10).astype(int) endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int) pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE, AGENTTHICKNESS) pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3) pygame.draw.line(screen, agent.color, scPos, endPos, 2) pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2) pygame.display.flip() clock.tick(20) <|reserved_special_token_1|> # -*-coding:utf-8-*- # Author: Scott Larter import pygame import pygame.draw import numpy as np from agent import * from tools import * SCREENSIZE = [1200, 400] # walls.csv #SCREENSIZE = [1200, 650] # walls2.csv RESOLUTION = 180 AGENTSNUM = 12 GROUPSNUM = 2 MAXGROUPSIZE = 6 MAXSUBGROUPSIZE = 3 BACKGROUNDCOLOR = [255, 255, 255] LINECOLOR = [255,0,0] AGENTSIZE = 9 AGENTTHICKNESS = 3 WALLSFILE = "walls.csv" pygame.init() screen = pygame.display.set_mode(SCREENSIZE) pygame.display.set_caption('Social Force Model - Crosswalk') clock = pygame.time.Clock() # initialize walls walls = [] for line in open(WALLSFILE, newline='', encoding="utf-8-sig"): coords = line.split(",") wall = [] wall.append(float(coords[0])) wall.append(float(coords[1])) wall.append(float(coords[2])) wall.append(float(coords[3])) walls.append(wall) # initialize agents agents = [] for n in range(AGENTSNUM): group_id = (int)(n / MAXGROUPSIZE) subgroup_id = (int)((n % MAXGROUPSIZE) / MAXSUBGROUPSIZE) if n % MAXGROUPSIZE == 0: agents.append([]) if n % MAXSUBGROUPSIZE == 0: agents[group_id].append([]) agent = Agent(n, group_id, subgroup_id) agents[group_id][subgroup_id].append(agent) running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: (mouseX, mouseY) = 
pygame.mouse.get_pos() screen.fill(BACKGROUNDCOLOR) # draw walls for wall in walls: startPos = np.array([wall[0],wall[1]]) endPos = np.array([wall[2],wall[3]]) startPx = startPos*10 #worldCoord2ScreenCoord(startPos,SCREENSIZE,RESOLUTION) endPx = endPos*10 #worldCoord2ScreenCoord(endPos,SCREENSIZE,RESOLUTION) pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.astype(int)) for group in agents: for subgroup in group: for agent in subgroup: agent.direction = normalize(agent.dest - agent.pos) agent.desiredV = agent.desiredSpeed * agent.direction adapt = agent.adaptVel() # initial forces values peopleInter = 0.0 wallInter = 0.0 groupVis = 0.0 groupAtt = 0.0 ownGroupRep = 0.0 otherGroupRep = 0.0 # wall interaction for wall in walls: wallInter += agent.wallInteraction(wall) # people interaction for groupj in agents: for subgroupj in groupj: for agentj in subgroupj: if agent.agentId != agentj.agentId: peopleInter += agent.peopleInteraction(agentj) # list of group members excluding current ped agentGroup = [] for sub in group: for mem in sub: if mem.agentId != agent.agentId: agentGroup.append(mem) # group visual and attraction forces if len(agentGroup) > 0: groupVis = agent.groupVisual(agentGroup) groupAtt = agent.groupAttraction(agentGroup + [agent]) # same group repulsion for agentj in agentGroup: ownGroupRep += agent.ownGroupRepulsion(agentj) groupInter = groupVis + groupAtt + ownGroupRep # other groups repulsion for gid,g in enumerate(agents): if gid != agent.groupId: # create list of 'other group' members otherGroup = [] for sub in g: otherGroup += sub otherGroupRep += agent.otherGroupRepulsion(otherGroup) #print(otherGroupRep) # subgroup forces subgroupForce = agent.subgroupForces(group) sumForce = adapt + wallInter + peopleInter + groupInter# + otherGroupRep + subgroupForce accl = sumForce / agent.mass agent.actualV = agent.actualV + accl*0.5 # consider dt = 0.5 agent.pos = agent.pos + agent.actualV*0.5 if (np.linalg.norm(agent.pos - agent.dest) < 2) & 
(agent.Goal == 0): agent.Goal = 1 agent.timeOut = pygame.time.get_ticks() #agent.timeOut = clock.get_time()/1000.0 print('Agent ', agent.agentId, 'reached goal at ', agent.timeOut) for group in agents: for subgroup in group: for agent in subgroup: scPos = (agent.pos*10).astype(int) #worldCoord2ScreenCoord(agent.pos, SCREENSIZE, RESOLUTION) endPos = ((agent.pos + agent.actualV) * 10).astype(int) endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int) pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE, AGENTTHICKNESS) pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3) pygame.draw.line(screen, agent.color, scPos, endPos, 2) pygame.draw.line(screen, [255,60,0], scPos, endPosDV, 2) pygame.display.flip() clock.tick(20) #clock.get_time
flexible
{ "blob_id": "00051a4087bfcf2e6826e9afa898830dc59aa5ab", "index": 5451, "step-1": "<mask token>\n", "step-2": "<mask token>\npygame.init()\n<mask token>\npygame.display.set_caption('Social Force Model - Crosswalk')\n<mask token>\nfor line in open(WALLSFILE, newline='', encoding='utf-8-sig'):\n coords = line.split(',')\n wall = []\n wall.append(float(coords[0]))\n wall.append(float(coords[1]))\n wall.append(float(coords[2]))\n wall.append(float(coords[3]))\n walls.append(wall)\n<mask token>\nfor n in range(AGENTSNUM):\n group_id = int(n / MAXGROUPSIZE)\n subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE)\n if n % MAXGROUPSIZE == 0:\n agents.append([])\n if n % MAXSUBGROUPSIZE == 0:\n agents[group_id].append([])\n agent = Agent(n, group_id, subgroup_id)\n agents[group_id][subgroup_id].append(agent)\n<mask token>\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouseX, mouseY = pygame.mouse.get_pos()\n screen.fill(BACKGROUNDCOLOR)\n for wall in walls:\n startPos = np.array([wall[0], wall[1]])\n endPos = np.array([wall[2], wall[3]])\n startPx = startPos * 10\n endPx = endPos * 10\n pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.\n astype(int))\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n agent.direction = normalize(agent.dest - agent.pos)\n agent.desiredV = agent.desiredSpeed * agent.direction\n adapt = agent.adaptVel()\n peopleInter = 0.0\n wallInter = 0.0\n groupVis = 0.0\n groupAtt = 0.0\n ownGroupRep = 0.0\n otherGroupRep = 0.0\n for wall in walls:\n wallInter += agent.wallInteraction(wall)\n for groupj in agents:\n for subgroupj in groupj:\n for agentj in subgroupj:\n if agent.agentId != agentj.agentId:\n peopleInter += agent.peopleInteraction(agentj)\n agentGroup = []\n for sub in group:\n for mem in sub:\n if mem.agentId != agent.agentId:\n agentGroup.append(mem)\n if len(agentGroup) > 0:\n groupVis = 
agent.groupVisual(agentGroup)\n groupAtt = agent.groupAttraction(agentGroup + [agent])\n for agentj in agentGroup:\n ownGroupRep += agent.ownGroupRepulsion(agentj)\n groupInter = groupVis + groupAtt + ownGroupRep\n for gid, g in enumerate(agents):\n if gid != agent.groupId:\n otherGroup = []\n for sub in g:\n otherGroup += sub\n otherGroupRep += agent.otherGroupRepulsion(otherGroup)\n subgroupForce = agent.subgroupForces(group)\n sumForce = adapt + wallInter + peopleInter + groupInter\n accl = sumForce / agent.mass\n agent.actualV = agent.actualV + accl * 0.5\n agent.pos = agent.pos + agent.actualV * 0.5\n if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.\n Goal == 0):\n agent.Goal = 1\n agent.timeOut = pygame.time.get_ticks()\n print('Agent ', agent.agentId, 'reached goal at ',\n agent.timeOut)\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n scPos = (agent.pos * 10).astype(int)\n endPos = ((agent.pos + agent.actualV) * 10).astype(int)\n endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)\n pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE,\n AGENTTHICKNESS)\n pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)\n pygame.draw.line(screen, agent.color, scPos, endPos, 2)\n pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2)\n pygame.display.flip()\n clock.tick(20)\n", "step-3": "<mask token>\nSCREENSIZE = [1200, 400]\nRESOLUTION = 180\nAGENTSNUM = 12\nGROUPSNUM = 2\nMAXGROUPSIZE = 6\nMAXSUBGROUPSIZE = 3\nBACKGROUNDCOLOR = [255, 255, 255]\nLINECOLOR = [255, 0, 0]\nAGENTSIZE = 9\nAGENTTHICKNESS = 3\nWALLSFILE = 'walls.csv'\npygame.init()\nscreen = pygame.display.set_mode(SCREENSIZE)\npygame.display.set_caption('Social Force Model - Crosswalk')\nclock = pygame.time.Clock()\nwalls = []\nfor line in open(WALLSFILE, newline='', encoding='utf-8-sig'):\n coords = line.split(',')\n wall = []\n wall.append(float(coords[0]))\n wall.append(float(coords[1]))\n wall.append(float(coords[2]))\n 
wall.append(float(coords[3]))\n walls.append(wall)\nagents = []\nfor n in range(AGENTSNUM):\n group_id = int(n / MAXGROUPSIZE)\n subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE)\n if n % MAXGROUPSIZE == 0:\n agents.append([])\n if n % MAXSUBGROUPSIZE == 0:\n agents[group_id].append([])\n agent = Agent(n, group_id, subgroup_id)\n agents[group_id][subgroup_id].append(agent)\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouseX, mouseY = pygame.mouse.get_pos()\n screen.fill(BACKGROUNDCOLOR)\n for wall in walls:\n startPos = np.array([wall[0], wall[1]])\n endPos = np.array([wall[2], wall[3]])\n startPx = startPos * 10\n endPx = endPos * 10\n pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.\n astype(int))\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n agent.direction = normalize(agent.dest - agent.pos)\n agent.desiredV = agent.desiredSpeed * agent.direction\n adapt = agent.adaptVel()\n peopleInter = 0.0\n wallInter = 0.0\n groupVis = 0.0\n groupAtt = 0.0\n ownGroupRep = 0.0\n otherGroupRep = 0.0\n for wall in walls:\n wallInter += agent.wallInteraction(wall)\n for groupj in agents:\n for subgroupj in groupj:\n for agentj in subgroupj:\n if agent.agentId != agentj.agentId:\n peopleInter += agent.peopleInteraction(agentj)\n agentGroup = []\n for sub in group:\n for mem in sub:\n if mem.agentId != agent.agentId:\n agentGroup.append(mem)\n if len(agentGroup) > 0:\n groupVis = agent.groupVisual(agentGroup)\n groupAtt = agent.groupAttraction(agentGroup + [agent])\n for agentj in agentGroup:\n ownGroupRep += agent.ownGroupRepulsion(agentj)\n groupInter = groupVis + groupAtt + ownGroupRep\n for gid, g in enumerate(agents):\n if gid != agent.groupId:\n otherGroup = []\n for sub in g:\n otherGroup += sub\n otherGroupRep += agent.otherGroupRepulsion(otherGroup)\n subgroupForce = agent.subgroupForces(group)\n sumForce = 
adapt + wallInter + peopleInter + groupInter\n accl = sumForce / agent.mass\n agent.actualV = agent.actualV + accl * 0.5\n agent.pos = agent.pos + agent.actualV * 0.5\n if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.\n Goal == 0):\n agent.Goal = 1\n agent.timeOut = pygame.time.get_ticks()\n print('Agent ', agent.agentId, 'reached goal at ',\n agent.timeOut)\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n scPos = (agent.pos * 10).astype(int)\n endPos = ((agent.pos + agent.actualV) * 10).astype(int)\n endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)\n pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE,\n AGENTTHICKNESS)\n pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)\n pygame.draw.line(screen, agent.color, scPos, endPos, 2)\n pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2)\n pygame.display.flip()\n clock.tick(20)\n", "step-4": "import pygame\nimport pygame.draw\nimport numpy as np\nfrom agent import *\nfrom tools import *\nSCREENSIZE = [1200, 400]\nRESOLUTION = 180\nAGENTSNUM = 12\nGROUPSNUM = 2\nMAXGROUPSIZE = 6\nMAXSUBGROUPSIZE = 3\nBACKGROUNDCOLOR = [255, 255, 255]\nLINECOLOR = [255, 0, 0]\nAGENTSIZE = 9\nAGENTTHICKNESS = 3\nWALLSFILE = 'walls.csv'\npygame.init()\nscreen = pygame.display.set_mode(SCREENSIZE)\npygame.display.set_caption('Social Force Model - Crosswalk')\nclock = pygame.time.Clock()\nwalls = []\nfor line in open(WALLSFILE, newline='', encoding='utf-8-sig'):\n coords = line.split(',')\n wall = []\n wall.append(float(coords[0]))\n wall.append(float(coords[1]))\n wall.append(float(coords[2]))\n wall.append(float(coords[3]))\n walls.append(wall)\nagents = []\nfor n in range(AGENTSNUM):\n group_id = int(n / MAXGROUPSIZE)\n subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE)\n if n % MAXGROUPSIZE == 0:\n agents.append([])\n if n % MAXSUBGROUPSIZE == 0:\n agents[group_id].append([])\n agent = Agent(n, group_id, subgroup_id)\n 
agents[group_id][subgroup_id].append(agent)\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouseX, mouseY = pygame.mouse.get_pos()\n screen.fill(BACKGROUNDCOLOR)\n for wall in walls:\n startPos = np.array([wall[0], wall[1]])\n endPos = np.array([wall[2], wall[3]])\n startPx = startPos * 10\n endPx = endPos * 10\n pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.\n astype(int))\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n agent.direction = normalize(agent.dest - agent.pos)\n agent.desiredV = agent.desiredSpeed * agent.direction\n adapt = agent.adaptVel()\n peopleInter = 0.0\n wallInter = 0.0\n groupVis = 0.0\n groupAtt = 0.0\n ownGroupRep = 0.0\n otherGroupRep = 0.0\n for wall in walls:\n wallInter += agent.wallInteraction(wall)\n for groupj in agents:\n for subgroupj in groupj:\n for agentj in subgroupj:\n if agent.agentId != agentj.agentId:\n peopleInter += agent.peopleInteraction(agentj)\n agentGroup = []\n for sub in group:\n for mem in sub:\n if mem.agentId != agent.agentId:\n agentGroup.append(mem)\n if len(agentGroup) > 0:\n groupVis = agent.groupVisual(agentGroup)\n groupAtt = agent.groupAttraction(agentGroup + [agent])\n for agentj in agentGroup:\n ownGroupRep += agent.ownGroupRepulsion(agentj)\n groupInter = groupVis + groupAtt + ownGroupRep\n for gid, g in enumerate(agents):\n if gid != agent.groupId:\n otherGroup = []\n for sub in g:\n otherGroup += sub\n otherGroupRep += agent.otherGroupRepulsion(otherGroup)\n subgroupForce = agent.subgroupForces(group)\n sumForce = adapt + wallInter + peopleInter + groupInter\n accl = sumForce / agent.mass\n agent.actualV = agent.actualV + accl * 0.5\n agent.pos = agent.pos + agent.actualV * 0.5\n if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.\n Goal == 0):\n agent.Goal = 1\n agent.timeOut = pygame.time.get_ticks()\n print('Agent ', agent.agentId, 
'reached goal at ',\n agent.timeOut)\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n scPos = (agent.pos * 10).astype(int)\n endPos = ((agent.pos + agent.actualV) * 10).astype(int)\n endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)\n pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE,\n AGENTTHICKNESS)\n pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)\n pygame.draw.line(screen, agent.color, scPos, endPos, 2)\n pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2)\n pygame.display.flip()\n clock.tick(20)\n", "step-5": "# -*-coding:utf-8-*-\n# Author: Scott Larter\n\nimport pygame\nimport pygame.draw\nimport numpy as np\nfrom agent import *\nfrom tools import *\n\n\nSCREENSIZE = [1200, 400] # walls.csv\n#SCREENSIZE = [1200, 650] # walls2.csv\nRESOLUTION = 180\nAGENTSNUM = 12\nGROUPSNUM = 2\nMAXGROUPSIZE = 6\nMAXSUBGROUPSIZE = 3\nBACKGROUNDCOLOR = [255, 255, 255]\nLINECOLOR = [255,0,0]\nAGENTSIZE = 9\nAGENTTHICKNESS = 3\nWALLSFILE = \"walls.csv\"\n\npygame.init()\nscreen = pygame.display.set_mode(SCREENSIZE)\npygame.display.set_caption('Social Force Model - Crosswalk')\nclock = pygame.time.Clock()\n\n# initialize walls\nwalls = []\nfor line in open(WALLSFILE, newline='', encoding=\"utf-8-sig\"):\n coords = line.split(\",\")\n wall = []\n wall.append(float(coords[0]))\n wall.append(float(coords[1]))\n wall.append(float(coords[2]))\n wall.append(float(coords[3]))\n walls.append(wall)\n\n\n# initialize agents\nagents = []\n\nfor n in range(AGENTSNUM):\n group_id = (int)(n / MAXGROUPSIZE)\n subgroup_id = (int)((n % MAXGROUPSIZE) / MAXSUBGROUPSIZE)\n\n if n % MAXGROUPSIZE == 0:\n agents.append([])\n\n if n % MAXSUBGROUPSIZE == 0:\n agents[group_id].append([])\n\n agent = Agent(n, group_id, subgroup_id)\n agents[group_id][subgroup_id].append(agent)\n\n\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n 
(mouseX, mouseY) = pygame.mouse.get_pos()\n\n screen.fill(BACKGROUNDCOLOR)\n\n # draw walls\n for wall in walls:\n startPos = np.array([wall[0],wall[1]])\n endPos = np.array([wall[2],wall[3]])\n startPx = startPos*10 #worldCoord2ScreenCoord(startPos,SCREENSIZE,RESOLUTION)\n endPx = endPos*10 #worldCoord2ScreenCoord(endPos,SCREENSIZE,RESOLUTION)\n pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.astype(int))\n\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n agent.direction = normalize(agent.dest - agent.pos)\n agent.desiredV = agent.desiredSpeed * agent.direction\n\n adapt = agent.adaptVel()\n\n # initial forces values\n peopleInter = 0.0\n wallInter = 0.0\n groupVis = 0.0\n groupAtt = 0.0\n ownGroupRep = 0.0\n otherGroupRep = 0.0\n\n # wall interaction\n for wall in walls:\n wallInter += agent.wallInteraction(wall)\n\n # people interaction\n for groupj in agents:\n for subgroupj in groupj:\n for agentj in subgroupj:\n\n if agent.agentId != agentj.agentId:\n peopleInter += agent.peopleInteraction(agentj)\n\n # list of group members excluding current ped\n agentGroup = []\n for sub in group:\n for mem in sub:\n if mem.agentId != agent.agentId:\n agentGroup.append(mem)\n\n # group visual and attraction forces\n if len(agentGroup) > 0:\n groupVis = agent.groupVisual(agentGroup)\n groupAtt = agent.groupAttraction(agentGroup + [agent])\n\n # same group repulsion\n for agentj in agentGroup:\n ownGroupRep += agent.ownGroupRepulsion(agentj)\n\n groupInter = groupVis + groupAtt + ownGroupRep\n\n # other groups repulsion\n for gid,g in enumerate(agents):\n if gid != agent.groupId:\n # create list of 'other group' members\n otherGroup = []\n for sub in g:\n otherGroup += sub\n\n otherGroupRep += agent.otherGroupRepulsion(otherGroup)\n\n #print(otherGroupRep)\n\n # subgroup forces\n subgroupForce = agent.subgroupForces(group)\n\n sumForce = adapt + wallInter + peopleInter + groupInter# + otherGroupRep + subgroupForce\n\n accl = sumForce 
/ agent.mass\n\n agent.actualV = agent.actualV + accl*0.5 # consider dt = 0.5\n\n agent.pos = agent.pos + agent.actualV*0.5\n\n if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.Goal == 0):\n agent.Goal = 1\n agent.timeOut = pygame.time.get_ticks()\n #agent.timeOut = clock.get_time()/1000.0\n print('Agent ', agent.agentId, 'reached goal at ', agent.timeOut)\n\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n scPos = (agent.pos*10).astype(int) #worldCoord2ScreenCoord(agent.pos, SCREENSIZE, RESOLUTION)\n endPos = ((agent.pos + agent.actualV) * 10).astype(int)\n endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)\n\n pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE, AGENTTHICKNESS)\n pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)\n pygame.draw.line(screen, agent.color, scPos, endPos, 2)\n pygame.draw.line(screen, [255,60,0], scPos, endPosDV, 2)\n\n pygame.display.flip()\n clock.tick(20)\n #clock.get_time", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
def word_count(s): # Your code here cache = {} ignore = '":;,.-+=/\\|[]{}()*^&' lower = s.lower() for i in lower: if i in ignore: lower = lower.replace(i, '') words = lower.split() for j in words: if j not in cache: cache[j] = 1 else: cache[j] += 1 return cache if __name__ == "__main__": print(word_count("")) print(word_count("Hello")) print(word_count('Hello, my cat. And my cat doesn\'t say "hello" back.')) print(word_count( 'This is a test of the emergency broadcast network. This is only a test.'))
normal
{ "blob_id": "97d84f99264afa5e7df4b5d22cf4c49b2d14ff7a", "index": 8291, "step-1": "<mask token>\n", "step-2": "def word_count(s):\n cache = {}\n ignore = '\":;,.-+=/\\\\|[]{}()*^&'\n lower = s.lower()\n for i in lower:\n if i in ignore:\n lower = lower.replace(i, '')\n words = lower.split()\n for j in words:\n if j not in cache:\n cache[j] = 1\n else:\n cache[j] += 1\n return cache\n\n\n<mask token>\n", "step-3": "def word_count(s):\n cache = {}\n ignore = '\":;,.-+=/\\\\|[]{}()*^&'\n lower = s.lower()\n for i in lower:\n if i in ignore:\n lower = lower.replace(i, '')\n words = lower.split()\n for j in words:\n if j not in cache:\n cache[j] = 1\n else:\n cache[j] += 1\n return cache\n\n\nif __name__ == '__main__':\n print(word_count(''))\n print(word_count('Hello'))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count(\n 'This is a test of the emergency broadcast network. This is only a test.'\n ))\n", "step-4": "def word_count(s):\n # Your code here\n cache = {}\n ignore = '\":;,.-+=/\\\\|[]{}()*^&'\n lower = s.lower()\n\n for i in lower:\n if i in ignore:\n lower = lower.replace(i, '')\n words = lower.split()\n for j in words:\n if j not in cache:\n cache[j] = 1\n else:\n cache[j] += 1\n return cache\n\n\nif __name__ == \"__main__\":\n print(word_count(\"\"))\n print(word_count(\"Hello\"))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count(\n 'This is a test of the emergency broadcast network. This is only a test.'))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django import forms from .models import File, Sample, Plate, Well, Machine, Project class MachineForm(forms.ModelForm): class Meta: model = Machine fields = ['name', 'author', 'status', 'comments'] class ProjectForm(forms.ModelForm): class Meta: model = Project fields = ['name', 'author', 'collaborators', 'status', 'comments'] class FileForm(forms.ModelForm): class Meta: model = File fields = ['name', 'script', 'author', 'file'] class SampleForm(forms.ModelForm): class Meta: model = Sample fields = ['name', 'alias', 'sample_type', 'description', 'project', 'author', 'sequence', 'length', 'genbank', 'source_reference', 'comments', 'parent_id', 'organism', 'genus_specie', 'marker', 'application', 'strategy', 'seq_verified', 'origin_rep', 'cloning_system', 'strand', 'order_number', 'part_type', 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction', 'tm'] class PlateForm(forms.ModelForm): class Meta: model = Plate fields = ['name', 'barcode', 'type', 'contents', 'location', 'num_cols', 'num_rows', 'num_well', 'function', 'project', 'active', 'status'] class WellForm(forms.ModelForm): class Meta: model = Well fields = ['name', 'volume', 'concentration', 'plate', 'samples', 'active', 'status']
normal
{ "blob_id": "5bb894feaf9293bf70b3f831e33be555f74efde8", "index": 6901, "step-1": "<mask token>\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n", "step-2": "<mask token>\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n 
fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n", "step-3": "<mask token>\n\n\nclass MachineForm(forms.ModelForm):\n\n\n class Meta:\n model = Machine\n fields = ['name', 'author', 'status', 'comments']\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n", "step-4": "from django import forms\nfrom .models import File, Sample, Plate, Well, Machine, Project\n\n\nclass MachineForm(forms.ModelForm):\n\n\n class Meta:\n model = Machine\n fields = ['name', 'author', 'status', 'comments']\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 
'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n", "step-5": null, "step-ids": [ 3, 5, 6, 7 ] }
[ 3, 5, 6, 7 ]
import math from chainer import cuda from chainer import function from chainer.functions import Sigmoid from chainer.utils import type_check import numpy def _as_mat(x): if x.ndim == 2: return x return x.reshape(len(x), -1) class Autoencoder(function.Function): def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1, bias=0, initialW=None, initial_bias1=None, initial_bias2=None): self.W = None self.gW = None self.b1 = None self.b2 = None self.gb1 = None self.gb2 = None self.activation = None if initialW is not None: assert initialW.shape == (hidden_size, in_size) self.W = initialW else: self.W = numpy.random.normal( 0, wscale * math.sqrt(1. / in_size), (hidden_size, in_size)).astype(numpy.float32) xp = cuda.get_array_module(self.W) self.gW = xp.full_like(self.W, numpy.nan) if initial_bias1 is not None: assert initial_bias1.shape == (hidden_size,) self.b1 = initial_bias1 else: self.b1 = numpy.repeat(numpy.float32(bias), hidden_size) if initial_bias2 is not None: assert initial_bias2.shape == (in_size,) self.b2 = initial_bias2 else: self.b2 = numpy.repeat(numpy.float32(bias), in_size) self.gb1 = xp.empty_like(self.b1) self.gb2 = xp.empty_like(self.b2) if activation is not None: if activation == Sigmoid: self.activation = activation() else: self.activation = activation def hidden(self, x): h = _Encoder(self.W, self.b1)(x) if self.activation is not None: h = self.activation(h) h.unchain_backward() return h @property def parameter_names(self): return 'W', 'b1', 'b2' @property def gradient_names(self): return 'gW', 'gb1', 'gb2' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect( x_type.dtype == numpy.float32, x_type.ndim >= 2, (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')), ) def check_type_backward(self, in_types, out_types): type_check.expect( in_types.size() == 1, out_types.size() == 1, ) x_type, = in_types y_type, = 
out_types type_check.expect( y_type.dtype == numpy.float32, y_type.ndim == 2, y_type.shape[0] == x_type.shape[0], y_type.shape[1] == type_check.Variable(self.W.shape[1], 'W.shape[1]'), ) def zero_grads(self): self.gW.fill(0) self.gb1.fill(0) self.gb2.fill(0) def forward(self, x): _x = _as_mat(x[0]) Wx = _x.dot(self.W.T) Wx += self.b1 self.x_activation = Wx if self.activation is not None: h, = self.activation.forward([Wx]) else: h = Wx self.x_decode = h y = h.dot(self.W) y += self.b2 return y, def backward(self, x, gy): _x = self.x_decode _gy = gy[0] self.gW += _x.T.dot(_gy) self.gb2 += _gy.sum(0) _gy = _gy.dot(self.W.T).reshape(_x.shape) if self.activation is not None: _gy, = self.activation.backward([self.x_activation], [_gy]) _x = _as_mat(x[0]) self.gW += _gy.T.dot(_x) self.gb1 += _gy.sum(0) return _gy.dot(self.W).reshape(x[0].shape), # undifferentiable Linear function class _Encoder(function.Function): def __init__(self, initialW, initial_Bias): self.W = initialW self.b = initial_Bias def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect( x_type.dtype == numpy.float32, x_type.ndim >= 2, (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')), ) def forward(self, x): x = _as_mat(x[0]) Wx = x.dot(self.W.T) Wx += self.b return Wx,
normal
{ "blob_id": "97eb599ae8bf726d827d6f8313b7cf2838f9c125", "index": 4098, "step-1": "<mask token>\n\n\nclass Autoencoder(function.Function):\n <mask token>\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n <mask token>\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(in_types.size() == 1, out_types.size() == 1)\n x_type, = in_types\n y_type, = out_types\n type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, \n y_type.shape[0] == x_type.shape[0], y_type.shape[1] ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n <mask token>\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n return y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n return _gy.dot(self.W).reshape(x[0].shape),\n\n\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n 
type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-2": "<mask token>\n\n\nclass Autoencoder(function.Function):\n\n def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1,\n bias=0, initialW=None, initial_bias1=None, initial_bias2=None):\n self.W = None\n self.gW = None\n self.b1 = None\n self.b2 = None\n self.gb1 = None\n self.gb2 = None\n self.activation = None\n if initialW is not None:\n assert initialW.shape == (hidden_size, in_size)\n self.W = initialW\n else:\n self.W = numpy.random.normal(0, wscale * math.sqrt(1.0 /\n in_size), (hidden_size, in_size)).astype(numpy.float32)\n xp = cuda.get_array_module(self.W)\n self.gW = xp.full_like(self.W, numpy.nan)\n if initial_bias1 is not None:\n assert initial_bias1.shape == (hidden_size,)\n self.b1 = initial_bias1\n else:\n self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)\n if initial_bias2 is not None:\n assert initial_bias2.shape == (in_size,)\n self.b2 = initial_bias2\n else:\n self.b2 = numpy.repeat(numpy.float32(bias), in_size)\n self.gb1 = xp.empty_like(self.b1)\n self.gb2 = xp.empty_like(self.b2)\n if activation is not None:\n if activation == Sigmoid:\n self.activation = activation()\n else:\n self.activation = activation\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n\n @property\n def gradient_names(self):\n return 'gW', 'gb1', 'gb2'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def 
check_type_backward(self, in_types, out_types):\n type_check.expect(in_types.size() == 1, out_types.size() == 1)\n x_type, = in_types\n y_type, = out_types\n type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, \n y_type.shape[0] == x_type.shape[0], y_type.shape[1] ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n <mask token>\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n return y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n return _gy.dot(self.W).reshape(x[0].shape),\n\n\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-3": "<mask token>\n\n\nclass Autoencoder(function.Function):\n\n def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1,\n bias=0, initialW=None, initial_bias1=None, initial_bias2=None):\n self.W = None\n self.gW = None\n self.b1 = None\n self.b2 = None\n self.gb1 = None\n self.gb2 = None\n self.activation = None\n if initialW is not None:\n assert initialW.shape == (hidden_size, in_size)\n self.W = initialW\n else:\n self.W = numpy.random.normal(0, 
wscale * math.sqrt(1.0 /\n in_size), (hidden_size, in_size)).astype(numpy.float32)\n xp = cuda.get_array_module(self.W)\n self.gW = xp.full_like(self.W, numpy.nan)\n if initial_bias1 is not None:\n assert initial_bias1.shape == (hidden_size,)\n self.b1 = initial_bias1\n else:\n self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)\n if initial_bias2 is not None:\n assert initial_bias2.shape == (in_size,)\n self.b2 = initial_bias2\n else:\n self.b2 = numpy.repeat(numpy.float32(bias), in_size)\n self.gb1 = xp.empty_like(self.b1)\n self.gb2 = xp.empty_like(self.b2)\n if activation is not None:\n if activation == Sigmoid:\n self.activation = activation()\n else:\n self.activation = activation\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n\n @property\n def gradient_names(self):\n return 'gW', 'gb1', 'gb2'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(in_types.size() == 1, out_types.size() == 1)\n x_type, = in_types\n y_type, = out_types\n type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, \n y_type.shape[0] == x_type.shape[0], y_type.shape[1] ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def zero_grads(self):\n self.gW.fill(0)\n self.gb1.fill(0)\n self.gb2.fill(0)\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n return y,\n\n def 
backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n return _gy.dot(self.W).reshape(x[0].shape),\n\n\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-4": "<mask token>\n\n\ndef _as_mat(x):\n if x.ndim == 2:\n return x\n return x.reshape(len(x), -1)\n\n\nclass Autoencoder(function.Function):\n\n def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1,\n bias=0, initialW=None, initial_bias1=None, initial_bias2=None):\n self.W = None\n self.gW = None\n self.b1 = None\n self.b2 = None\n self.gb1 = None\n self.gb2 = None\n self.activation = None\n if initialW is not None:\n assert initialW.shape == (hidden_size, in_size)\n self.W = initialW\n else:\n self.W = numpy.random.normal(0, wscale * math.sqrt(1.0 /\n in_size), (hidden_size, in_size)).astype(numpy.float32)\n xp = cuda.get_array_module(self.W)\n self.gW = xp.full_like(self.W, numpy.nan)\n if initial_bias1 is not None:\n assert initial_bias1.shape == (hidden_size,)\n self.b1 = initial_bias1\n else:\n self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)\n if initial_bias2 is not None:\n assert initial_bias2.shape == (in_size,)\n self.b2 = initial_bias2\n else:\n self.b2 = numpy.repeat(numpy.float32(bias), in_size)\n self.gb1 = xp.empty_like(self.b1)\n self.gb2 
= xp.empty_like(self.b2)\n if activation is not None:\n if activation == Sigmoid:\n self.activation = activation()\n else:\n self.activation = activation\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n\n @property\n def gradient_names(self):\n return 'gW', 'gb1', 'gb2'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(in_types.size() == 1, out_types.size() == 1)\n x_type, = in_types\n y_type, = out_types\n type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, \n y_type.shape[0] == x_type.shape[0], y_type.shape[1] ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def zero_grads(self):\n self.gW.fill(0)\n self.gb1.fill(0)\n self.gb2.fill(0)\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n return y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n return _gy.dot(self.W).reshape(x[0].shape),\n\n\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n 
type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-5": "import math\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions import Sigmoid\nfrom chainer.utils import type_check\n\nimport numpy\n\ndef _as_mat(x):\n if x.ndim == 2:\n return x\n return x.reshape(len(x), -1)\n\nclass Autoencoder(function.Function):\n\n def __init__(self, in_size, hidden_size, activation=Sigmoid,\n wscale=1, bias=0,\n initialW=None, initial_bias1=None, initial_bias2=None):\n self.W = None\n self.gW = None\n self.b1 = None\n self.b2 = None\n self.gb1 = None\n self.gb2 = None\n self.activation = None\n\n if initialW is not None:\n assert initialW.shape == (hidden_size, in_size)\n self.W = initialW\n else:\n self.W = numpy.random.normal(\n 0, wscale * math.sqrt(1. 
/ in_size),\n (hidden_size, in_size)).astype(numpy.float32)\n xp = cuda.get_array_module(self.W)\n self.gW = xp.full_like(self.W, numpy.nan)\n\n if initial_bias1 is not None:\n assert initial_bias1.shape == (hidden_size,)\n self.b1 = initial_bias1\n else:\n self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)\n\n if initial_bias2 is not None:\n assert initial_bias2.shape == (in_size,)\n self.b2 = initial_bias2\n else:\n self.b2 = numpy.repeat(numpy.float32(bias), in_size)\n\n self.gb1 = xp.empty_like(self.b1)\n self.gb2 = xp.empty_like(self.b2)\n\n if activation is not None:\n if activation == Sigmoid:\n self.activation = activation()\n else:\n self.activation = activation\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n\n @property\n def gradient_names(self):\n return 'gW', 'gb1', 'gb2'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim >= 2,\n (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]')),\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() == 1,\n out_types.size() == 1,\n )\n x_type, = in_types\n y_type, = out_types\n\n type_check.expect(\n y_type.dtype == numpy.float32,\n y_type.ndim == 2,\n y_type.shape[0] == x_type.shape[0],\n y_type.shape[1] == type_check.Variable(self.W.shape[1],\n 'W.shape[1]'),\n )\n\n def zero_grads(self):\n self.gW.fill(0)\n self.gb1.fill(0)\n self.gb2.fill(0)\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n\n return 
y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n\n return _gy.dot(self.W).reshape(x[0].shape),\n\n# undifferentiable Linear function\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim >= 2,\n (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]')),\n )\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-ids": [ 11, 13, 14, 15, 17 ] }
[ 11, 13, 14, 15, 17 ]