12Parker committed
Commit 5980447 · verified · 1 parent: dae1a12

Upload 96 files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. PythonDataset/dev/eNMS-task-instances.jsonl.all +1 -0
  2. PythonDataset/dev/flask-github-task-instances.jsonl.all +1 -0
  3. PythonDataset/dev/glue-task-instances.jsonl.all +1 -0
  4. PythonDataset/dev/gwells-task-instances.jsonl.all +1 -0
  5. PythonDataset/dev/inky-task-instances.jsonl.all +1 -0
  6. PythonDataset/dev/kinto-task-instances.jsonl.all +1 -0
  7. PythonDataset/test/NeMo-task-instances.jsonl.all +1 -0
  8. PythonDataset/test/PypeIt-task-instances.jsonl.all +1 -0
  9. PythonDataset/test/Userge-Plugins-task-instances.jsonl.all +1 -0
  10. PythonDataset/test/decomp-permuter-task-instances.jsonl.all +0 -0
  11. PythonDataset/test/faucet-task-instances.jsonl.all +1 -0
  12. PythonDataset/test/gcloud-aio-task-instances.jsonl.all +1 -0
  13. PythonDataset/test/google-cloud-python-task-instances.jsonl.all +0 -0
  14. PythonDataset/test/isso-task-instances.jsonl.all +1 -0
  15. PythonDataset/test/libcloud-task-instances.jsonl.all +1 -0
  16. PythonDataset/test/maintainer-tools-task-instances.jsonl.all +1 -0
  17. PythonDataset/test/mimic-task-instances.jsonl.all +1 -0
  18. PythonDataset/test/mixpanel-python-task-instances.jsonl.all +1 -0
  19. PythonDataset/test/monet-task-instances.jsonl.all +0 -0
  20. PythonDataset/test/msmtools-task-instances.jsonl.all +1 -0
  21. PythonDataset/test/py-evm-task-instances.jsonl.all +1 -0
  22. PythonDataset/test/py-trello-task-instances.jsonl.all +1 -0
  23. PythonDataset/test/python-gvm-task-instances.jsonl.all +1 -0
  24. PythonDataset/test/python-slack-sdk-task-instances.jsonl.all +1 -0
  25. PythonDataset/test/python-sshpubkeys-task-instances.jsonl.all +1 -0
  26. PythonDataset/test/quality-time-task-instances.jsonl.all +0 -0
  27. PythonDataset/test/respa-task-instances.jsonl.all +1 -0
  28. PythonDataset/test/ripe-atlas-sagan-task-instances.jsonl.all +1 -0
  29. PythonDataset/test/scrapy-pagestorage-task-instances.jsonl.all +1 -0
  30. PythonDataset/test/seed-task-instances.jsonl.all +0 -0
  31. PythonDataset/test/sleap-task-instances.jsonl.all +1 -0
  32. PythonDataset/test/spruned-task-instances.jsonl.all +0 -0
  33. PythonDataset/test/teuthology-task-instances.jsonl.all +1 -0
  34. PythonDataset/test/translate-python-task-instances.jsonl.all +1 -0
  35. PythonDataset/test/wampy-task-instances.jsonl.all +1 -0
  36. PythonDataset/train/DingoLingo-task-instances.jsonl.all +1 -0
  37. PythonDataset/train/Mathics-task-instances.jsonl.all +1 -0
  38. PythonDataset/train/ProjectAlice-task-instances.jsonl.all +0 -0
  39. PythonDataset/train/RESTKnot-task-instances.jsonl.all +1 -0
  40. PythonDataset/train/SempoBlockchain-task-instances.jsonl.all +1 -0
  41. PythonDataset/train/SpockBot-task-instances.jsonl.all +1 -0
  42. PythonDataset/train/TheOrgBook-task-instances.jsonl.all +1 -0
  43. PythonDataset/train/bert_score-task-instances.jsonl.all +1 -0
  44. PythonDataset/train/combine-task-instances.jsonl.all +1 -0
  45. PythonDataset/train/ctlearn-task-instances.jsonl.all +0 -0
  46. PythonDataset/train/cwltool-task-instances.jsonl.all +0 -0
  47. PythonDataset/train/discogs_client-task-instances.jsonl.all +1 -0
  48. PythonDataset/train/django-rq-task-instances.jsonl.all +1 -0
  49. PythonDataset/train/fcn-task-instances.jsonl.all +1 -0
  50. PythonDataset/train/flake8-todo-task-instances.jsonl.all +1 -0
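Each listed file is a JSON Lines dump of task instances, one JSON object per line. A minimal loader sketch (standard library only; the path is one of the files above, and the field names are taken directly from the records in the diffs below):

```python
import json

# Each .jsonl.all file holds one record per line with keys:
# repo, pull_number, instance_id, issue_numbers, base_commit,
# patch, test_patch, problem_statement, hints_text, created_at.
with open("PythonDataset/dev/eNMS-task-instances.jsonl.all") as fh:
    for line in fh:
        record = json.loads(line)
        print(record["instance_id"], record["base_commit"])
```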
PythonDataset/dev/eNMS-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "eNMS-automation/eNMS", "pull_number": 12, "instance_id": "eNMS-automation__eNMS-12", "issue_numbers": "", "base_commit": "4420e32e0e593b9bd1f904fa467e467ad1c5a149", "patch": "diff --git a/models.py b/models.py\n--- a/models.py\n+++ b/models.py\n@@ -1,4 +1,4 @@\n-from napalm_base import get_network_driver\n+from napalm import get_network_driver\n from netmiko import ConnectHandler\n from sqlalchemy.ext.declarative import declarative_base\n from sqlalchemy import Column, Integer, String\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2017-11-30T20:49:06Z"}
PythonDataset/dev/flask-github-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "jarodl/flask-github", "pull_number": 3, "instance_id": "jarodl__flask-github-3", "issue_numbers": "", "base_commit": "985887fd227a07c424c2f953dc2d285bcc67b0ab", "patch": "diff --git a/flaskext/github.py b/flask_github.py\nsimilarity index 81%\nrename from flaskext/github.py\nrename to flask_github.py\n--- a/flaskext/github.py\n+++ b/flask_github.py\n@@ -5,14 +5,14 @@\n \n Authenticate users in your Flask app with Github.\n \"\"\"\n-import json\n-import oauth2\n-from httplib2 import Http\n from functools import wraps\n from urllib import urlencode\n from urlparse import parse_qs\n+\n+import requests\n from flask import redirect, request\n \n+\n class GithubAuth(object):\n \"\"\"\n Provides decorators for authenticating users with Github within a Flask\n@@ -37,6 +37,7 @@ def __init__(self, client_id, client_secret, session_key):\n self.get_access_token = lambda: None\n self.base_url = 'https://api.github.com/'\n self.base_auth_url = 'https://github.com/login/oauth/'\n+ self.session = requests.session()\n \n def access_token_getter(self, f):\n \"\"\"\n@@ -62,25 +63,26 @@ def authorize(self, callback_url=None, scope=None):\n auth_url = self.base_auth_url + 'authorize?' + urlencode(params)\n return redirect(auth_url)\n \n- def raw_request(self, base_url, resource, params, method, accept='json'):\n+ def raw_request(self, base_url, resource, params, method,\n+ access_token=None):\n \"\"\"\n Makes a raw HTTP request and returns the response and content.\n \"\"\"\n- http = Http(disable_ssl_certificate_validation=True)\n- params.update({'access_token': self.get_access_token()})\n- headers = {\n- \"Content-type\": \"application/x-www-form-urlencoded\",\n- \"Accept\": accept\n- }\n- url = base_url + resource + '?' + urlencode(params)\n- resp, content = http.request(url, method)\n- return resp, content\n+ url = base_url + resource\n+ if params is None:\n+ params = {}\n+ if access_token is None:\n+ access_token = self.get_access_token()\n+ params.update({'access_token': access_token})\n+ return self.session.request(method, url, params)\n \n- def get_resource(self, resource, params={}):\n+ def get_resource(self, resource, params=None, access_token=None):\n \"\"\"\n Makes a raw HTTP GET request and returns the response and content.\n \"\"\"\n- return self.raw_request(self.base_url, resource, params, \"GET\")\n+ response = self.raw_request(\n+ self.base_url, resource, params, \"GET\", access_token)\n+ return response, response.json()\n \n def handle_response(self):\n \"\"\"\n@@ -94,18 +96,16 @@ def handle_response(self):\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n- resp, content = self.raw_request(self.base_auth_url, 'access_token',\n- params, \"POST\")\n- data = parse_qs(content)\n+ response = self.raw_request(\n+ self.base_auth_url, 'access_token', params, \"POST\")\n+ data = parse_qs(response.content)\n for k, v in data.items():\n if len(v) == 1:\n data[k] = v[0]\n return data\n \n def handle_invalid_response(self):\n- \"\"\"\n- \"\"\"\n- return None\n+ pass\n \n def authorized_handler(self, f):\n \"\"\"\n@@ -136,8 +136,7 @@ def get_github_user(self):\n Requests the authenticated user's data from Github.\n \"\"\"\n path = 'user'\n- resp, content = self.get_resource(path)\n- user = json.loads(content)\n+ resp, user = self.get_resource(path)\n return user\n \n def has_org_access(self, organization):\n@@ -146,8 +145,7 @@ def has_org_access(self, organization):\n organization.\n \"\"\"\n path = 'orgs/' + organization + '/members'\n- resp, content = 
self.get_resource(path)\n- org_members = json.loads(content)\n+ resp, org_members = self.get_resource(path)\n user = self.github_user()\n for member in org_members:\n if member['login'] == user['login']:\ndiff --git a/flaskext/__init__.py b/flaskext/__init__.py\ndeleted file mode 100644\n--- a/flaskext/__init__.py\n+++ /dev/null\n@@ -1,2 +0,0 @@\n-__import__('pkg_resources').declare_namespace(__name__)\n-\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,13 +16,13 @@\n author_email='jarodluebbert@gmail.com',\n description='Adds support for authorizing users with Github to Flask',\n long_description=__doc__,\n- packages=['flaskext'],\n- namespace_packages=['flaskext'],\n+ py_modules=['flask_github'],\n zip_safe=False,\n+ include_package_data=True,\n platforms='any',\n install_requires=[\n 'Flask',\n- 'oauth2'\n+ 'requests',\n ],\n classifiers=[\n 'Environment :: Web Environment',\n@@ -33,4 +33,3 @@\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ]\n )\n-\n", "test_patch": "diff --git a/tests.py b/tests.py\ndeleted file mode 100644\n", "problem_statement": "", "hints_text": "", "created_at": "2013-05-26T10:44:46Z"}
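This rewrite replaces `oauth2`/`httplib2` with a shared `requests` session and decodes JSON from the response object. A rough sketch of the new request path (the URL endpoint matches the patch; the token value is a placeholder):

```python
import requests

session = requests.session()  # reused across calls, as in the patched GithubAuth
response = session.request("GET", "https://api.github.com/user",
                           params={"access_token": "PLACEHOLDER_TOKEN"})
user = response.json()
```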
PythonDataset/dev/glue-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "glue-viz/glue", "pull_number": 99, "instance_id": "glue-viz__glue-99", "issue_numbers": "", "base_commit": "8b026e8681873d0b4408eba96360576ef37b56a9", "patch": "diff --git a/glue/core/coordinates.py b/glue/core/coordinates.py\n--- a/glue/core/coordinates.py\n+++ b/glue/core/coordinates.py\n@@ -1,10 +1,7 @@\n import logging\n \n-import pywcs\n import numpy as np\n \n-import pyfits.core\n-\n __all__ = ['Coordinates', 'WCSCoordinates', 'WCSCube']\n \n \n@@ -43,12 +40,14 @@ class WCSCoordinates(Coordinates):\n def __init__(self, header):\n super(WCSCoordinates, self).__init__()\n self._header = header\n- self._wcs = pywcs.WCS(header)\n+ from astropy import wcs\n+ self._wcs = wcs.WCS(header)\n \n def __setstate__(self, state):\n self.__dict__ = state\n # wcs object doesn't seem to unpickle properly. reconstruct it\n- self._wcs = pywcs.WCS(self._header)\n+ from astropy import wcs\n+ self._wcs = wcs.WCS(self._header)\n \n def pixel2world(self, xpix, ypix):\n '''\n@@ -145,8 +144,9 @@ class WCSCubeCoordinates(WCSCoordinates):\n \n def __init__(self, header):\n super(WCSCubeCoordinates, self).__init__(header)\n- if not isinstance(header, pyfits.core.Header):\n- raise TypeError(\"Header must by a pyfits header instance\")\n+ from astropy.io import fits\n+ if not isinstance(header, fits.Header):\n+ raise TypeError(\"Header must by an astropy.io.fits.Header instance\")\n \n if 'NAXIS' not in header or header['NAXIS'] != 3:\n raise AttributeError(\"Header must describe a 3D array\")\n@@ -173,10 +173,12 @@ def __init__(self, header):\n \"%s = %s\" %\n (k, header[k]))\n self._fix_header_for_2d()\n- self._wcs = pywcs.WCS(header)\n+\n+ from astropy import wcs\n+ self._wcs = wcs.WCS(header)\n \n def _fix_header_for_2d(self):\n- #workaround for pywcs -- need to remove 3D header keywords\n+ #workaround for astropy.wcs -- need to remove 3D header keywords\n self._header['NAXIS'] = 2\n for tag in ['NAXIS3', 'CDELT3', 'CD3_3', 'CRPIX3', 'CRVAL3', 'CTYPE3']:\n if tag in self._header:\n@@ -208,7 +210,7 @@ def coordinates_from_header(header):\n \"\"\" Convert a FITS header into a glue Coordinates object\n \n :param header: Header to convert\n- :type header: :class:`pyfits.Header`\n+ :type header: :class:`astropy.io.fits.Header`\n \n :rtype: :class:`~glue.core.coordinates.Coordinates`\n \"\"\"\ndiff --git a/glue/core/data.py b/glue/core/data.py\n--- a/glue/core/data.py\n+++ b/glue/core/data.py\n@@ -2,7 +2,6 @@\n import logging\n \n import numpy as np\n-import pyfits\n \n from .io import extract_data_fits, extract_data_hdf5\n from .coordinates import Coordinates, coordinates_from_header\n@@ -673,8 +672,9 @@ def read_data(self, filename, format='auto', **kwargs):\n \n # Read in the data\n if format in ['fits', 'fit']:\n+ from astropy.io import fits\n arrays = extract_data_fits(filename, **kwargs)\n- header = pyfits.open(filename, memmap=True)[0].header\n+ header = fits.open(filename, memmap=True)[0].header\n self.coords = coordinates_from_header(header)\n elif format in ['hdf', 'hdf5', 'h5']:\n arrays = extract_data_hdf5(filename, **kwargs)\ndiff --git a/glue/core/io.py b/glue/core/io.py\n--- a/glue/core/io.py\n+++ b/glue/core/io.py\n@@ -7,10 +7,10 @@ def extract_data_fits(filename, use_hdu='all'):\n Exception is raised.\n '''\n \n- import pyfits\n+ from astropy.io import fits\n \n # Read in all HDUs\n- hdulist = pyfits.open(filename, memmap=True)\n+ hdulist = fits.open(filename, memmap=True)\n \n # If only a subset are requested, extract those\n if use_hdu != 'all':\n@@ -18,8 +18,8 @@ def 
extract_data_fits(filename, use_hdu='all'):\n \n # Now only keep HDUs that are not tables\n for hdu in hdulist:\n- if not isinstance(hdu, pyfits.PrimaryHDU) and \\\n- not isinstance(hdu, pyfits.ImageHDU):\n+ if not isinstance(hdu, fits.PrimaryHDU) and \\\n+ not isinstance(hdu, fits.ImageHDU):\n hdulist.remove(hdu)\n \n # Check that dimensions of all HDU are the same\ndiff --git a/glue/core/subset.py b/glue/core/subset.py\n--- a/glue/core/subset.py\n+++ b/glue/core/subset.py\n@@ -1,6 +1,5 @@\n import operator\n import numpy as np\n-import pyfits\n \n from .visual import VisualAttributes, RED\n from .decorators import memoize_attr_check\n@@ -194,13 +193,15 @@ def write_mask(self, file_name, format=\"fits\"):\n \"\"\"\n mask = np.short(self.to_mask())\n if format == 'fits':\n- pyfits.writeto(file_name, mask, clobber=True)\n+ from astropy.io import fits\n+ fits.writeto(file_name, mask, clobber=True)\n else:\n raise AttributeError(\"format not supported: %s\" % format)\n \n def read_mask(self, file_name):\n try:\n- mask = pyfits.open(file_name)[0].data\n+ from astropy.io import fits\n+ mask = fits.open(file_name)[0].data\n except IOError:\n raise IOError(\"Could not read %s (not a fits file?)\" % file_name)\n ind = np.where(mask.flat)[0]\n", "test_patch": "diff --git a/glue/core/tests/test_coordinates.py b/glue/core/tests/test_coordinates.py\n--- a/glue/core/tests/test_coordinates.py\n+++ b/glue/core/tests/test_coordinates.py\n@@ -1,7 +1,6 @@\n #pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103\n import pytest\n \n-from pyfits import Header, Card\n from mock import patch\n import numpy as np\n from numpy.testing import assert_almost_equal\n@@ -12,7 +11,8 @@\n class TestWcsCoordinates(object):\n \n def default_header(self):\n- hdr = Header()\n+ from astropy.io import fits\n+ hdr = fits.Header()\n hdr.update('NAXIS', 2)\n hdr.update('CRVAL1', 0)\n hdr.update('CRVAL2', 5)\n@@ -235,6 +235,7 @@ def test_attribute_error(self):\n \n \n def header_from_string(string):\n+ from astropy.io import fits\n cards = []\n for s in string.splitlines():\n try:\n@@ -247,8 +248,8 @@ def header_from_string(string):\n pass\n except ValueError:\n continue\n- cards.append(Card(key, value))\n- return Header(cards)\n+ cards.append(fits.Card(key, value))\n+ return fits.Header(cards)\n \n \n @pytest.mark.parametrize(('hdr'), (HDR_2D_VALID, HDR_3D_VALID_NOWCS))\ndiff --git a/glue/core/tests/test_subset.py b/glue/core/tests/test_subset.py\n--- a/glue/core/tests/test_subset.py\n+++ b/glue/core/tests/test_subset.py\n@@ -5,7 +5,6 @@\n import pytest\n import numpy as np\n from mock import MagicMock\n-import pyfits\n \n from ..data import Data, ComponentID, Component\n from ..subset import Subset, SubsetState, ElementSubsetState\n@@ -308,7 +307,8 @@ def setup_method(self, method):\n def test_write(self):\n with tempfile.NamedTemporaryFile() as tmp:\n self.subset.write_mask(tmp.name)\n- data = pyfits.open(tmp.name)[0].data\n+ from astropy.io import fits\n+ data = fits.open(tmp.name)[0].data\n expected = np.array([[0, 1, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n", "problem_statement": "", "hints_text": "", "created_at": "2012-09-23T15:03:37Z"}
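The recurring pattern in this patch is a deferred `astropy` import replacing the retired `pyfits` and `pywcs` packages. A sketch of the replacement calls (the filename is hypothetical):

```python
from astropy.io import fits
from astropy import wcs

header = fits.open("example.fits", memmap=True)[0].header  # hypothetical file
coords = wcs.WCS(header)  # same constructor shape as the old pywcs.WCS
```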
PythonDataset/dev/gwells-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "bcgov/gwells", "pull_number": 1031, "instance_id": "bcgov__gwells-1031", "issue_numbers": "", "base_commit": "23f8a27ee1da1673fb8522056ca1d0168fd051c2", "patch": "diff --git a/app/backend/wells/management/__init__.py b/app/backend/wells/management/__init__.py\nnew file mode 100644\ndiff --git a/app/backend/wells/management/commands/__init__.py b/app/backend/wells/management/commands/__init__.py\nnew file mode 100644\ndiff --git a/app/backend/wells/management/commands/export.py b/app/backend/wells/management/commands/export.py\nnew file mode 100644\n--- /dev/null\n+++ b/app/backend/wells/management/commands/export.py\n@@ -0,0 +1,228 @@\n+import csv\n+import zipfile\n+import os\n+import logging\n+import string\n+\n+from django.core.management.base import BaseCommand\n+from django.db import models\n+from django.db import connection\n+\n+from minio import Minio\n+from openpyxl import Workbook\n+\n+from gwells.settings.base import get_env_variable\n+\n+# Run from command line :\n+# python manage.py export\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class Command(BaseCommand):\n+\n+ def handle(self, *args, **options):\n+ logger.info('starting export')\n+ zip_filename = 'gwells.zip'\n+ spreadsheet_filename = 'gwells.xlsx'\n+ self.generate_files(zip_filename, spreadsheet_filename)\n+ self.upload_files(zip_filename, spreadsheet_filename)\n+ logger.info('cleaning up')\n+ for filename in (zip_filename, spreadsheet_filename):\n+ if os.path.exists(filename):\n+ os.remove(filename)\n+ logger.info('export complete')\n+\n+ def upload_files(self, zip_filename, spreadsheet_filename):\n+ minioClient = Minio(get_env_variable('S3_HOST'),\n+ access_key=get_env_variable('S3_PUBLIC_ACCESS_KEY'),\n+ secret_key=get_env_variable('S3_PUBLIC_SECRET_KEY'),\n+ secure=True)\n+ for filename in (zip_filename, spreadsheet_filename):\n+ logger.info('uploading {}'.format(filename))\n+ with open(filename, 'rb') as file_data:\n+ file_stat = os.stat(filename)\n+ # Do we need to remove the existing files 1st?\n+ minioClient.put_object(get_env_variable('S3_WELL_EXPORT_BUCKET'),\n+ filename,\n+ file_data,\n+ file_stat.st_size)\n+\n+ def export(self, workbook, gwells_zip, worksheet_name, cursor):\n+ logger.info('exporting {}'.format(worksheet_name))\n+ worksheet = workbook.create_sheet(worksheet_name)\n+ csv_file = '{}.csv'.format(worksheet_name)\n+ if os.path.exists(csv_file):\n+ os.remove(csv_file)\n+ with open(csv_file, 'w') as csvfile:\n+ csvwriter = csv.writer(csvfile, dialect='excel')\n+\n+ values = []\n+ # Write the headings\n+ for index, field in enumerate(cursor.description):\n+ values.append(field.name)\n+ worksheet.append(values)\n+ csvwriter.writerow(values)\n+\n+ # Write the values\n+ row_index = 0\n+ for row, record in enumerate(cursor.fetchall()):\n+ values = []\n+ num_values = 0\n+ for col, value in enumerate(record):\n+ if not (value == \"\" or value is None):\n+ num_values += 1\n+ if type(value) is str:\n+ # There are lots of non-printable characters in the source data that can cause\n+ # issues in the export, so we have to clear them out.\n+ v = ''.join([s for s in value if s in string.printable])\n+ # We can't have something starting with an = sign,\n+ # it would be interpreted as a formula in excel.\n+ if v.startswith('='):\n+ v = '\\'{}'.format(v)\n+ values.append(v)\n+ else:\n+ values.append(value)\n+ if num_values > 1:\n+ csvwriter.writerow(values)\n+ worksheet.append(values)\n+ gwells_zip.write(csv_file)\n+ if os.path.exists(csv_file):\n+ os.remove(csv_file)\n+\n+ def generate_files(self, 
zip_filename, spreadsheet_filename):\n+ #######\n+ # WELL\n+ #######\n+ well_sql = (\"\"\"select well_tag_number, identification_plate_number,\n+ well_identification_plate_attached,\n+ well_status_code, well.well_class_code,\n+ wsc.well_class_code as well_subclass,\n+ intended_water_use_code, licenced_status_code,\n+ observation_well_number, obs_well_status_code, water_supply_system_name,\n+ water_supply_system_well_name,\n+ street_address, city, legal_lot, legal_plan, legal_district_lot, legal_block,\n+ legal_section, legal_township, legal_range,\n+ land_district_code,\n+ legal_pid,\n+ well_location_description,\n+ latitude, longitude, utm_zone_code, utm_northing, utm_easting,\n+ utm_accuracy_code, bcgs_id,\n+ construction_start_date, construction_end_date, alteration_start_date,\n+ alteration_end_date, decommission_start_date, decommission_end_date,\n+ driller_name, consultant_name, consultant_company,\n+ diameter, total_depth_drilled, finished_well_depth, final_casing_stick_up,\n+ bedrock_depth, ground_elevation, ground_elevation_method_code, static_water_level,\n+ well_yield,\n+ well_yield_unit_code,\n+ artesian_flow, artesian_pressure, well_cap_type, well_disinfected,\n+ drilling_method_code, other_drilling_method, well_orientation,\n+ alternative_specs_submitted,\n+ surface_seal_material_code, surface_seal_method_code, surface_seal_length,\n+ backfill_type,\n+ backfill_depth,\n+ liner_material_code, liner_diameter, liner_thickness, surface_seal_thickness,\n+ liner_from, liner_to,\n+ screen_intake_method_code, screen_type_code, screen_material_code,\n+ other_screen_material,\n+ screen_opening_code, screen_bottom_code, other_screen_bottom, development_method_code,\n+ filter_pack_from,\n+ filter_pack_to, filter_pack_material_code,\n+ filter_pack_thickness,\n+ filter_pack_material_size_code,\n+ development_hours, development_notes,\n+ water_quality_colour, water_quality_odour, ems_id,\n+ decommission_reason, decommission_method_code, decommission_details, sealant_material,\n+ backfill_material,\n+ comments, aquifer_id,\n+ drilling_company.drilling_company_code,\n+ ems,\n+ aquifer_id,\n+ registries_person.surname as driller_responsible\n+ from well\n+ left join well_subclass_code as wsc on wsc.well_subclass_guid = well.well_subclass_guid\n+ left join drilling_company on\n+ drilling_company.drilling_company_guid = well.drilling_company_guid\n+ left join registries_person on\n+ registries_person.person_guid = well.driller_responsible_guid\n+ order by well_tag_number\"\"\")\n+ ###########\n+ # LITHOLOGY\n+ ###########\n+ lithology_sql = (\"\"\"select well_tag_number, lithology_from, lithology_to, lithology_raw_data,\n+ ldc.description as lithology_description_code,\n+ lmc.description as lithology_material_code,\n+ lhc.description as lithology_hardness_code,\n+ lcc.description as lithology_colour_code,\n+ water_bearing_estimated_flow,\n+ well_yield_unit_code, lithology_observation\n+ from lithology_description\n+ left join lithology_description_code as ldc on\n+ ldc.lithology_description_code = lithology_description.lithology_description_code\n+ left join lithology_material_code as lmc on\n+ lmc.lithology_material_code = lithology_description.lithology_material_code\n+ left join lithology_hardness_code as lhc on\n+ lhc.lithology_hardness_code = lithology_description.lithology_hardness_code\n+ left join lithology_colour_code as lcc on\n+ lcc.lithology_colour_code = lithology_description.lithology_colour_code\n+ order by well_tag_number\"\"\")\n+ ########\n+ # CASING\n+ ########\n+ casing_sql = 
(\"\"\"select well_tag_number, casing_from, casing_to, diameter, casing_code,\n+ casing_material_code, wall_thickness, drive_shoe from casing\n+ order by well_tag_number\"\"\")\n+ ########\n+ # SCREEN\n+ ########\n+ screen_sql = (\"\"\"select well_tag_number, screen_from, screen_to, internal_diameter,\n+ screen_assembly_type_code, slot_size from screen\n+ order by well_tag_number\"\"\")\n+ ############\n+ # PRODUCTION\n+ ############\n+ production_sql = (\"\"\"select well_tag_number, yield_estimation_method_code, well_yield_unit_code,\n+ yield_estimation_rate,\n+ yield_estimation_duration, static_level, drawdown,\n+ hydro_fracturing_performed, hydro_fracturing_yield_increase from production_data\n+ order by well_tag_number\"\"\")\n+ ##############\n+ # PERFORATIONS\n+ ##############\n+ perforation_sql = (\"\"\"select well_tag_number, liner_from, liner_to, liner_diameter,\n+ liner_perforation_from, liner_perforation_to, liner_thickness\n+ from\n+ perforation\n+ order by well_tag_number\"\"\")\n+\n+ if os.path.exists(zip_filename):\n+ os.remove(zip_filename)\n+ with zipfile.ZipFile(zip_filename, 'w') as gwells_zip:\n+ if os.path.exists(spreadsheet_filename):\n+ os.remove(spreadsheet_filename)\n+ workbook = Workbook(write_only=True)\n+ # Well\n+ with connection.cursor() as cursor:\n+ cursor.execute(well_sql)\n+ self.export(workbook, gwells_zip, 'well', cursor)\n+ # Lithology\n+ with connection.cursor() as cursor:\n+ cursor.execute(lithology_sql)\n+ self.export(workbook, gwells_zip, 'lithology', cursor)\n+ # Casing\n+ with connection.cursor() as cursor:\n+ cursor.execute(casing_sql)\n+ self.export(workbook, gwells_zip, 'casing', cursor)\n+ # Screen\n+ with connection.cursor() as cursor:\n+ cursor.execute(screen_sql)\n+ self.export(workbook, gwells_zip, 'screen', cursor)\n+ # Production\n+ with connection.cursor() as cursor:\n+ cursor.execute(production_sql)\n+ self.export(workbook, gwells_zip, 'production', cursor)\n+ # Perforation\n+ with connection.cursor() as cursor:\n+ cursor.execute(perforation_sql)\n+ self.export(workbook, gwells_zip, 'perforation', cursor)\n+ workbook.save(filename=spreadsheet_filename)\ndiff --git a/app/backend/wells/urls.py b/app/backend/wells/urls.py\n--- a/app/backend/wells/urls.py\n+++ b/app/backend/wells/urls.py\n@@ -36,6 +36,9 @@\n url(r'^api/v1/wells/(?P<tag>[0-9]+)/files$',\n never_cache(views.ListFiles.as_view()), name='file-list'),\n \n+ # Extract files\n+ url(r'^api/v1/wells/extracts$', views.ListExtracts.as_view(), name='extract-list'),\n+\n # Well list\n url(r'^api/v1/wells/$',\n never_cache(views.WellListAPIView.as_view()), name='well-list'),\ndiff --git a/app/backend/wells/views.py b/app/backend/wells/views.py\n--- a/app/backend/wells/views.py\n+++ b/app/backend/wells/views.py\n@@ -11,6 +11,7 @@\n See the License for the specific language governing permissions and\n limitations under the License.\n \"\"\"\n+from urllib.parse import quote\n \n from django.db.models import Prefetch\n from django.http import Http404\n@@ -24,10 +25,13 @@\n \n from drf_yasg.utils import swagger_auto_schema\n \n+from minio import Minio\n+\n from gwells import settings\n from gwells.models import Survey\n from gwells.roles import WELLS_VIEWER_ROLE, WELLS_EDIT_ROLE\n from gwells.pagination import APILimitOffsetPagination\n+from gwells.settings.base import get_env_variable\n \n from wells.models import Well\n from wells.documents import MinioClient\n@@ -65,6 +69,45 @@ def get_serializer(self, data):\n lookup_field = 'well_tag_number'\n \n \n+class ListExtracts(APIView):\n+ 
\"\"\"\n+ List well extracts\n+\n+ get: list well extracts\n+ \"\"\"\n+ @swagger_auto_schema(auto_schema=None)\n+ def get(self, request):\n+ host = get_env_variable('S3_HOST')\n+ minioClient = Minio(host,\n+ access_key=get_env_variable('S3_PUBLIC_ACCESS_KEY'),\n+ secret_key=get_env_variable('S3_PUBLIC_SECRET_KEY'),\n+ secure=True)\n+ objects = minioClient.list_objects(get_env_variable('S3_WELL_EXPORT_BUCKET'))\n+ urls = list(\n+ map(\n+ lambda document: {\n+ 'url': 'https://{}/{}/{}'.format(host,\n+ quote(document.bucket_name),\n+ quote(document.object_name)),\n+ 'name': document.object_name,\n+ 'size': document.size,\n+ 'last_modified': document.last_modified,\n+ 'description': self.create_description(document.object_name)\n+ }, objects)\n+ )\n+ return Response(urls)\n+\n+ def create_description(self, name):\n+ extension = name[name.rfind('.')+1:]\n+ print(extension)\n+ if extension == 'zip':\n+ return 'ZIP, CSV'\n+ elif extension == 'xlsx':\n+ return 'XLSX'\n+ else:\n+ return None\n+\n+\n class ListFiles(APIView):\n \"\"\"\n List documents associated with a well (e.g. well construction report)\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2018-11-20T23:25:57Z"}
PythonDataset/dev/inky-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "pimoroni/inky", "pull_number": 24, "instance_id": "pimoroni__inky-24", "issue_numbers": "", "base_commit": "5f70ca8db69d8bcea79cb5faf1f0d59f358d555e", "patch": "diff --git a/library/inky/eeprom.py b/library/inky/eeprom.py\n--- a/library/inky/eeprom.py\n+++ b/library/inky/eeprom.py\n@@ -4,7 +4,7 @@\n \n import datetime\n import struct\n-import smbus\n+from smbus2 import SMBus\n \n EEP_ADRESS = 0x50\n EEP_WP = 12\n@@ -95,7 +95,7 @@ def get_color(self):\n def read_eeprom():\n \"\"\"Return a class representing EEPROM contents, or none.\"\"\"\n try:\n- i2c = smbus.SMBus(1)\n+ i2c = SMBus(1)\n i2c.write_i2c_block_data(EEP_ADRESS, 0x00, [0x00])\n return EPDType.from_bytes(i2c.read_i2c_block_data(0x50, 0, 29))\n except IOError:\ndiff --git a/library/inky/inky.py b/library/inky/inky.py\n--- a/library/inky/inky.py\n+++ b/library/inky/inky.py\n@@ -13,7 +13,7 @@\n sys.exit('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n \n try:\n- import smbus\n+ from smbus2 import SMBus\n except ImportError:\n sys.exit('This library requires the SMBus module\\nInstall with: sudo apt install python-smbus')\n \ndiff --git a/library/setup.py b/library/setup.py\n--- a/library/setup.py\n+++ b/library/setup.py\n@@ -53,5 +53,5 @@\n py_modules=[],\n packages=['inky'],\n include_package_data=True,\n- install_requires=['numpy', 'spidev', 'RPi.GPIO']\n+ install_requires=['numpy', 'spidev', 'RPi.GPIO', 'smbus2']\n )\n", "test_patch": "diff --git a/library/tests/test_init.py b/library/tests/test_init.py\n--- a/library/tests/test_init.py\n+++ b/library/tests/test_init.py\n@@ -9,8 +9,8 @@ def mockery():\n sys.modules['RPi'] = mock.Mock()\n sys.modules['RPi.GPIO'] = mock.Mock()\n sys.modules['spidev'] = mock.Mock()\n- sys.modules['smbus'] = mock.Mock()\n- sys.modules['smbus'].SMBus = MockSMBus\n+ sys.modules['smbus2'] = mock.Mock()\n+ sys.modules['smbus2'].SMBus = MockSMBus\n \n \n def test_init_phat_black():\n", "problem_statement": "", "hints_text": "", "created_at": "2019-03-03T02:30:46Z"}
PythonDataset/dev/kinto-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "Kinto/kinto", "pull_number": 2737, "instance_id": "Kinto__kinto-2737", "issue_numbers": "", "base_commit": "6102cf98e3246b44532432df61275858653d9323", "patch": "diff --git a/kinto/core/initialization.py b/kinto/core/initialization.py\n--- a/kinto/core/initialization.py\n+++ b/kinto/core/initialization.py\n@@ -47,11 +47,11 @@ def setup_json_serializer(config):\n import requests\n import webob\n \n- # Monkey patch to use ujson\n+ # Monkey patch to use rapidjson\n webob.request.json = utils.json\n requests.models.json = utils.json\n \n- # Override json renderer using ujson\n+ # Override json renderer using rapidjson\n renderer = JSONRenderer(serializer=utils.json_serializer)\n config.add_renderer(\"ultrajson\", renderer) # See `kinto.core.Service`\n \ndiff --git a/kinto/core/utils.py b/kinto/core/utils.py\n--- a/kinto/core/utils.py\n+++ b/kinto/core/utils.py\n@@ -11,7 +11,7 @@\n from urllib.parse import unquote\n \n import jsonpatch\n-import ujson as json\n+import rapidjson\n from colander import null\n from cornice import cors\n from pyramid import httpexceptions\n@@ -32,8 +32,21 @@\n memcache = None\n \n \n-def json_serializer(v, **kw):\n- return json.dumps(v, escape_forward_slashes=False)\n+class json:\n+ def dumps(v, **kw):\n+ kw.setdefault(\"bytes_mode\", rapidjson.BM_NONE)\n+ return rapidjson.dumps(v, **kw)\n+\n+ def load(v, **kw):\n+ kw.setdefault(\"number_mode\", rapidjson.NM_NATIVE)\n+ return rapidjson.load(v, **kw)\n+\n+ def loads(v, **kw):\n+ kw.setdefault(\"number_mode\", rapidjson.NM_NATIVE)\n+ return rapidjson.loads(v, **kw)\n+\n+\n+json_serializer = json.dumps\n \n \n def strip_whitespace(v):\ndiff --git a/kinto/plugins/quotas/utils.py b/kinto/plugins/quotas/utils.py\n--- a/kinto/plugins/quotas/utils.py\n+++ b/kinto/plugins/quotas/utils.py\n@@ -2,6 +2,6 @@\n \n \n def record_size(record):\n- # We cannot use ultrajson here, since the `separator` option is not available.\n+ # We cannot use rapidjson here, since the `separator` option is not available.\n canonical_json = json.dumps(record, sort_keys=True, separators=(\",\", \":\"))\n return len(canonical_json)\n", "test_patch": "diff --git a/tests/test_views_schema_record.py b/tests/test_views_schema_record.py\n--- a/tests/test_views_schema_record.py\n+++ b/tests/test_views_schema_record.py\n@@ -385,3 +385,27 @@ def setUp(self):\n \n def test_records_are_valid_if_match_schema(self):\n self.app.post_json(RECORDS_URL, {\"data\": {\"title\": \"b\"}}, headers=self.headers, status=400)\n+\n+\n+class RecordsWithLargeNumbers(BaseWebTestWithSchema, unittest.TestCase):\n+ def setUp(self):\n+ super().setUp()\n+ self.app.put_json(COLLECTION_URL, {\"data\": {\"schema\": SCHEMA}}, headers=self.headers)\n+\n+ def test_record_with_number_less_than_64_bits(self):\n+ size = 2 ** 63\n+ self.app.post_json(\n+ RECORDS_URL,\n+ {\"data\": {\"title\": \"Very large file\", \"file\": {\"size\": size}}},\n+ headers=self.headers,\n+ status=201,\n+ )\n+\n+ def test_record_with_number_greater_than_64_bits(self):\n+ size = 2 ** 65\n+ self.app.post_json(\n+ RECORDS_URL,\n+ {\"data\": {\"title\": \"Very large file\", \"file\": {\"size\": size}}},\n+ headers=self.headers,\n+ status=201,\n+ )\n", "problem_statement": "", "hints_text": "", "created_at": "2021-03-04T16:32:35Z"}
PythonDataset/test/NeMo-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "NVIDIA/NeMo", "pull_number": 162, "instance_id": "NVIDIA__NeMo-162", "issue_numbers": "", "base_commit": "f5f09838b96ab48f40d97c100fbcfc5b7f1ac59e", "patch": "diff --git a/collections/nemo_nlp/nemo_nlp/data/data_layers.py b/collections/nemo_nlp/nemo_nlp/data/data_layers.py\n--- a/collections/nemo_nlp/nemo_nlp/data/data_layers.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/data_layers.py\n@@ -683,9 +683,9 @@ def _collate_fn(self, x):\n [np.stack(x, axis=0) for x in components]\n src_ids = torch.Tensor(src_ids).long().to(self._device)\n src_segment_ids = torch.Tensor(src_segment_ids).long().to(self._device)\n- src_mask = torch.Tensor(src_mask).float().to(self._device)\n+ src_mask = torch.Tensor(src_mask).long().to(self._device)\n tgt_ids = torch.Tensor(tgt_ids).long().to(self._device)\n- tgt_mask = torch.Tensor(tgt_mask).float().to(self._device)\n+ tgt_mask = torch.Tensor(tgt_mask).long().to(self._device)\n sent_ids = torch.Tensor(sent_ids).long().to(self._device)\n return src_ids, src_segment_ids, src_mask, tgt_ids, tgt_mask, sent_ids\n \ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py b/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py\n@@ -249,7 +249,7 @@ def truncate_seq_pair(a, b, max_num_tokens):\n \n input_ids, output_mask = self.mask_ids(output_ids)\n \n- input_mask = np.zeros(self.max_seq_length, dtype=np.float32)\n+ input_mask = np.zeros(self.max_seq_length, dtype=np.long)\n input_mask[:len(input_ids)] = 1\n \n input_type_ids = np.zeros(self.max_seq_length, dtype=np.int)\n@@ -263,7 +263,7 @@ def truncate_seq_pair(a, b, max_num_tokens):\n \n # TODO: wrap the return value with () for consistent style.\n return np.array(input_ids), input_type_ids,\\\n- np.array(input_mask, dtype=np.float32), np.array(output_ids),\\\n+ np.array(input_mask, dtype=np.long), np.array(output_ids),\\\n np.array(output_mask, dtype=np.float32), is_next\n \n def mask_ids(self, ids):\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py b/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py\n@@ -55,7 +55,7 @@ def __getitem__(self, idx):\n feature = self.features[idx]\n return (np.array(feature.input_ids),\n np.array(feature.segment_ids),\n- np.array(feature.input_mask, dtype=np.float32),\n+ np.array(feature.input_mask, dtype=np.long),\n np.array(feature.label_id))\n \n \ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py b/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py\n@@ -214,7 +214,7 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx]),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n self.all_intents[idx],\n@@ -263,6 +263,6 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx], dtype=np.float32),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n 
np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]))\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py b/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py\n@@ -386,7 +386,7 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx], dtype=np.float32),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n np.array(self.punct_all_labels[idx]),\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py b/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py\n@@ -115,7 +115,7 @@ def __getitem__(self, idx):\n \n return (np.array(feature.input_ids),\n np.array(feature.segment_ids),\n- np.array(feature.input_mask, dtype=np.float32),\n+ np.array(feature.input_mask, dtype=np.long),\n feature.sent_label)\n \n def convert_sequences_to_features(self,\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py b/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py\n@@ -333,7 +333,7 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx], dtype=np.float32),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n np.array(self.all_labels[idx]))\n@@ -377,6 +377,6 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx], dtype=np.float32),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]))\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py b/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py\n--- a/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py\n@@ -1,5 +1,5 @@\n from .tokenizer_spec import TokenizerSpec\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n import re\n \n \ndiff --git a/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py b/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py\n--- a/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py\n@@ -1,5 +1,5 @@\n from .tokenizer_spec import TokenizerSpec\n-from pytorch_transformers import GPT2Tokenizer\n+from transformers import GPT2Tokenizer\n \n \n class NemoGPT2Tokenizer(TokenizerSpec):\ndiff --git a/collections/nemo_nlp/nemo_nlp/huggingface/bert.py b/collections/nemo_nlp/nemo_nlp/huggingface/bert.py\n--- a/collections/nemo_nlp/nemo_nlp/huggingface/bert.py\n+++ 
b/collections/nemo_nlp/nemo_nlp/huggingface/bert.py\n@@ -1,10 +1,10 @@\n # Copyright (c) 2019 NVIDIA Corporation\n from typing import Optional, List\n \n-from pytorch_transformers import (BertConfig,\n- BertModel,\n- BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n- BERT_PRETRAINED_CONFIG_ARCHIVE_MAP)\n+from transformers import (BertConfig,\n+ BertModel,\n+ BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n+ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP)\n \n from nemo.backends.pytorch.nm import TrainableNM\n from nemo.core.neural_modules import PretrainedModelInfo\n@@ -18,7 +18,7 @@\n class BERT(TrainableNM):\n \"\"\"\n BERT wraps around the Huggingface implementation of BERT from their\n- pytorch-transformers repository for easy use within NeMo.\n+ transformers repository for easy use within NeMo.\n \n Args:\n pretrained_model_name (str): If using a pretrained model, this should\ndiff --git a/collections/nemo_nlp/setup.py b/collections/nemo_nlp/setup.py\n--- a/collections/nemo_nlp/setup.py\n+++ b/collections/nemo_nlp/setup.py\n@@ -25,7 +25,7 @@\n 'python-dateutil<2.8.1,>=2.1',\n 'boto3',\n 'unidecode',\n- 'pytorch-transformers',\n+ 'transformers',\n 'matplotlib',\n 'h5py',\n 'youtokentome'\ndiff --git a/examples/nlp/joint_intent_slot_infer.py b/examples/nlp/joint_intent_slot_infer.py\n--- a/examples/nlp/joint_intent_slot_infer.py\n+++ b/examples/nlp/joint_intent_slot_infer.py\n@@ -2,7 +2,7 @@\n import os\n \n import numpy as np\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n from sklearn.metrics import confusion_matrix, classification_report\n \n import nemo\ndiff --git a/examples/nlp/joint_intent_slot_infer_b1.py b/examples/nlp/joint_intent_slot_infer_b1.py\n--- a/examples/nlp/joint_intent_slot_infer_b1.py\n+++ b/examples/nlp/joint_intent_slot_infer_b1.py\n@@ -1,7 +1,7 @@\n import argparse\n \n import numpy as np\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n \n import nemo\n import nemo_nlp\ndiff --git a/examples/nlp/joint_intent_slot_with_bert.py b/examples/nlp/joint_intent_slot_with_bert.py\n--- a/examples/nlp/joint_intent_slot_with_bert.py\n+++ b/examples/nlp/joint_intent_slot_with_bert.py\n@@ -3,7 +3,7 @@\n import os\n \n import numpy as np\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n \n import nemo\n from nemo.utils.lr_policies import get_lr_policy\ndiff --git a/examples/nlp/sentence_classification_with_bert.py b/examples/nlp/sentence_classification_with_bert.py\n--- a/examples/nlp/sentence_classification_with_bert.py\n+++ b/examples/nlp/sentence_classification_with_bert.py\n@@ -2,7 +2,7 @@\n import math\n \n import numpy as np\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n from torch import nn\n import torch\n \ndiff --git a/nemo/nemo/backends/pytorch/nm.py b/nemo/nemo/backends/pytorch/nm.py\n--- a/nemo/nemo/backends/pytorch/nm.py\n+++ b/nemo/nemo/backends/pytorch/nm.py\n@@ -36,7 +36,7 @@ def __init__(self, **kwargs):\n nn.Module.__init__(self) # For PyTorch API\n self._device = get_cuda_device(self.placement)\n \n- def __call__(self, force_pt=False, *input, **kwargs):\n+ def __call__(self, *input, force_pt=False, **kwargs):\n pt_call = len(input) > 0 or force_pt\n if pt_call:\n return nn.Module.__call__(self, *input, **kwargs)\ndiff --git a/scripts/get_decoder_params_from_bert.py b/scripts/get_decoder_params_from_bert.py\n--- a/scripts/get_decoder_params_from_bert.py\n+++ b/scripts/get_decoder_params_from_bert.py\n@@ -1,6 +1,6 @@\n 
import torch\n-from pytorch_transformers import BERT_PRETRAINED_MODEL_ARCHIVE_MAP\n-from pytorch_transformers.file_utils import cached_path\n+from transformers import BERT_PRETRAINED_MODEL_ARCHIVE_MAP\n+from transformers.file_utils import cached_path\n import argparse\n \n state_dict_mappings = {\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2019-12-03T01:19:14Z"}
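Two changes run through this patch: attention masks move from float32 to integer dtype, and imports move from the renamed `pytorch-transformers` package to `transformers`. A sketch of the import after the rename (the checkpoint name is an assumption):

```python
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # hypothetical checkpoint
tokens = tokenizer.tokenize("masks are integer typed after this patch")
```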
PythonDataset/test/PypeIt-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "pypeit/PypeIt", "pull_number": 1121, "instance_id": "pypeit__PypeIt-1121", "issue_numbers": "", "base_commit": "c755167f5b1b09fdffa8b22ebdcccc6521fdd564", "patch": "diff --git a/pypeit/datamodel.py b/pypeit/datamodel.py\n--- a/pypeit/datamodel.py\n+++ b/pypeit/datamodel.py\n@@ -946,7 +946,7 @@ def _parse(cls, hdu, ext=None, transpose_table_arrays=False, hdu_prefix=None):\n dm_version_passed &= hdu[hduindx].header['DMODVER'] == cls.version\n # Grab it\n _d[e] = _hdu[hduindx].data if isinstance(hdu[hduindx], fits.ImageHDU) \\\n- else Table.read(hdu[hduindx])\n+ else Table.read(hdu[hduindx]).copy()\n \n for e in _ext:\n if 'DMODCLS' not in _hdu[e].header.keys() or 'DMODVER' not in _hdu[e].header.keys() \\\ndiff --git a/pypeit/scripts/run_pypeit.py b/pypeit/scripts/run_pypeit.py\n--- a/pypeit/scripts/run_pypeit.py\n+++ b/pypeit/scripts/run_pypeit.py\n@@ -117,6 +117,7 @@ def main(args):\n # QA HTML\n msgs.info('Generating QA HTML')\n pypeIt.build_qa()\n+ msgs.close()\n \n return 0\n \ndiff --git a/pypeit/scripts/show_1dspec.py b/pypeit/scripts/show_1dspec.py\n--- a/pypeit/scripts/show_1dspec.py\n+++ b/pypeit/scripts/show_1dspec.py\n@@ -28,7 +28,7 @@ def main(args):\n import sys\n import numpy as np\n \n- from PySide2.QtWidgets import QApplication\n+ from qtpy.QtWidgets import QApplication\n \n from linetools.guis.xspecgui import XSpecGui\n \n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2021-01-07T20:52:53Z"}
PythonDataset/test/Userge-Plugins-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "UsergeTeam/Userge-Plugins", "pull_number": 186, "instance_id": "UsergeTeam__Userge-Plugins-186", "issue_numbers": "", "base_commit": "1969de7afbe06cc3b0248b09f48fe9f85ffef07c", "patch": "diff --git a/plugins/info.py b/plugins/info.py\n--- a/plugins/info.py\n+++ b/plugins/info.py\n@@ -3,7 +3,8 @@\n # By @Krishna_Singhal\n \n import spamwatch\n-import requests\n+import aiohttp\n+import json\n from datetime import datetime\n \n from userge import userge, Config, Message, get_collection\n@@ -55,27 +56,41 @@ async def info(msg: Message):\n user_info += \"\\n**SpamWatch Banned** : `False`\\n\"\n else:\n user_info += \"\\n**SpamWatch Banned** : `True`\\n\"\n- user_info += f\"**\u2022Reason** : `{status.reason or None}`\\n\"\n- user_info += f\"**\u2022Message** : `{status.message or None}`\\n\"\n+ user_info += f\" **\u25cf Reason** : `{status.reason or None}`\\n\"\n+ user_info += f\" **\u25cf Message** : `{status.message or None}`\\n\"\n else:\n- user_info += \"\\n**SpamWatch Banned** : `To get this Info, Set Var`\\n\"\n- cas_banned = requests.get(f'https://api.cas.chat/check?user_id={user.id}').json()\n+ user_info += \"\\n**SpamWatch Banned** : `to get this Info, set var`\\n\"\n+\n+ async with aiohttp.ClientSession() as ses:\n+ async with ses.get(\n+ f\"https://api.intellivoid.net/spamprotection/v1/lookup?query={user_id}\"\n+ ) as i_v:\n+ iv = json.loads(await i_v.text)\n+ async with ses.get(f'https://api.cas.chat/check?user_id={user.id}') as c_s:\n+ cas_banned = json.loads(await c_s.text)\n+ user_gbanned = await GBAN_USER_BASE.find_one({'user_id': user.id})\n+ user_gmuted = await GMUTE_USER_BASE.find_one({'user_id': user.id})\n+\n+ if iv['success'] and iv['results']['attributes']['is_blacklisted'] is True:\n+ reason = iv['results']['attributes']['blacklist_reason']\n+ user_info += \"**Intellivoid SpamProtection** : `True`\\n\"\n+ user_info += f\" **\u25cf Reason** : `{reason}`\\n\"\n+ else:\n+ user_info += \"**Intellivoid SpamProtection** : `False`\\n\"\n if cas_banned['ok']:\n reason = cas_banned['result']['messages'][0] or None\n user_info += \"**AntiSpam Banned** : `True`\\n\"\n- user_info += f\"**\u2022Reason** : `{reason}`\\n\"\n+ user_info += f\" **\u25cf Reason** : `{reason}`\\n\"\n else:\n user_info += \"**AntiSpam Banned** : `False`\\n\"\n- user_gmuted = await GMUTE_USER_BASE.find_one({'user_id': user.id})\n if user_gmuted:\n user_info += \"**User GMuted** : `True`\\n\"\n- user_info += f\"**\u2022Reason** : `{user_gmuted['reason'] or None}`\\n\"\n+ user_info += f\" **\u25cf Reason** : `{user_gmuted['reason'] or None}`\\n\"\n else:\n user_info += \"**User GMuted** : `False`\\n\"\n- user_gbanned = await GBAN_USER_BASE.find_one({'user_id': user.id})\n if user_gbanned:\n user_info += \"**User GBanned** : `True`\\n\"\n- user_info += f\"**\u2022Reason** : `{user_gbanned['reason'] or None}`\"\n+ user_info += f\" **\u25cf Reason** : `{user_gbanned['reason'] or None}`\"\n else:\n user_info += \"**User Gbanned** : `False`\"\n await msg.edit_or_send_as_file(text=user_info, disable_web_page_preview=True)\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2021-02-17T06:03:59Z"}
PythonDataset/test/decomp-permuter-task-instances.jsonl.all ADDED
The diff for this file is too large to render. See raw diff
 
PythonDataset/test/faucet-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "faucetsdn/faucet", "pull_number": 469, "instance_id": "faucetsdn__faucet-469", "issue_numbers": "", "base_commit": "0e0ebac27ba98d0eaa88b24e645216489161305a", "patch": "diff --git a/src/ryu_faucet/org/onfsdn/faucet/config_parser.py b/src/ryu_faucet/org/onfsdn/faucet/config_parser.py\n--- a/src/ryu_faucet/org/onfsdn/faucet/config_parser.py\n+++ b/src/ryu_faucet/org/onfsdn/faucet/config_parser.py\n@@ -112,7 +112,7 @@ def _dp_parser_v2(logger, acls_conf, dps_conf, routers_conf, vlans_conf):\n except AssertionError as err:\n logger.exception('Error in config file: %s', err)\n return None\n- for port in ports.itervalues():\n+ for port in ports.values():\n dp.add_port(port)\n for acl_ident, acl in acls:\n dp.add_acl(acl_ident, acl)\ndiff --git a/src/ryu_faucet/org/onfsdn/faucet/faucet.py b/src/ryu_faucet/org/onfsdn/faucet/faucet.py\n--- a/src/ryu_faucet/org/onfsdn/faucet/faucet.py\n+++ b/src/ryu_faucet/org/onfsdn/faucet/faucet.py\n@@ -22,7 +22,7 @@\n import random\n import signal\n \n-import ipaddr\n+import ipaddress\n \n from config_parser import dp_parser\n from config_parser_util import config_file_hash\n@@ -216,8 +216,8 @@ def _bgp_route_handler(self, path_change, vlan):\n path_change (ryu.services.protocols.bgp.bgpspeaker.EventPrefix): path change\n vlan (vlan): Valve VLAN this path change was received for.\n \"\"\"\n- prefix = ipaddr.IPNetwork(path_change.prefix)\n- nexthop = ipaddr.IPAddress(path_change.nexthop)\n+ prefix = ipaddress.ip_network(unicode(path_change.prefix))\n+ nexthop = ipaddress.ip_address(unicode(path_change.nexthop))\n withdraw = path_change.is_withdraw\n flowmods = []\n valve = self.valves[vlan.dp_id]\n@@ -235,8 +235,7 @@ def _bgp_route_handler(self, path_change, vlan):\n \n if withdraw:\n self.logger.info(\n- 'BGP withdraw %s nexthop %s',\n- prefix, nexthop)\n+ 'BGP withdraw %s nexthop %s', prefix, nexthop)\n flowmods = valve.del_route(vlan, prefix)\n else:\n self.logger.info(\n@@ -260,9 +259,8 @@ def _create_bgp_speaker_for_vlan(self, vlan):\n bgp_server_port=vlan.bgp_port,\n best_path_change_handler=handler)\n for faucet_vip in vlan.faucet_vips:\n- prefix = ipaddr.IPNetwork(faucet_vip.exploded)\n bgp_speaker.prefix_add(\n- prefix=str(prefix), next_hop=str(faucet_vip.ip))\n+ prefix=str(faucet_vip), next_hop=str(faucet_vip.ip))\n for route_table in (vlan.ipv4_routes, vlan.ipv6_routes):\n for ip_dst, ip_gw in route_table.items():\n bgp_speaker.prefix_add(\n@@ -284,9 +282,9 @@ def _reset_bgp(self):\n if dp_id not in self.dp_bgp_speakers:\n self.dp_bgp_speakers[dp_id] = {}\n bgp_speakers = self.dp_bgp_speakers[dp_id]\n- for bgp_speaker in bgp_speakers.itervalues():\n+ for bgp_speaker in bgp_speakers.values():\n bgp_speaker.shutdown()\n- for vlan in valve.dp.vlans.itervalues():\n+ for vlan in valve.dp.vlans.values():\n if vlan.bgp_as:\n bgp_speakers[vlan] = self._create_bgp_speaker_for_vlan(vlan)\n \ndiff --git a/src/ryu_faucet/org/onfsdn/faucet/valve.py b/src/ryu_faucet/org/onfsdn/faucet/valve.py\n--- a/src/ryu_faucet/org/onfsdn/faucet/valve.py\n+++ b/src/ryu_faucet/org/onfsdn/faucet/valve.py\n@@ -514,7 +514,7 @@ def _add_ports_and_vlans(self, discovered_port_nums):\n all_port_nums.add(port.number)\n \n # add vlan ports\n- for vlan in self.dp.vlans.itervalues():\n+ for vlan in self.dp.vlans.values():\n ofmsgs.extend(self._add_vlan(vlan, all_port_nums))\n \n # add any ports discovered but not configured\n@@ -955,7 +955,7 @@ def host_expire(self):\n if not self.dp.running:\n return\n now = time.time()\n- for vlan in self.dp.vlans.itervalues():\n+ for vlan in 
self.dp.vlans.values():\n self.host_manager.expire_hosts_from_vlan(vlan, now)\n \n def _get_config_changes(self, new_dp):\n@@ -1107,7 +1107,7 @@ def resolve_gateways(self):\n return []\n ofmsgs = []\n now = time.time()\n- for vlan in self.dp.vlans.itervalues():\n+ for vlan in self.dp.vlans.values():\n ofmsgs.extend(self.ipv4_route_manager.resolve_gateways(vlan, now))\n ofmsgs.extend(self.ipv6_route_manager.resolve_gateways(vlan, now))\n return ofmsgs\n@@ -1122,7 +1122,7 @@ def get_config_dict(self):\n self.dp.name: self.dp.to_conf()\n }\n vlans_dict = {}\n- for vlan in self.dp.vlans.itervalues():\n+ for vlan in self.dp.vlans.values():\n vlans_dict[vlan.name] = vlan.to_conf()\n acls_dict = {}\n for acl_id, acl in self.dp.acls.items():\ndiff --git a/src/ryu_faucet/org/onfsdn/faucet/valve_flood.py b/src/ryu_faucet/org/onfsdn/faucet/valve_flood.py\n--- a/src/ryu_faucet/org/onfsdn/faucet/valve_flood.py\n+++ b/src/ryu_faucet/org/onfsdn/faucet/valve_flood.py\n@@ -45,7 +45,7 @@ def __init__(self, flood_table, flood_priority,\n self.stack = dp_stack\n self.use_group_table = use_group_table\n self.stack_ports = [\n- port for port in dp_ports.itervalues() if port.stack is not None]\n+ port for port in dp_ports.values() if port.stack is not None]\n self.towards_root_stack_ports = []\n self.away_from_root_stack_ports = []\n my_root_distance = dp_shortest_path_to_root()\ndiff --git a/src/ryu_faucet/org/onfsdn/faucet/valve_of.py b/src/ryu_faucet/org/onfsdn/faucet/valve_of.py\n--- a/src/ryu_faucet/org/onfsdn/faucet/valve_of.py\n+++ b/src/ryu_faucet/org/onfsdn/faucet/valve_of.py\n@@ -17,6 +17,7 @@\n # limitations under the License.\n \n from collections import namedtuple\n+import ipaddress\n \n from ryu.lib import ofctl_v1_3 as ofctl\n from ryu.ofproto import ether\n@@ -244,6 +245,13 @@ def match_from_dict(match_dict):\n return acl_match\n \n \n+def _match_ip_masked(ip):\n+ if isinstance(ip, ipaddress.IPv4Network) or isinstance(ip, ipaddress.IPv6Network):\n+ return (str(ip.network_address), str(ip.netmask))\n+ else:\n+ return (str(ip.ip), str(ip.netmask))\n+\n+\n def build_match_dict(in_port=None, vlan=None,\n eth_type=None, eth_src=None,\n eth_dst=None, eth_dst_mask=None,\n@@ -270,13 +278,13 @@ def build_match_dict(in_port=None, vlan=None,\n if nw_proto is not None:\n match_dict['ip_proto'] = nw_proto\n if nw_src is not None:\n- match_dict['ipv4_src'] = (str(nw_src.ip), str(nw_src.netmask))\n+ match_dict['ipv4_src'] = _match_ip_masked(nw_src)\n if icmpv6_type is not None:\n match_dict['icmpv6_type'] = icmpv6_type\n if ipv6_nd_target is not None:\n match_dict['ipv6_nd_target'] = str(ipv6_nd_target.ip)\n if nw_dst is not None:\n- nw_dst_masked = (str(nw_dst.ip), str(nw_dst.netmask))\n+ nw_dst_masked = _match_ip_masked(nw_dst)\n if eth_type == ether.ETH_TYPE_ARP:\n match_dict['arp_tpa'] = nw_dst_masked\n elif eth_type == ether.ETH_TYPE_IP:\ndiff --git a/src/ryu_faucet/org/onfsdn/faucet/valve_packet.py b/src/ryu_faucet/org/onfsdn/faucet/valve_packet.py\n--- a/src/ryu_faucet/org/onfsdn/faucet/valve_packet.py\n+++ b/src/ryu_faucet/org/onfsdn/faucet/valve_packet.py\n@@ -16,7 +16,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import ipaddr\n+import ipaddress\n \n from ryu.lib import mac\n from ryu.lib.packet import arp, ethernet, icmp, icmpv6, ipv4, ipv6, packet, vlan\n@@ -78,8 +78,8 @@ def arp_request(eth_src, vid, src_ip, dst_ip):\n Args:\n eth_src (str): Ethernet source address.\n vid (int or None): VLAN VID to use (or None).\n- src_ip 
(ipaddr.IPv4Address): source IPv4 address.\n- dst_ip (ipaddr.IPv4Address): requested IPv4 address.\n+ src_ip (ipaddress.IPv4Address): source IPv4 address.\n+ dst_ip (ipaddress.IPv4Address): requested IPv4 address.\n Returns:\n ryu.lib.packet.arp: serialized ARP request packet.\n \"\"\"\n@@ -99,8 +99,8 @@ def arp_reply(eth_src, eth_dst, vid, src_ip, dst_ip):\n eth_src (str): Ethernet source address.\n eth_dst (str): destination Ethernet MAC address.\n vid (int or None): VLAN VID to use (or None).\n- src_ip (ipaddr.IPv4Address): source IPv4 address.\n- dst_ip (ipaddr.IPv4Address): destination IPv4 address.\n+ src_ip (ipaddress.IPv4Address): source IPv4 address.\n+ dst_ip (ipaddress.IPv4Address): destination IPv4 address.\n Returns:\n ryu.lib.packet.arp: serialized ARP reply packet.\n \"\"\"\n@@ -120,8 +120,8 @@ def echo_reply(eth_src, eth_dst, vid, src_ip, dst_ip, data):\n eth_src (str): Ethernet source address.\n eth_dst (str): destination Ethernet MAC address.\n vid (int or None): VLAN VID to use (or None).\n- src_ip (ipaddr.IPv4Address): source IPv4 address.\n- dst_ip (ipaddr.IPv4Address): destination IPv4 address.\n+ src_ip (ipaddress.IPv4Address): source IPv4 address.\n+ dst_ip (ipaddress.IPv4Address): destination IPv4 address.\n Returns:\n ryu.lib.packet.icmp: serialized ICMP echo reply packet.\n \"\"\"\n@@ -143,11 +143,11 @@ def ipv6_link_eth_mcast(dst_ip):\n See RFC 2464 section 7.\n \n Args:\n- dst_ip (ipaddr.IPv6Address): IPv6 address.\n+ dst_ip (ipaddress.IPv6Address): IPv6 address.\n Returns:\n str: Ethernet multicast address.\n \"\"\"\n- mcast_mac_bytes = ipaddr.Bytes('\\x33\\x33') + dst_ip.packed[-4:]\n+ mcast_mac_bytes = b'\\x33\\x33' + dst_ip.packed[-4:]\n mcast_mac = ':'.join(['%02X' % ord(x) for x in mcast_mac_bytes])\n return mcast_mac\n \n@@ -158,14 +158,13 @@ def ipv6_solicited_node_from_ucast(ucast):\n See RFC 3513 section 2.7.1.\n \n Args:\n- ucast (ipaddr.IPv6Address): IPv6 unicast address.\n+ ucast (ipaddress.IPv6Address): IPv6 unicast address.\n Returns:\n- ipaddr.IPv6Address: IPv6 solicited node multicast address.\n+ ipaddress.IPv6Address: IPv6 solicited node multicast address.\n \"\"\"\n- link_mcast_prefix = ipaddr.IPv6Network('ff02::1:ff00:0/104')\n- mcast_bytes = ipaddr.Bytes(\n- link_mcast_prefix.packed[:13] + ucast.packed[-3:])\n- link_mcast = ipaddr.IPv6Address(mcast_bytes)\n+ link_mcast_prefix = ipaddress.ip_interface(u'ff02::1:ff00:0/104')\n+ mcast_bytes = link_mcast_prefix.packed[:13] + ucast.packed[-3:]\n+ link_mcast = ipaddress.IPv6Address(mcast_bytes)\n return link_mcast\n \n \n@@ -175,8 +174,8 @@ def nd_request(eth_src, vid, src_ip, dst_ip):\n Args:\n eth_src (str): source Ethernet MAC address.\n vid (int or None): VLAN VID to use (or None).\n- src_ip (ipaddr.IPv6Address): source IPv6 address.\n- dst_ip (ipaddr.IPv6Address): requested IPv6 address.\n+ src_ip (ipaddress.IPv6Address): source IPv6 address.\n+ dst_ip (ipaddress.IPv6Address): requested IPv6 address.\n Returns:\n ryu.lib.packet.ethernet: Serialized IPv6 neighbor discovery packet.\n \"\"\"\n@@ -203,8 +202,8 @@ def nd_reply(eth_src, eth_dst, vid, src_ip, dst_ip, hop_limit):\n eth_src (str): source Ethernet MAC address.\n eth_dst (str): destination Ethernet MAC address.\n vid (int or None): VLAN VID to use (or None).\n- src_ip (ipaddr.IPv6Address): source IPv6 address.\n- dst_ip (ipaddr.IPv6Address): destination IPv6 address.\n+ src_ip (ipaddress.IPv6Address): source IPv6 address.\n+ dst_ip (ipaddress.IPv6Address): destination IPv6 address.\n hop_limit (int): IPv6 hop limit.\n Returns:\n 
ryu.lib.packet.ethernet: Serialized IPv6 neighbor discovery packet.\n@@ -235,8 +234,8 @@ def icmpv6_echo_reply(eth_src, eth_dst, vid, src_ip, dst_ip, hop_limit,\n eth_src (str): source Ethernet MAC address.\n eth_dst (str): destination Ethernet MAC address.\n vid (int or None): VLAN VID to use (or None).\n- src_ip (ipaddr.IPv6Address): source IPv6 address.\n- dst_ip (ipaddr.IPv6Address): destination IPv6 address.\n+ src_ip (ipaddress.IPv6Address): source IPv6 address.\n+ dst_ip (ipaddress.IPv6Address): destination IPv6 address.\n hop_limit (int): IPv6 hop limit.\n id_ (int): identifier for echo reply.\n seq (int): sequence number for echo reply.\ndiff --git a/src/ryu_faucet/org/onfsdn/faucet/valve_route.py b/src/ryu_faucet/org/onfsdn/faucet/valve_route.py\n--- a/src/ryu_faucet/org/onfsdn/faucet/valve_route.py\n+++ b/src/ryu_faucet/org/onfsdn/faucet/valve_route.py\n@@ -18,7 +18,7 @@\n \n import time\n \n-import ipaddr\n+import ipaddress\n \n from ryu.lib.packet import arp, icmp, icmpv6, ipv4, ipv6\n from ryu.ofproto import ether\n@@ -131,7 +131,7 @@ def _add_resolved_route(self, vlan, ip_gw, ip_dst, eth_dst, is_updated):\n in_match = self.valve_in_match(\n self.fib_table, vlan=vlan,\n eth_type=self._eth_type(), nw_dst=ip_dst)\n- prefixlen = ipaddr.IPNetwork(ip_dst).prefixlen\n+ prefixlen = ipaddress.ip_network(ip_dst).prefixlen\n priority = self.route_priority + prefixlen\n if is_updated:\n self.logger.info(\n@@ -227,7 +227,7 @@ def _vlan_ip_gws(self, vlan):\n ip_gws = []\n for ip_gw in set(routes.values()):\n for faucet_vip in vlan.faucet_vips:\n- if ip_gw in faucet_vip:\n+ if ip_gw in faucet_vip.network:\n ip_gws.append((ip_gw, faucet_vip))\n return ip_gws\n \n@@ -282,7 +282,7 @@ def _is_host_fib_route(self, vlan, host_ip):\n \n Args:\n vlan (vlan): VLAN containing this RIB/FIB.\n- ip_gw (ipaddr.IPAddress): potential host FIB route.\n+ ip_gw (ipaddress.ip_address): potential host FIB route.\n Returns:\n True if a host FIB route (and not used as a gateway).\n \"\"\"\n@@ -348,13 +348,18 @@ def _cached_nexthop_eth_dst(self, vlan, ip_gw):\n return nexthop_cache_entry.eth_src\n return None\n \n+ def _host_from_faucet_vip(self, faucet_vip):\n+ max_prefixlen = faucet_vip.ip.max_prefixlen\n+ return ipaddress.ip_interface(\n+ u'/'.join((faucet_vip.ip.exploded, str(max_prefixlen))))\n+\n def add_route(self, vlan, ip_gw, ip_dst):\n \"\"\"Add a route to the RIB.\n \n Args:\n vlan (vlan): VLAN containing this RIB.\n- ip_gw (ipaddr.IPAddress): IP address of nexthop.\n- ip_dst (ipaddr.IPNetwork): destination IP network.\n+ ip_gw (ipaddress.ip_address): IP address of nexthop.\n+ ip_dst (ipaddress.ip_network): destination IP network.\n Returns:\n list: OpenFlow messages.\n \"\"\"\n@@ -376,11 +381,11 @@ def _add_host_fib_route(self, vlan, host_ip):\n \n Args:\n vlan (vlan): VLAN containing this RIB.\n- host_ip (ipaddr.IPAddress): IP address of host.\n+ host_ip (ipaddress.ip_address): IP address of host.\n Returns:\n list: OpenFlow messages.\n \"\"\"\n- host_route = ipaddr.IPNetwork(host_ip.exploded)\n+ host_route = ipaddress.ip_network(host_ip.exploded)\n return self.add_route(vlan, host_ip, host_route)\n \n def _del_host_fib_route(self, vlan, host_ip):\n@@ -388,11 +393,11 @@ def _del_host_fib_route(self, vlan, host_ip):\n \n Args:\n vlan (vlan): VLAN containing this RIB.\n- host_ip (ipaddr.IPAddress): IP address of host.\n+ host_ip (ipaddress.ip_address): IP address of host.\n Returns:\n list: OpenFlow messages.\n \"\"\"\n- host_route = ipaddr.IPNetwork(host_ip.exploded)\n+ host_route = 
ipaddress.ip_network(host_ip.exploded)\n return self.del_route(vlan, host_route)\n \n def _ip_pkt(self, pkt):\n@@ -426,7 +431,7 @@ def add_host_fib_route_from_pkt(self, pkt_meta):\n ip_pkt = self._ip_pkt(pkt_meta.pkt)\n ofmsgs = []\n if ip_pkt:\n- src_ip = ipaddr.IPAddress(ip_pkt.src)\n+ src_ip = ipaddress.ip_address(unicode(ip_pkt.src))\n if src_ip and pkt_meta.vlan.ip_in_vip_subnet(src_ip):\n now = time.time()\n nexthop_fresh = self._nexthop_fresh(pkt_meta.vlan, src_ip, now)\n@@ -444,7 +449,7 @@ def del_route(self, vlan, ip_dst):\n \n Args:\n vlan (vlan): VLAN containing this RIB.\n- ip_dst (ipaddr.IPNetwork): destination IP network.\n+ ip_dst (ipaddress.ip_network): destination IP network.\n Returns:\n list: OpenFlow messages.\n \"\"\"\n@@ -485,9 +490,8 @@ def _ip_pkt(self, pkt):\n \n def add_faucet_vip(self, vlan, faucet_vip):\n ofmsgs = []\n- faucet_vip_net = ipaddr.IPNetwork(faucet_vip.exploded)\n- faucet_vip_host = ipaddr.IPNetwork(faucet_vip.ip)\n- max_prefixlen = faucet_vip_host.prefixlen\n+ max_prefixlen = faucet_vip.ip.max_prefixlen\n+ faucet_vip_host = self._host_from_faucet_vip(faucet_vip)\n priority = self.route_priority + max_prefixlen\n ofmsgs.append(self.valve_flowmod(\n self.eth_src_table,\n@@ -516,14 +520,14 @@ def add_faucet_vip(self, vlan, faucet_vip):\n vlan=vlan,\n eth_type=self._eth_type(),\n nw_proto=inet.IPPROTO_ICMP,\n- nw_src=faucet_vip_net,\n+ nw_src=faucet_vip,\n nw_dst=faucet_vip_host),\n priority=priority))\n return ofmsgs\n \n def _control_plane_arp_handler(self, pkt_meta, arp_pkt):\n- src_ip = ipaddr.IPv4Address(arp_pkt.src_ip)\n- dst_ip = ipaddr.IPv4Address(arp_pkt.dst_ip)\n+ src_ip = ipaddress.IPv4Address(unicode(arp_pkt.src_ip))\n+ dst_ip = ipaddress.IPv4Address(unicode(arp_pkt.dst_ip))\n vlan = pkt_meta.vlan\n opcode = arp_pkt.opcode\n ofmsgs = []\n@@ -550,8 +554,8 @@ def _control_plane_arp_handler(self, pkt_meta, arp_pkt):\n return ofmsgs\n \n def _control_plane_icmp_handler(self, pkt_meta, ipv4_pkt, icmp_pkt):\n- src_ip = ipaddr.IPv4Address(ipv4_pkt.src)\n- dst_ip = ipaddr.IPv4Address(ipv4_pkt.dst)\n+ src_ip = ipaddress.IPv4Address(unicode(ipv4_pkt.src))\n+ dst_ip = ipaddress.IPv4Address(unicode(ipv4_pkt.dst))\n vlan = pkt_meta.vlan\n icmpv4_type = icmp_pkt.type\n ofmsgs = []\n@@ -601,8 +605,8 @@ def _ip_pkt(self, pkt):\n \n def add_faucet_vip(self, vlan, faucet_vip):\n ofmsgs = []\n- faucet_vip_host = ipaddr.IPNetwork(faucet_vip.ip)\n- max_prefixlen = faucet_vip_host.prefixlen\n+ max_prefixlen = faucet_vip.ip.max_prefixlen\n+ faucet_vip_host = self._host_from_faucet_vip(faucet_vip)\n priority = self.route_priority + max_prefixlen\n ofmsgs.append(self.valve_flowmod(\n self.eth_src_table,\n@@ -652,8 +656,8 @@ def add_faucet_vip(self, vlan, faucet_vip):\n \n def _control_plane_icmpv6_handler(self, pkt_meta, ipv6_pkt, icmpv6_pkt):\n vlan = pkt_meta.vlan\n- src_ip = ipaddr.IPv6Address(ipv6_pkt.src)\n- dst_ip = ipaddr.IPv6Address(ipv6_pkt.dst)\n+ src_ip = ipaddress.IPv6Address(unicode(ipv6_pkt.src))\n+ dst_ip = ipaddress.IPv6Address(unicode(ipv6_pkt.dst))\n icmpv6_type = icmpv6_pkt.type_\n ofmsgs = []\n if vlan.ip_in_vip_subnet(src_ip):\n@@ -661,8 +665,8 @@ def _control_plane_icmpv6_handler(self, pkt_meta, ipv6_pkt, icmpv6_pkt):\n vid = self._vlan_vid(vlan, in_port)\n eth_src = pkt_meta.eth_src\n if icmpv6_type == icmpv6.ND_NEIGHBOR_SOLICIT:\n- solicited_ip = icmpv6_pkt.data.dst\n- if vlan.is_faucet_vip(ipaddr.IPAddress(solicited_ip)):\n+ solicited_ip = unicode(icmpv6_pkt.data.dst)\n+ if vlan.is_faucet_vip(ipaddress.ip_address(solicited_ip)):\n 
ofmsgs.extend(\n self._add_host_fib_route(vlan, src_ip))\n nd_reply = valve_packet.nd_reply(\ndiff --git a/src/ryu_faucet/org/onfsdn/faucet/vlan.py b/src/ryu_faucet/org/onfsdn/faucet/vlan.py\n--- a/src/ryu_faucet/org/onfsdn/faucet/vlan.py\n+++ b/src/ryu_faucet/org/onfsdn/faucet/vlan.py\n@@ -13,7 +13,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import ipaddr\n+import ipaddress\n \n from conf import Conf\n \n@@ -80,20 +80,20 @@ def __init__(self, _id, dp_id, conf=None):\n \n if self.faucet_vips:\n self.faucet_vips = [\n- ipaddr.IPNetwork(ip) for ip in self.faucet_vips]\n+ ipaddress.ip_interface(unicode(ip)) for ip in self.faucet_vips]\n \n if self.bgp_as:\n assert self.bgp_port\n- assert ipaddr.IPv4Address(self.bgp_routerid)\n+ assert ipaddress.IPv4Address(unicode(self.bgp_routerid))\n for neighbor_ip in self.bgp_neighbor_addresses:\n- assert ipaddr.IPAddress(neighbor_ip)\n+ assert ipaddress.ip_address(unicode(neighbor_ip))\n assert self.bgp_neighbor_as\n \n if self.routes:\n self.routes = [route['route'] for route in self.routes]\n for route in self.routes:\n- ip_gw = ipaddr.IPAddress(route['ip_gw'])\n- ip_dst = ipaddr.IPNetwork(route['ip_dst'])\n+ ip_gw = ipaddress.ip_address(unicode(route['ip_gw']))\n+ ip_dst = ipaddress.ip_network(unicode(route['ip_dst']))\n assert ip_gw.version == ip_dst.version\n if ip_gw.version == 4:\n self.ipv4_routes[ip_dst] = ip_gw\n@@ -201,7 +201,7 @@ def is_faucet_vip(self, ip):\n \n def ip_in_vip_subnet(self, ip):\n for faucet_vip in self.faucet_vips:\n- if ip in faucet_vip:\n+ if ip in faucet_vip.network:\n return True\n return False\n \n@@ -215,8 +215,8 @@ def from_connected_to_vip(self, src_ip, dst_ip):\n \"\"\"Return True if src_ip in connected network and dst_ip is a VIP.\n \n Args:\n- src_ip (ipaddr.IPAddress): source IP.\n- dst_ip (ipaddr.IPAddress): destination IP\n+ src_ip (ipaddress.ip_address): source IP.\n+ dst_ip (ipaddress.ip_address): destination IP\n Returns:\n True if local traffic for a VIP.\n \"\"\"\n", "test_patch": "diff --git a/tests/faucet_mininet_test.py b/tests/faucet_mininet_test.py\n--- a/tests/faucet_mininet_test.py\n+++ b/tests/faucet_mininet_test.py\n@@ -43,7 +43,7 @@\n from SimpleHTTPServer import SimpleHTTPRequestHandler\n from BaseHTTPServer import HTTPServer\n \n-import ipaddr\n+import ipaddress\n import yaml\n \n from concurrencytest import ConcurrentTestSuite, fork_for_tests\n@@ -881,13 +881,13 @@ def test_untagged(self):\n first_host, second_host = self.net.hosts[:2]\n # wait until 10.0.0.1 has been resolved\n self.wait_for_route_as_flow(\n- first_host.MAC(), ipaddr.IPv4Network('10.99.99.0/24'))\n+ first_host.MAC(), ipaddress.IPv4Network(u'10.99.99.0/24'))\n self.wait_bgp_up(self.exabgp_log)\n self.wait_exabgp_sent_updates(self.exabgp_log)\n self.verify_invalid_bgp_route('10.0.0.4/24 cannot be us')\n self.verify_invalid_bgp_route('10.0.0.5/24 is not a connected network')\n self.wait_for_route_as_flow(\n- second_host.MAC(), ipaddr.IPv4Network('10.0.3.0/24'))\n+ second_host.MAC(), ipaddress.IPv4Network(u'10.0.3.0/24'))\n self.verify_ipv4_routing_mesh()\n self.flap_all_switch_ports()\n self.verify_ipv4_routing_mesh()\n@@ -1600,8 +1600,8 @@ class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):\n def test_tagged(self):\n host_pair = self.net.hosts[:2]\n first_host, second_host = host_pair\n- first_host_routed_ip = ipaddr.IPv4Network('10.0.1.1/24')\n- second_host_routed_ip = ipaddr.IPv4Network('10.0.2.1/24')\n+ first_host_routed_ip = 
ipaddress.ip_interface(u'10.0.1.1/24')\n+ second_host_routed_ip = ipaddress.ip_interface(u'10.0.2.1/24')\n for _ in range(3):\n self.verify_ipv4_routing(\n first_host, first_host_routed_ip,\n@@ -1643,15 +1643,15 @@ class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):\n \"\"\"\n \n def test_untagged(self):\n- first_host_ip = ipaddr.IPv4Network('10.100.0.1/24')\n- first_faucet_vip = ipaddr.IPv4Network('10.100.0.254/24')\n- second_host_ip = ipaddr.IPv4Network('10.200.0.1/24')\n- second_faucet_vip = ipaddr.IPv4Network('10.200.0.254/24')\n+ first_host_ip = ipaddress.ip_interface(u'10.100.0.1/24')\n+ first_faucet_vip = ipaddress.ip_interface(u'10.100.0.254/24')\n+ second_host_ip = ipaddress.ip_interface(u'10.200.0.1/24')\n+ second_faucet_vip = ipaddress.ip_interface(u'10.200.0.254/24')\n first_host, second_host = self.net.hosts[:2]\n first_host.setIP(str(first_host_ip.ip))\n second_host.setIP(str(second_host_ip.ip))\n- self.add_host_ipv4_route(first_host, second_host_ip, first_faucet_vip.ip)\n- self.add_host_ipv4_route(second_host, first_host_ip, second_faucet_vip.ip)\n+ self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)\n+ self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)\n self.one_ipv4_ping(first_host, first_faucet_vip.ip)\n self.one_ipv4_ping(second_host, second_faucet_vip.ip)\n self.one_ipv4_ping(first_host, second_host_ip.ip)\n@@ -1688,15 +1688,15 @@ class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):\n def test_untagged(self):\n host_pair = self.net.hosts[:2]\n first_host, second_host = host_pair\n- first_host_net = ipaddr.IPv4Network('10.0.0.1/24')\n- second_host_net = ipaddr.IPv4Network('172.16.0.1/24')\n+ first_host_net = ipaddress.ip_interface(u'10.0.0.1/24')\n+ second_host_net = ipaddress.ip_interface(u'172.16.0.1/24')\n second_host.setIP(str(second_host_net.ip))\n self.one_ipv4_ping(first_host, self.FAUCET_VIPV4.ip)\n self.one_ipv4_ping(second_host, self.FAUCET_VIPV4_2.ip)\n- self.add_host_ipv4_route(\n- first_host, second_host_net.masked(), self.FAUCET_VIPV4.ip)\n- self.add_host_ipv4_route(\n- second_host, first_host_net.masked(), self.FAUCET_VIPV4_2.ip)\n+ self.add_host_route(\n+ first_host, second_host_net, self.FAUCET_VIPV4.ip)\n+ self.add_host_route(\n+ second_host, first_host_net, self.FAUCET_VIPV4_2.ip)\n self.one_ipv4_ping(first_host, second_host_net.ip)\n self.one_ipv4_ping(second_host, first_host_net.ip)\n \n@@ -1731,16 +1731,16 @@ class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):\n def test_untagged(self):\n host_pair = self.net.hosts[:2]\n first_host, second_host = host_pair\n- first_host_net = ipaddr.IPv6Network('fc00::1:1/64')\n- second_host_net = ipaddr.IPv6Network('fc01::1:1/64')\n+ first_host_net = ipaddress.ip_interface(u'fc00::1:1/64')\n+ second_host_net = ipaddress.ip_interface(u'fc01::1:1/64')\n self.add_host_ipv6_address(first_host, first_host_net)\n self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)\n self.add_host_ipv6_address(second_host, second_host_net)\n self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)\n- self.add_host_ipv6_route(\n- first_host, second_host_net.masked(), self.FAUCET_VIPV6.ip)\n- self.add_host_ipv6_route(\n- second_host, first_host_net.masked(), self.FAUCET_VIPV6_2.ip)\n+ self.add_host_route(\n+ first_host, second_host_net, self.FAUCET_VIPV6.ip)\n+ self.add_host_route(\n+ second_host, first_host_net, self.FAUCET_VIPV6_2.ip)\n self.one_ipv6_ping(first_host, second_host_net.ip)\n self.one_ipv6_ping(second_host, first_host_net.ip)\n \n@@ -1847,20 +1847,20 @@ class 
FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):\n \n def test_untagged(self):\n first_host, second_host = self.net.hosts[:2]\n- first_host_ip = ipaddr.IPv6Network('fc00::10:2/112')\n- first_host_ctrl_ip = ipaddr.IPv6Address('fc00::10:1')\n- second_host_ip = ipaddr.IPv6Network('fc00::20:2/112')\n- second_host_ctrl_ip = ipaddr.IPv6Address('fc00::20:1')\n+ first_host_ip = ipaddress.ip_interface(u'fc00::10:2/112')\n+ first_host_ctrl_ip = ipaddress.ip_address(u'fc00::10:1')\n+ second_host_ip = ipaddress.ip_interface(u'fc00::20:2/112')\n+ second_host_ctrl_ip = ipaddress.ip_address(u'fc00::20:1')\n self.add_host_ipv6_address(first_host, first_host_ip)\n self.add_host_ipv6_address(second_host, second_host_ip)\n- self.add_host_ipv6_route(\n+ self.add_host_route(\n first_host, second_host_ip, first_host_ctrl_ip)\n- self.add_host_ipv6_route(\n+ self.add_host_route(\n second_host, first_host_ip, second_host_ctrl_ip)\n self.wait_for_route_as_flow(\n- first_host.MAC(), first_host_ip)\n+ first_host.MAC(), first_host_ip.network)\n self.wait_for_route_as_flow(\n- second_host.MAC(), second_host_ip)\n+ second_host.MAC(), second_host_ip.network)\n self.one_ipv6_ping(first_host, second_host_ip.ip)\n self.one_ipv6_ping(first_host, second_host_ctrl_ip)\n self.one_ipv6_ping(second_host, first_host_ip.ip)\n@@ -1936,7 +1936,7 @@ def test_untagged(self):\n second_host = self.net.hosts[1]\n self.flap_all_switch_ports()\n self.wait_for_route_as_flow(\n- second_host.MAC(), ipaddr.IPv6Network('fc00::30:0/112'))\n+ second_host.MAC(), ipaddress.IPv6Network(u'fc00::30:0/112'))\n self.verify_ipv6_routing_mesh()\n self.wait_bgp_up(self.exabgp_log)\n updates = self.exabgp_updates(self.exabgp_log)\n@@ -1986,10 +1986,10 @@ def test_tagged(self):\n \"\"\"Test IPv6 routing works.\"\"\"\n host_pair = self.net.hosts[:2]\n first_host, second_host = host_pair\n- first_host_ip = ipaddr.IPv6Network('fc00::1:1/112')\n- second_host_ip = ipaddr.IPv6Network('fc00::1:2/112')\n- first_host_routed_ip = ipaddr.IPv6Network('fc00::10:1/112')\n- second_host_routed_ip = ipaddr.IPv6Network('fc00::20:1/112')\n+ first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')\n+ second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')\n+ first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')\n+ second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')\n for _ in range(5):\n self.verify_ipv6_routing_pair(\n first_host, first_host_ip, first_host_routed_ip,\n@@ -2484,8 +2484,8 @@ class FaucetSingleGroupTableUntaggedIPv4RouteTest(FaucetUntaggedTest):\n def test_untagged(self):\n host_pair = self.net.hosts[:2]\n first_host, second_host = host_pair\n- first_host_routed_ip = ipaddr.IPv4Network('10.0.1.1/24')\n- second_host_routed_ip = ipaddr.IPv4Network('10.0.2.1/24')\n+ first_host_routed_ip = ipaddress.ip_interface(u'10.0.1.1/24')\n+ second_host_routed_ip = ipaddress.ip_interface(u'10.0.2.1/24')\n self.verify_ipv4_routing(\n first_host, first_host_routed_ip,\n second_host, second_host_routed_ip,\n@@ -2538,10 +2538,10 @@ class FaucetSingleGroupUntaggedIPv6RouteTest(FaucetUntaggedTest):\n def test_untagged(self):\n host_pair = self.net.hosts[:2]\n first_host, second_host = host_pair\n- first_host_ip = ipaddr.IPv6Network('fc00::1:1/112')\n- second_host_ip = ipaddr.IPv6Network('fc00::1:2/112')\n- first_host_routed_ip = ipaddr.IPv6Network('fc00::10:1/112')\n- second_host_routed_ip = ipaddr.IPv6Network('fc00::20:1/112')\n+ first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')\n+ second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')\n+ 
first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')\n+ second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')\n self.verify_ipv6_routing_pair(\n first_host, first_host_ip, first_host_routed_ip,\n second_host, second_host_ip, second_host_routed_ip,\ndiff --git a/tests/faucet_mininet_test_base.py b/tests/faucet_mininet_test_base.py\n--- a/tests/faucet_mininet_test_base.py\n+++ b/tests/faucet_mininet_test_base.py\n@@ -11,7 +11,7 @@\n import unittest\n import yaml\n \n-import ipaddr\n+import ipaddress\n import requests\n \n from mininet.node import Controller\n@@ -207,10 +207,10 @@ class FaucetTestBase(unittest.TestCase):\n \"\"\"Base class for all FAUCET unit tests.\"\"\"\n \n ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss'\n- FAUCET_VIPV4 = ipaddr.IPv4Network('10.0.0.254/24')\n- FAUCET_VIPV4_2 = ipaddr.IPv4Network('172.16.0.254/24')\n- FAUCET_VIPV6 = ipaddr.IPv6Network('fc00::1:254/64')\n- FAUCET_VIPV6_2 = ipaddr.IPv6Network('fc01::1:254/64')\n+ FAUCET_VIPV4 = ipaddress.ip_interface(u'10.0.0.254/24')\n+ FAUCET_VIPV4_2 = ipaddress.ip_interface(u'172.16.0.254/24')\n+ FAUCET_VIPV6 = ipaddress.ip_interface(u'fc00::1:254/64')\n+ FAUCET_VIPV6_2 = ipaddress.ip_interface(u'fc01::1:254/64')\n OFCTL = 'ovs-ofctl -OOpenFlow13'\n BOGUS_MAC = '01:02:03:04:05:06'\n FAUCET_MAC = '0e:00:00:00:00:01'\n@@ -445,7 +445,7 @@ def require_host_learned(self, host, retries=3):\n ping_cmd = 'ping'\n if not host_ip_net:\n host_ip_net = self.host_ipv6(host)\n- broadcast = (ipaddr.IPNetwork(host_ip_net).broadcast)\n+ broadcast = (ipaddress.ip_interface(unicode(host_ip_net)).network.broadcast_address)\n if broadcast.version == 6:\n ping_cmd = 'ping6'\n for _ in range(retries):\n@@ -521,19 +521,14 @@ def add_host_ipv6_address(self, host, ip_v6):\n '',\n host.cmd('ip -6 addr add %s dev %s' % (ip_v6, host.intf())))\n \n- def add_host_ipv6_route(self, host, ip_dst, ip_gw):\n- \"\"\"Add an IPv6 route to a Mininet host.\"\"\"\n- host.cmd('ip -6 route del %s' % ip_dst.masked())\n+ def add_host_route(self, host, ip_dst, ip_gw):\n+ \"\"\"Add an IP route to a Mininet host.\"\"\"\n+ host.cmd('ip -%u route del %s' % (\n+ ip_dst.version, ip_dst.network.with_prefixlen))\n self.assertEquals(\n '',\n- host.cmd('ip -6 route add %s via %s' % (ip_dst.masked(), ip_gw)))\n-\n- def add_host_ipv4_route(self, host, ip_dst, ip_gw):\n- \"\"\"Add an IPv4 route to a Mininet host.\"\"\"\n- host.cmd('ip -4 route del %s' % ip_dst.masked())\n- self.assertEquals(\n- '',\n- host.cmd('ip -4 route add %s via %s' % (ip_dst.masked(), ip_gw)))\n+ host.cmd('ip -%u route add %s via %s' % (\n+ ip_dst.version, ip_dst.network.with_prefixlen, ip_gw)))\n \n def one_ipv4_ping(self, host, dst, retries=3, require_host_learned=True):\n \"\"\"Ping an IPv4 destination from a host.\"\"\"\n@@ -679,12 +674,11 @@ def ping_all_when_learned(self, retries=3):\n def wait_for_route_as_flow(self, nexthop, prefix, timeout=10,\n with_group_table=False):\n \"\"\"Verify a route has been added as a flow.\"\"\"\n+ exp_prefix = '%s/%s' % (\n+ prefix.network_address, prefix.netmask)\n if prefix.version == 6:\n- exp_prefix = '/'.join(\n- (str(prefix.masked().ip), str(prefix.netmask)))\n nw_dst_match = '\"ipv6_dst\": \"%s\"' % exp_prefix\n else:\n- exp_prefix = prefix.masked().with_netmask\n nw_dst_match = '\"nw_dst\": \"%s\"' % exp_prefix\n if with_group_table:\n group_id = self.get_group_id_for_matching_flow(nw_dst_match)\n@@ -697,17 +691,17 @@ def wait_for_route_as_flow(self, nexthop, prefix, timeout=10,\n \n def host_ipv4_alias(self, host, 
alias_ip):\n \"\"\"Add an IPv4 alias address to a host.\"\"\"\n- del_cmd = 'ip addr del %s/%s dev %s' % (\n- alias_ip.ip, alias_ip.prefixlen, host.intf())\n- add_cmd = 'ip addr add %s/%s dev %s label %s:1' % (\n- alias_ip.ip, alias_ip.prefixlen, host.intf(), host.intf())\n+ del_cmd = 'ip addr del %s dev %s' % (\n+ alias_ip.with_prefixlen, host.intf())\n+ add_cmd = 'ip addr add %s dev %s label %s:1' % (\n+ alias_ip.with_prefixlen, host.intf(), host.intf())\n host.cmd(del_cmd)\n self.assertEquals('', host.cmd(add_cmd))\n \n- def verify_ipv4_host_learned_mac(self, host, ip, mac, retries=3):\n+ def _verify_host_learned_mac(self, host, ip, ip_ver, mac, retries):\n for _ in range(retries):\n learned_mac = host.cmd(\n- \"arp -n %s | grep %s | awk '{ print $3 }'\" % (ip, ip)).strip()\n+ \"ip -%u neighbor show %s | awk '{ print $5 }'\" % (ip_ver, ip)).strip()\n if learned_mac:\n break\n time.sleep(1)\n@@ -716,24 +710,18 @@ def verify_ipv4_host_learned_mac(self, host, ip, mac, retries=3):\n msg='MAC learned on host mismatch (expected %s found %s)' % (\n mac, learned_mac))\n \n+ def verify_ipv4_host_learned_mac(self, host, ip, mac, retries=3):\n+ self._verify_host_learned_mac(host, ip, 4, mac, retries)\n+\n def verify_ipv4_host_learned_host(self, host, learned_host):\n- learned_ip = ipaddr.IPNetwork(self.host_ipv4(learned_host))\n+ learned_ip = ipaddress.ip_interface(unicode(self.host_ipv4(learned_host)))\n self.verify_ipv4_host_learned_mac(host, learned_ip.ip, learned_host.MAC())\n \n def verify_ipv6_host_learned_mac(self, host, ip6, mac, retries=3):\n- for _ in range(retries):\n- learned_mac = host.cmd(\n- \"ip -6 neighbor show %s | awk '{ print $5 }'\" % ip6).strip()\n- if learned_mac:\n- break\n- time.sleep(1)\n- self.assertEqual(\n- mac, learned_mac,\n- msg='MAC learned on host mismatch (expected %s found %s)' % (\n- mac, learned_mac))\n+ self._verify_host_learned_mac(host, ip6, 6, mac, retries)\n \n def verify_ipv6_host_learned_host(self, host, learned_host):\n- learned_ip6 = ipaddr.IPNetwork(self.host_ipv6(learned_host))\n+ learned_ip6 = ipaddress.ip_interface(unicode(self.host_ipv6(learned_host)))\n self.verify_ipv6_host_learned_mac(host, learned_ip6.ip, learned_host.MAC())\n \n def verify_ipv4_routing(self, first_host, first_host_routed_ip,\n@@ -742,16 +730,16 @@ def verify_ipv4_routing(self, first_host, first_host_routed_ip,\n \"\"\"Verify one host can IPV4 route to another via FAUCET.\"\"\"\n self.host_ipv4_alias(first_host, first_host_routed_ip)\n self.host_ipv4_alias(second_host, second_host_routed_ip)\n- self.add_host_ipv4_route(\n+ self.add_host_route(\n first_host, second_host_routed_ip, self.FAUCET_VIPV4.ip)\n- self.add_host_ipv4_route(\n+ self.add_host_route(\n second_host, first_host_routed_ip, self.FAUCET_VIPV4.ip)\n self.net.ping(hosts=(first_host, second_host))\n self.wait_for_route_as_flow(\n- first_host.MAC(), first_host_routed_ip,\n+ first_host.MAC(), first_host_routed_ip.network,\n with_group_table=with_group_table)\n self.wait_for_route_as_flow(\n- second_host.MAC(), second_host_routed_ip,\n+ second_host.MAC(), second_host_routed_ip.network,\n with_group_table=with_group_table)\n self.one_ipv4_ping(first_host, second_host_routed_ip.ip)\n self.one_ipv4_ping(second_host, first_host_routed_ip.ip)\n@@ -762,9 +750,9 @@ def verify_ipv4_routing_mesh(self, with_group_table=False):\n \"\"\"Verify hosts can route to each other via FAUCET.\"\"\"\n host_pair = self.net.hosts[:2]\n first_host, second_host = host_pair\n- first_host_routed_ip = ipaddr.IPv4Network('10.0.1.1/24')\n- 
second_host_routed_ip = ipaddr.IPv4Network('10.0.2.1/24')\n- second_host_routed_ip2 = ipaddr.IPv4Network('10.0.3.1/24')\n+ first_host_routed_ip = ipaddress.ip_interface(u'10.0.1.1/24')\n+ second_host_routed_ip = ipaddress.ip_interface(u'10.0.2.1/24')\n+ second_host_routed_ip2 = ipaddress.ip_interface(u'10.0.3.1/24')\n self.verify_ipv4_routing(\n first_host, first_host_routed_ip,\n second_host, second_host_routed_ip,\n@@ -803,15 +791,15 @@ def verify_ipv6_routing(self, first_host, first_host_ip,\n \"\"\"Verify one host can IPV6 route to another via FAUCET.\"\"\"\n self.one_ipv6_ping(first_host, second_host_ip.ip)\n self.one_ipv6_ping(second_host, first_host_ip.ip)\n- self.add_host_ipv6_route(\n+ self.add_host_route(\n first_host, second_host_routed_ip, self.FAUCET_VIPV6.ip)\n- self.add_host_ipv6_route(\n+ self.add_host_route(\n second_host, first_host_routed_ip, self.FAUCET_VIPV6.ip)\n self.wait_for_route_as_flow(\n- first_host.MAC(), first_host_routed_ip,\n+ first_host.MAC(), first_host_routed_ip.network,\n with_group_table=with_group_table)\n self.wait_for_route_as_flow(\n- second_host.MAC(), second_host_routed_ip,\n+ second_host.MAC(), second_host_routed_ip.network,\n with_group_table=with_group_table)\n self.one_ipv6_controller_ping(first_host)\n self.one_ipv6_controller_ping(second_host)\n@@ -839,11 +827,11 @@ def verify_ipv6_routing_mesh(self, with_group_table=False):\n \"\"\"Verify IPv6 routing between hosts and multiple subnets.\"\"\"\n host_pair = self.net.hosts[:2]\n first_host, second_host = host_pair\n- first_host_ip = ipaddr.IPv6Network('fc00::1:1/112')\n- second_host_ip = ipaddr.IPv6Network('fc00::1:2/112')\n- first_host_routed_ip = ipaddr.IPv6Network('fc00::10:1/112')\n- second_host_routed_ip = ipaddr.IPv6Network('fc00::20:1/112')\n- second_host_routed_ip2 = ipaddr.IPv6Network('fc00::30:1/112')\n+ first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')\n+ second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')\n+ first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')\n+ second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')\n+ second_host_routed_ip2 = ipaddress.ip_interface(u'fc00::30:1/112')\n self.verify_ipv6_routing_pair(\n first_host, first_host_ip, first_host_routed_ip,\n second_host, second_host_ip, second_host_routed_ip,\ndiff --git a/tests/test_api.py b/tests/test_api.py\n--- a/tests/test_api.py\n+++ b/tests/test_api.py\n@@ -1,6 +1,6 @@\n import os\n import sys\n-import ipaddr\n+import ipaddress\n \n from ryu.base import app_manager\n from ryu.lib import hub\ndiff --git a/tests/test_config.py b/tests/test_config.py\n--- a/tests/test_config.py\n+++ b/tests/test_config.py\n@@ -19,7 +19,7 @@\n import logging\n import sys\n import os\n-import ipaddr\n+import ipaddress\n \n testdir = os.path.dirname(__file__)\n srcdir = '../src/ryu_faucet/org/onfsdn/faucet'\n@@ -181,7 +181,7 @@ def test_routing(self):\n for dp in (self.v2_dp,):\n vlan = dp.vlans[41]\n self.assertIn(\n- ipaddr.IPNetwork('10.0.0.253/24'),\n+ ipaddress.ip_interface(u'10.0.0.253/24'),\n vlan.faucet_vips\n )\n self.assertEquals(vlan.bgp_port, 9179)\n@@ -190,15 +190,15 @@ def test_routing(self):\n self.assertIn('127.0.0.1', vlan.bgp_neighbor_addresses)\n self.assertEquals(vlan.bgp_neighbor_as, 2)\n self.assertIn(\n- ipaddr.IPNetwork('10.0.1.0/24'),\n+ ipaddress.ip_network(u'10.0.1.0/24'),\n vlan.ipv4_routes\n )\n self.assertIn(\n- ipaddr.IPNetwork('10.0.2.0/24'),\n+ ipaddress.ip_network(u'10.0.2.0/24'),\n vlan.ipv4_routes\n )\n self.assertIn(\n- ipaddr.IPNetwork('10.0.3.0/24'),\n+ 
ipaddress.ip_network(u'10.0.3.0/24'),\n vlan.ipv4_routes\n )\n \n", "problem_statement": "", "hints_text": "", "created_at": "2017-03-15T09:44:02Z"}
PythonDataset/test/gcloud-aio-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
 
 
+ {"repo": "talkiq/gcloud-aio", "pull_number": 24, "instance_id": "talkiq__gcloud-aio-24", "issue_numbers": "", "base_commit": "02d7ce2526855c16a36d7cced824a9c7b7d9bd6b", "patch": "diff --git a/auth/gcloud/aio/auth/auth.py b/auth/gcloud/aio/auth/auth.py\n--- a/auth/gcloud/aio/auth/auth.py\n+++ b/auth/gcloud/aio/auth/auth.py\n@@ -1,15 +1,16 @@\n \"\"\"\n Google Cloud auth via service account file\n \"\"\"\n+import asyncio\n import datetime\n import json\n import time\n import typing\n+from urllib.parse import quote_plus\n+from urllib.parse import urlencode\n \n import aiohttp\n import jwt\n-from gcloud.aio.core.aio import auto\n-from gcloud.aio.core.http import post\n \n \n ScopeList = typing.List[str]\n@@ -20,29 +21,24 @@\n 'project_id.'\n \n \n-async def acquire_token(session: aiohttp.ClientSession,\n- service_data: dict,\n+async def acquire_token(session: aiohttp.ClientSession, service_data: dict,\n scopes: ScopeList = None):\n-\n url, assertion = generate_assertion(service_data, scopes)\n \n- payload = {\n- 'grant_type': JWT_GRANT_TYPE,\n- 'assertion': assertion\n+ headers = {\n+ 'Content-Type': 'application/x-www-form-urlencoded',\n }\n+ payload = urlencode({\n+ 'assertion': assertion,\n+ 'grant_type': JWT_GRANT_TYPE,\n+ }, quote_via=quote_plus)\n \n- _status, content = await post(\n- url,\n- payload,\n- headers={'content-type': 'application/x-www-form-urlencoded'},\n- timeout=60,\n- urlencoded=True,\n- json_response=True,\n- session=session\n- )\n+ response = await session.post(url, data=payload, headers=headers,\n+ params=None, timeout=60)\n+ content = await response.json()\n \n if 'error' in content:\n- raise Exception('{}'.format(content))\n+ raise Exception(f'got error acquiring token: {content}')\n \n return {\n 'access_token': str(content['access_token']),\n@@ -100,7 +96,7 @@ def __init__(self, project: str, service_file: str,\n \n self.scopes = scopes or []\n \n- self.session = session\n+ self.session = session or aiohttp.ClientSession()\n self.access_token = None\n self.access_token_duration = None\n self.access_token_acquired_at = None\n@@ -121,7 +117,7 @@ async def ensure_token(self):\n \n elif not self.access_token:\n \n- self.acquiring = self.acquire_access_token()\n+ self.acquiring = asyncio.ensure_future(self.acquire_access_token())\n \n await self.acquiring\n \n@@ -132,11 +128,11 @@ async def ensure_token(self):\n \n if delta > self.access_token_duration / 2:\n \n- self.acquiring = self.acquire_access_token()\n+ self.acquiring = asyncio.ensure_future(\n+ self.acquire_access_token())\n \n await self.acquiring\n \n- @auto\n async def acquire_access_token(self):\n \n data = await acquire_token(\ndiff --git a/auth/nox.py b/auth/nox.py\n--- a/auth/nox.py\n+++ b/auth/nox.py\n@@ -4,16 +4,13 @@\n import nox\n \n \n-LOCAL_DEPS = ('../core/',)\n-\n-\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def unit_tests(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'unit-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'unit-{python_version}'\n \n- session.install('pytest', 'pytest-cov', *LOCAL_DEPS)\n+ session.install('pytest', 'pytest-cov')\n session.install('-e', '.')\n \n session.run(\n@@ -23,7 +20,7 @@ def unit_tests(session, python_version):\n '--cov=tests.unit',\n '--cov-append',\n '--cov-report=',\n- '--cov-fail-under=39',\n+ '--cov-fail-under=38',\n os.path.join('tests', 'unit'),\n *session.posargs)\n \n@@ -34,10 +31,10 @@ def 
integration_tests(session, python_version):\n if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):\n session.skip('Credentials must be set via environment variable.')\n \n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'integration-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'integration-{python_version}'\n \n- session.install('aiohttp', 'pytest', *LOCAL_DEPS)\n+ session.install('aiohttp', 'pytest')\n session.install('.')\n \n session.run('py.test', '--quiet', 'tests/integration')\n@@ -46,7 +43,7 @@ def integration_tests(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def lint_setup_py(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'setup'\n \n session.install('docutils', 'Pygments')\n@@ -61,7 +58,7 @@ def lint_setup_py(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def cover(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'cover'\n \n session.install('codecov', 'coverage', 'pytest-cov')\ndiff --git a/bigquery/gcloud/aio/bigquery/bigquery.py b/bigquery/gcloud/aio/bigquery/bigquery.py\n--- a/bigquery/gcloud/aio/bigquery/bigquery.py\n+++ b/bigquery/gcloud/aio/bigquery/bigquery.py\n@@ -2,9 +2,12 @@\n import logging\n import uuid\n \n-import ujson\n+import aiohttp\n from gcloud.aio.auth import Token\n-from gcloud.aio.core.http import post\n+try:\n+ import ujson as json\n+except ModuleNotFoundError:\n+ import json\n \n \n API_ROOT = 'https://www.googleapis.com/bigquery/v2'\n@@ -63,51 +66,43 @@ async def headers(self):\n token = await self.token.get()\n \n return {\n- 'Authorization': 'Bearer {}'.format(token)\n+ 'Authorization': f'Bearer {token}',\n }\n \n async def insert(self, rows, skip_invalid=False, ignore_unknown=True,\n session=None):\n-\n session = session or self.session\n \n- body = make_insert_body(\n- rows,\n- skip_invalid=skip_invalid,\n- ignore_unknown=ignore_unknown\n- )\n-\n- headers = await self.headers()\n-\n- url = '{}/{}'.format(\n- API_ROOT,\n- INSERT_TEMPLATE.format(\n- proj=self.project,\n- dataset=self.dataset_name,\n- table=self.table_name\n- )\n- )\n-\n+ insert_url = INSERT_TEMPLATE.format(proj=self.project,\n+ dataset=self.dataset_name,\n+ table=self.table_name)\n+ url = f'{API_ROOT}/{insert_url}'\n log.info('Inserting %d rows to %s', len(rows), url)\n \n- status, content = await post(\n- url,\n- payload=body,\n- headers=headers\n- )\n+ body = make_insert_body(rows, skip_invalid=skip_invalid,\n+ ignore_unknown=ignore_unknown)\n+ payload = json.dumps(body).encode('utf-8')\n+\n+ headers = await self.headers()\n+ headers.update({\n+ 'Content-Length': str(len(payload)),\n+ 'Content-Type': 'application/json'\n+ })\n \n- success = 299 >= status >= 200 and 'insertErrors' not in content\n+ async with aiohttp.ClientSession() as s:\n+ response = await s.post(url, data=payload, headers=headers,\n+ params=None, timeout=60)\n+ content = await response.json()\n \n- if success:\n- return success\n+ if 299 >= response.status >= 200 and 'insertErrors' not in content:\n+ return True\n \n- log.debug('response code: %d', status)\n+ log.debug('response code: %d', response.status)\n log.debug('url: %s', url)\n- log.debug('body:\\n%s\\n', body)\n+ 
log.debug('body:\\n%s\\n', payload)\n \n- raise Exception('Could not insert: {}'.format(ujson.dumps(\n- content, sort_keys=True\n- )))\n+ content_blob = json.dumps(content, sort_keys=True)\n+ raise Exception(f'could not insert: {content_blob}')\n \n \n async def stream_insert(table, rows):\ndiff --git a/bigquery/nox.py b/bigquery/nox.py\n--- a/bigquery/nox.py\n+++ b/bigquery/nox.py\n@@ -4,14 +4,14 @@\n import nox\n \n \n-LOCAL_DEPS = ('../core/', '../auth/')\n+LOCAL_DEPS = ('../auth/', )\n \n \n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def unit_tests(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'unit-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'unit-{python_version}'\n \n session.install('pytest', 'pytest-cov', *LOCAL_DEPS)\n session.install('-e', '.')\n@@ -23,7 +23,7 @@ def unit_tests(session, python_version):\n '--cov=tests.unit',\n '--cov-append',\n '--cov-report=',\n- '--cov-fail-under=47',\n+ '--cov-fail-under=46',\n os.path.join('tests', 'unit'),\n *session.posargs)\n \n@@ -34,8 +34,8 @@ def integration_tests(session, python_version):\n if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):\n session.skip('Credentials must be set via environment variable.')\n \n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'integration-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'integration-{python_version}'\n \n session.install('aiohttp', 'pytest', *LOCAL_DEPS)\n session.install('.')\n@@ -46,7 +46,7 @@ def integration_tests(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def lint_setup_py(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'setup'\n \n session.install('docutils', 'Pygments')\n@@ -61,11 +61,11 @@ def lint_setup_py(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def cover(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'cover'\n \n session.install('codecov', 'coverage', 'pytest-cov')\n \n- session.run('coverage', 'report', '--show-missing', '--fail-under=47')\n+ session.run('coverage', 'report', '--show-missing', '--fail-under=46')\n session.run('codecov')\n session.run('coverage', 'erase')\ndiff --git a/core/gcloud/__init__.py b/core/gcloud/__init__.py\ndeleted file mode 100644\n--- a/core/gcloud/__init__.py\n+++ /dev/null\n@@ -1,6 +0,0 @@\n-try:\n- import pkg_resources\n- pkg_resources.declare_namespace(__name__)\n-except ImportError:\n- import pkgutil\n- __path__ = pkgutil.extend_path(__path__, __name__)\ndiff --git a/core/gcloud/aio/__init__.py b/core/gcloud/aio/__init__.py\ndeleted file mode 100644\n--- a/core/gcloud/aio/__init__.py\n+++ /dev/null\n@@ -1,6 +0,0 @@\n-try:\n- import pkg_resources\n- pkg_resources.declare_namespace(__name__)\n-except ImportError:\n- import pkgutil\n- __path__ = pkgutil.extend_path(__path__, __name__)\ndiff --git a/core/gcloud/aio/core/__init__.py b/core/gcloud/aio/core/__init__.py\ndeleted file mode 100644\n--- a/core/gcloud/aio/core/__init__.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-from pkg_resources import get_distribution\n-__version__ = 
get_distribution('gcloud-aio-core').version\n-\n-\n-__all__ = ['__version__']\ndiff --git a/core/gcloud/aio/core/aio.py b/core/gcloud/aio/core/aio.py\ndeleted file mode 100644\n--- a/core/gcloud/aio/core/aio.py\n+++ /dev/null\n@@ -1,66 +0,0 @@\n-import asyncio\n-import functools\n-\n-\n-def maybe_async(callable_, *args, **kwargs):\n-\n- \"\"\"\n- Turn a callable into a coroutine if it isn't\n- \"\"\"\n-\n- if asyncio.iscoroutine(callable_):\n- return callable_\n-\n- return asyncio.coroutine(callable_)(*args, **kwargs)\n-\n-\n-def fire(callable_, *args, **kwargs):\n-\n- \"\"\"\n- Start a callable as a coroutine, and return it's future. The cool thing\n- about this function is that (via maybe_async) it lets you treat synchronous\n- and asynchronous callables the same (both as async), which simplifies code.\n- \"\"\"\n-\n- return asyncio.ensure_future(maybe_async(callable_, *args, **kwargs))\n-\n-\n-def auto(fn):\n-\n- \"\"\"\n- Decorate a function or method with this, and it will become a callable\n- that can be scheduled in the event loop just by calling it. Normally you'd\n- have to do an `asyncio.ensure_future(my_callable())`. Not you can just do\n- `my_callable()`. Twisted has always let you do this, and now you can let\n- asyncio do it as well (with a decorator, albeit...)\n- \"\"\"\n-\n- @functools.wraps(fn)\n- def wrapper(*args, **kwargs):\n-\n- return fire(fn, *args, **kwargs)\n-\n- return wrapper\n-\n-\n-async def _call_later(delay, callable_, *args, **kwargs):\n-\n- \"\"\"\n- The bus stop, where we wait.\n- \"\"\"\n-\n- await asyncio.sleep(delay)\n-\n- fire(callable_, *args, **kwargs)\n-\n-\n-def call_later(delay, callable_, *args, **kwargs):\n-\n- \"\"\"\n- After :delay seconds, call :callable with :args and :kwargs; :callable can\n- be a synchronous or asynchronous callable (a coroutine). Note that _this_\n- function is synchronous - mission accomplished - it can be used from within\n- any synchronous or asynchronous callable.\n- \"\"\"\n-\n- return fire(_call_later, delay, callable_, *args, **kwargs)\ndiff --git a/core/gcloud/aio/core/astate.py b/core/gcloud/aio/core/astate.py\ndeleted file mode 100644\n--- a/core/gcloud/aio/core/astate.py\n+++ /dev/null\n@@ -1,83 +0,0 @@\n-import asyncio\n-import logging\n-\n-from gcloud.aio.core.aio import fire\n-\n-\n-log = logging.getLogger(__name__)\n-\n-\n-class AwaitableState:\n- # pylint: disable=too-few-public-methods\n-\n- \"\"\"\n- Wrap a :future with a name and data. If :future is a coroutine, turn it\n- into a future by firing it.\n-\n- Use instances of AwaitableState as named states in state machines. 
Use\n- :data for arbitrary context beyond :name.\n- \"\"\"\n-\n- def __init__(self, name, future, data=None):\n-\n- self.name = name\n- self.future = future\n- self.data = data\n-\n- if asyncio.iscoroutine(self.future):\n- self.future = fire(self.future)\n-\n- def __await__(self):\n-\n- return self.future.__await__()\n-\n- def __str__(self):\n-\n- return self.__repr__()\n-\n- def __repr__(self):\n-\n- return '<awaitable state: {} at 0x{}>'.format(\n- self.name,\n- id(self)\n- )\n-\n- def __getattr__(self, attr):\n-\n- return getattr(self.future, attr)\n-\n- def __hash__(self):\n-\n- return hash(self.name)\n-\n- def __eq__(self, other):\n-\n- return hash(self) == hash(other)\n-\n-\n-def make_stepper(default_step, state_step, name='sm'):\n-\n- \"\"\"\n- `default_step`: a callable that takes no args\n- `state_step`: a mapping between AwaitableState.name -> callable\n- \"\"\"\n-\n- async def step(state, args):\n-\n- state_name = getattr(state, 'name', None)\n- step = state_step.get(state_name, default_step)\n- next_state = step(args) if args is not None else step()\n-\n- if next_state:\n- args = await next_state\n- else:\n- args = tuple()\n-\n- if next_state != state:\n- log.debug('%s state change: %s -> %s', name,\n- getattr(state, 'name', None),\n- getattr(next_state, 'name', None))\n-\n- return next_state, args\n-\n- return step\ndiff --git a/core/gcloud/aio/core/http.py b/core/gcloud/aio/core/http.py\ndeleted file mode 100644\n--- a/core/gcloud/aio/core/http.py\n+++ /dev/null\n@@ -1,140 +0,0 @@\n-from urllib.parse import quote_plus\n-from urllib.parse import urlencode\n-\n-import aiohttp\n-import ujson\n-from asyncio_extras.contextmanager import async_contextmanager\n-\n-\n-class HttpError(Exception):\n- pass\n-\n-\n-@async_contextmanager\n-async def ensure_session(session):\n-\n- if session:\n- yield session\n- else:\n- async with aiohttp.ClientSession() as session:\n- yield session\n-\n-\n-async def delete(url, headers=None, params=None, timeout=60, session=None):\n-\n- async with ensure_session(session) as s: # pylint: disable=not-async-context-manager\n-\n- response = await s.delete(\n- url,\n- headers=headers,\n- params=params,\n- timeout=timeout\n- )\n-\n- phrase = await response.text()\n-\n- return response.status, phrase\n-\n-\n-async def post(url, payload=None, timeout=60, urlencoded=False,\n- json_response=True, session=None, headers=None, params=None):\n- # pylint: disable=too-many-arguments\n-\n- headers = headers or {}\n-\n- if urlencoded:\n-\n- if payload:\n- payload = urlencode(payload, quote_via=quote_plus)\n-\n- headers['content-type'] = 'application/x-www-form-urlencoded'\n-\n- else:\n-\n- if payload:\n- payload = ujson.dumps(payload)\n- payload = payload.encode('utf-8')\n- content_length = str(len(payload))\n- else:\n- content_length = '0'\n-\n- headers.update({\n- 'content-length': content_length,\n- 'content-type': 'application/json'\n- })\n-\n- async with ensure_session(session) as s: # pylint: disable=not-async-context-manager\n-\n- response = await s.post(\n- url,\n- data=payload,\n- headers=headers,\n- params=params,\n- timeout=timeout\n- )\n-\n- if json_response:\n- content = await response.json()\n- else:\n- content = await response.text()\n-\n- return response.status, content\n-\n-\n-async def get(url, timeout=60, json_response=True, session=None, headers=None,\n- params=None):\n- # pylint: disable=too-many-arguments\n-\n- async with ensure_session(session) as s: # pylint: disable=not-async-context-manager\n-\n- response = await s.get(\n- url,\n- 
headers=headers,\n- params=params,\n- timeout=timeout\n- )\n-\n- if json_response:\n- content = await response.json()\n- else:\n- content = await response.text()\n-\n- return response.status, content\n-\n-\n-async def put(*args, **kwargs): # pylint: disable=unused-argument\n-\n- raise Exception('Not implemented.')\n-\n-\n-async def patch(url, payload=None, timeout=60, session=None, headers=None,\n- params=None):\n- # pylint: disable=too-many-arguments\n-\n- headers = headers or {}\n-\n- if payload:\n- payload = ujson.dumps(payload)\n- payload = payload.encode('utf-8')\n- content_length = str(len(payload))\n- else:\n- content_length = '0'\n-\n- headers.update({\n- 'content-length': content_length,\n- 'content-type': 'application/json'\n- })\n-\n- async with ensure_session(session) as s: # pylint: disable=not-async-context-manager\n-\n- response = await s.patch(\n- url,\n- data=payload,\n- headers=headers,\n- params=params,\n- timeout=timeout\n- )\n-\n- phrase = await response.text()\n-\n- return response.status, phrase\ndiff --git a/core/nox.py b/core/nox.py\ndeleted file mode 100644\n--- a/core/nox.py\n+++ /dev/null\n@@ -1,53 +0,0 @@\n-# pylint: disable=import-self,no-member\n-import os\n-\n-import nox\n-\n-\n-@nox.session\n-@nox.parametrize('python_version', ['3.6'])\n-def unit_tests(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'unit-' + python_version\n-\n- session.install('pytest', 'pytest-cov')\n- session.install('-e', '.')\n-\n- session.run(\n- 'py.test',\n- '--quiet',\n- '--cov=gcloud.aio.core',\n- '--cov=tests.unit',\n- '--cov-append',\n- '--cov-report=',\n- '--cov-fail-under=37',\n- os.path.join('tests', 'unit'),\n- *session.posargs)\n-\n-\n-@nox.session\n-@nox.parametrize('python_version', ['3.6'])\n-def lint_setup_py(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'setup'\n-\n- session.install('docutils', 'Pygments')\n- session.run(\n- 'python',\n- 'setup.py',\n- 'check',\n- '--restructuredtext',\n- '--strict')\n-\n-\n-@nox.session\n-@nox.parametrize('python_version', ['3.6'])\n-def cover(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'cover'\n-\n- session.install('codecov', 'coverage', 'pytest-cov')\n-\n- session.run('coverage', 'report', '--show-missing', '--fail-under=37')\n- session.run('codecov')\n- session.run('coverage', 'erase')\ndiff --git a/core/setup.py b/core/setup.py\ndeleted file mode 100644\n--- a/core/setup.py\n+++ /dev/null\n@@ -1,41 +0,0 @@\n-import os\n-\n-import setuptools\n-\n-\n-PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n-with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as f:\n- README = f.read()\n-\n-with open(os.path.join(PACKAGE_ROOT, 'requirements.txt')) as f:\n- REQUIREMENTS = [r.strip() for r in f.readlines()]\n-\n-\n-setuptools.setup(\n- name='gcloud-aio-core',\n- version='0.7.2',\n- description='Core Helpers for Asyncio Google Cloud Library',\n- long_description=README,\n- namespace_packages=[\n- 'gcloud',\n- 'gcloud.aio',\n- ],\n- packages=setuptools.find_packages(exclude=('tests',)),\n- install_requires=REQUIREMENTS,\n- author='TalkIQ',\n- author_email='engineering@talkiq.com',\n- url='https://github.com/talkiq/gcloud-aio',\n- platforms='Posix; MacOS X; Windows',\n- include_package_data=True,\n- zip_safe=False,\n- license='MIT License',\n- classifiers=[\n- 'Development Status :: 4 - Beta',\n- 'Intended Audience :: 
Developers',\n- 'License :: OSI Approved :: MIT License',\n- 'Operating System :: OS Independent',\n- 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.6',\n- 'Topic :: Internet',\n- ],\n-)\ndiff --git a/datastore/gcloud/aio/datastore/datastore.py b/datastore/gcloud/aio/datastore/datastore.py\n--- a/datastore/gcloud/aio/datastore/datastore.py\n+++ b/datastore/gcloud/aio/datastore/datastore.py\n@@ -1,11 +1,15 @@\n import datetime\n import logging\n \n+import aiohttp\n from gcloud.aio.auth import Token\n-from gcloud.aio.core.http import post\n from gcloud.aio.datastore.constants import Mode\n from gcloud.aio.datastore.constants import Operation\n from gcloud.aio.datastore.constants import TypeName\n+try:\n+ import ujson as json\n+except ModuleNotFoundError:\n+ import json\n \n \n API_ROOT = 'https://datastore.googleapis.com/v1/projects'\n@@ -30,9 +34,7 @@ def infer_type(value):\n }.get(type(value))\n \n if not type_name:\n- raise Exception('Type {} not supported for DS insert. :('.format(\n- type(value)\n- ))\n+ raise Exception(f'type {type(value)} not supported for DS insert')\n \n return type_name\n \n@@ -109,45 +111,54 @@ async def headers(self):\n token = await self.token.get()\n \n return {\n- 'Authorization': 'Bearer {}'.format(token),\n+ 'Authorization': f'Bearer {token}',\n }\n \n async def transact(self):\n- url = '{}/{}:beginTransaction'.format(API_ROOT, self.project)\n+ url = f'{API_ROOT}/{self.project}:beginTransaction'\n headers = await self.headers()\n- body = {}\n+ headers.update({\n+ 'Content-Length': '0',\n+ 'Content-Type': 'application/json'\n+ })\n \n- status, content = await post(url, payload={}, headers=headers)\n+ async with aiohttp.ClientSession() as s:\n+ response = await s.post(url, data={}, headers=headers, params=None,\n+ timeout=60)\n+ content = await response.json()\n \n # TODO: make this raise_for_status-able.\n- success = 299 >= status >= 200\n-\n- if success:\n+ if 299 >= response.status >= 200:\n transaction = content['transaction']\n return transaction\n \n- log.debug('response code: %d', status)\n+ log.debug('response code: %d', response.status)\n log.debug('url: %s', url)\n- log.debug('body:\\n%s\\n', body)\n \n- raise Exception('Could not transact: {}'.format(content))\n+ raise Exception(f'could not transact: {content}')\n \n async def commit(self, transaction, mutations, mode=Mode.TRANSACTIONAL):\n- url = '{}/{}:commit'.format(API_ROOT, self.project)\n+ url = f'{API_ROOT}/{self.project}:commit'\n \n body = make_commit_body(transaction, mode, mutations)\n+ payload = json.dumps(body).encode('utf-8')\n \n headers = await self.headers()\n+ headers.update({\n+ 'Content-Length': str(len(payload)),\n+ 'Content-Type': 'application/json'\n+ })\n \n- status, content = await post(url, payload=body, headers=headers)\n+ async with aiohttp.ClientSession() as s:\n+ response = await s.post(url, data=payload, headers=headers,\n+ params=None, timeout=60)\n+ content = await response.json()\n \n # TODO: make this raise_for_status-able.\n- success = 299 >= status >= 200 and 'insertErrors' not in content\n-\n- if success:\n- return success\n+ if 299 >= response.status >= 200 and 'insertErrors' not in content:\n+ return True\n \n- raise Exception('{}: {} > {}'.format(status, url, content))\n+ raise Exception(f'{response.status}: {url} > {content}')\n \n # TODO: look into deletion payload format\n \ndiff --git a/datastore/nox.py b/datastore/nox.py\n--- a/datastore/nox.py\n+++ b/datastore/nox.py\n@@ -4,14 +4,14 @@\n import nox\n \n \n-LOCAL_DEPS = 
('../core/', '../auth/')\n+LOCAL_DEPS = ('../auth/', )\n \n \n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def unit_tests(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'unit-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'unit-{python_version}'\n \n session.install('pytest', 'pytest-cov', *LOCAL_DEPS)\n session.install('-e', '.')\n@@ -34,8 +34,8 @@ def integration_tests(session, python_version):\n if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):\n session.skip('Credentials must be set via environment variable.')\n \n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'integration-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'integration-{python_version}'\n \n session.install('pytest', *LOCAL_DEPS)\n session.install('.')\n@@ -46,7 +46,7 @@ def integration_tests(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def lint_setup_py(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'setup'\n \n session.install('docutils', 'Pygments')\n@@ -61,7 +61,7 @@ def lint_setup_py(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def cover(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'cover'\n \n session.install('codecov', 'coverage', 'pytest-cov')\ndiff --git a/pubsub/nox.py b/pubsub/nox.py\n--- a/pubsub/nox.py\n+++ b/pubsub/nox.py\n@@ -7,8 +7,8 @@\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def unit_tests(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'unit-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'unit-{python_version}'\n \n session.install('pytest', 'pytest-cov')\n session.install('-e', '.')\n@@ -31,8 +31,8 @@ def integration_tests(session, python_version):\n if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):\n session.skip('Credentials must be set via environment variable.')\n \n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'integration-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'integration-{python_version}'\n \n session.install('pytest')\n session.install('.')\n@@ -43,7 +43,7 @@ def integration_tests(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def lint_setup_py(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'setup'\n \n session.install('docutils', 'Pygments')\n@@ -58,7 +58,7 @@ def lint_setup_py(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def cover(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'cover'\n \n session.install('codecov', 'coverage', 'pytest-cov')\ndiff --git a/storage/gcloud/aio/storage/__init__.py b/storage/gcloud/aio/storage/__init__.py\n--- 
a/storage/gcloud/aio/storage/__init__.py\n+++ b/storage/gcloud/aio/storage/__init__.py\n@@ -1,10 +1,10 @@\n from pkg_resources import get_distribution\n __version__ = get_distribution('gcloud-aio-storage').version\n \n-from gcloud.aio.storage.storage import Blob\n-from gcloud.aio.storage.storage import Bucket\n-from gcloud.aio.storage.storage import make_download\n+from gcloud.aio.storage.blob import Blob\n+from gcloud.aio.storage.bucket import Bucket\n from gcloud.aio.storage.storage import Storage\n+from gcloud.aio.storage.utils import make_download\n \n \n-__all__ = ['__version__', 'Blob', 'Bucket', 'make_download', 'Storage']\n+__all__ = ['__version__', 'Blob', 'Bucket', 'Storage', 'make_download']\ndiff --git a/storage/gcloud/aio/storage/blob.py b/storage/gcloud/aio/storage/blob.py\nnew file mode 100644\n--- /dev/null\n+++ b/storage/gcloud/aio/storage/blob.py\n@@ -0,0 +1,33 @@\n+try:\n+ import ujson as json\n+except ModuleNotFoundError:\n+ import json\n+\n+\n+class Blob:\n+ def __init__(self, bucket, name, data):\n+ self.__dict__.update(**data)\n+\n+ self.bucket = bucket\n+ self.name = name\n+ self.size = int(self.size)\n+\n+ @property\n+ def chunk_size(self):\n+ return self.size + (262144 - (self.size % 262144))\n+\n+ async def download_as_string(self, session=None):\n+ return await self.bucket.storage.download_as_string(self.bucket.name,\n+ self.name,\n+ session=session)\n+\n+ async def upload_from_string(self, data, session=None):\n+ status, content = await self.bucket.storage.upload(self.bucket.name,\n+ self.name, data,\n+ session=session)\n+\n+ if status < 200 or status >= 300:\n+ raise Exception(f'{status}: {json.dumps(content)}')\n+\n+ self.__dict__.update(content)\n+ return content\ndiff --git a/storage/gcloud/aio/storage/bucket.py b/storage/gcloud/aio/storage/bucket.py\nnew file mode 100644\n--- /dev/null\n+++ b/storage/gcloud/aio/storage/bucket.py\n@@ -0,0 +1,47 @@\n+import logging\n+\n+from gcloud.aio.storage.blob import Blob\n+try:\n+ import ujson as json\n+except ModuleNotFoundError:\n+ import json\n+\n+\n+log = logging.getLogger(__name__)\n+\n+\n+class Bucket:\n+ def __init__(self, storage, name):\n+ self.storage = storage\n+ self.name = name\n+\n+ async def get_blob(self, blob_name, session=None):\n+ blob_name = blob_name.replace('/', '%2F')\n+\n+ status, content = await self.storage.download(self.name, blob_name,\n+ session=session)\n+\n+ if status < 200 or status >= 300:\n+ log.error('Could not download %s/%s: %s', self.name, blob_name,\n+ content)\n+ return\n+\n+ content = json.loads(content)\n+\n+ return Blob(self, blob_name, content)\n+\n+ async def list_blobs(self, prefix='', session=None):\n+ params = {'prefix': prefix}\n+\n+ status, content = await self.storage.list_objects(self.name,\n+ params=params,\n+ session=session)\n+\n+ if status < 200 or status >= 300:\n+ log.error('Could not list %s/%s: %s', self.name, prefix, content)\n+ return\n+\n+ return [x['name'] for x in content.get('items', list())]\n+\n+ def new_blob(self, blob_name):\n+ return Blob(self, blob_name, {'size': 0})\ndiff --git a/storage/gcloud/aio/storage/storage.py b/storage/gcloud/aio/storage/storage.py\n--- a/storage/gcloud/aio/storage/storage.py\n+++ b/storage/gcloud/aio/storage/storage.py\n@@ -1,18 +1,16 @@\n-import functools\n import logging\n-import mimetypes\n \n import aiohttp\n-import ujson\n from gcloud.aio.auth import Token\n-from gcloud.aio.core.http import get\n-from gcloud.aio.core.http import HttpError\n-from gcloud.aio.core.http import post\n+from gcloud.aio.storage.bucket import 
Bucket\n+try:\n+ import ujson as json\n+except ModuleNotFoundError:\n+ import json\n \n \n STORAGE_API_ROOT = 'https://www.googleapis.com/storage/v1/b'\n STORAGE_UPLOAD_API_ROOT = 'https://www.googleapis.com/upload/storage/v1/b'\n-READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/devstorage.read_only'\n READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/devstorage.read_write'\n \n log = logging.getLogger(__name__)\n@@ -28,64 +26,68 @@ def __init__(self, project, service_file, token=None, session=None):\n scopes=[READ_WRITE_SCOPE])\n \n async def download(self, bucket, object_name, params=None, session=None):\n- session = session or self.session\n-\n token = await self.token.get()\n- url = '{}/{}/o/{}'.format(STORAGE_API_ROOT, bucket, object_name)\n+ url = f'{STORAGE_API_ROOT}/{bucket}/o/{object_name}'\n headers = {\n- 'Authorization': 'Bearer {}'.format(token),\n+ 'Authorization': f'Bearer {token}',\n }\n \n- return await get(url, params=params or {}, headers=headers,\n- session=self.session, json_response=False)\n-\n- async def list_objects(self, bucket, params=None, session=None):\n session = session or self.session\n+ response = await session.get(url, headers=headers, params=params or {},\n+ timeout=60)\n+ content = await response.text()\n+\n+ return response.status, content\n \n+ async def list_objects(self, bucket, params=None, session=None):\n token = await self.token.get()\n- url = '{}/{}/o'.format(STORAGE_API_ROOT, bucket)\n+ url = f'{STORAGE_API_ROOT}/{bucket}/o'\n headers = {\n- 'Authorization': 'Bearer {}'.format(token),\n+ 'Authorization': f'Bearer {token}',\n }\n \n- return await get(url, params=params or {}, headers=headers,\n- session=self.session, json_response=True)\n+ session = session or self.session\n+ response = await session.get(url, headers=headers, params=params or {},\n+ timeout=60)\n+ content = await response.json()\n+\n+ return response.status, content\n \n async def upload(self, bucket, object_name, file_data, headers=None,\n session=None):\n # pylint: disable=too-many-arguments\n # https://cloud.google.com/storage/docs/json_api/v1/how-tos/simple-upload\n- session = session or self.session\n-\n token = await self.token.get()\n- url = '{}/{}/o'.format(STORAGE_UPLOAD_API_ROOT, bucket)\n+ url = f'{STORAGE_UPLOAD_API_ROOT}/{bucket}/o'\n headers = headers or {}\n \n- # TODO: verify this\n- if not isinstance(file_data, bytes):\n- body = file_data.encode('utf-8')\n- else:\n- body = file_data\n-\n- body_length = str(len(body))\n-\n params = {\n 'name': object_name,\n 'uploadType': 'media',\n }\n \n- content_type = mimetypes.guess_type(object_name)[0]\n- content_type = content_type or 'application/octet-stream'\n+ if not isinstance(file_data, bytes):\n+ file_data = file_data.encode('utf-8')\n+\n+ if file_data:\n+ file_data = json.dumps(file_data).encode('utf-8')\n+ content_length = str(len(file_data))\n+ else:\n+ content_length = '0'\n \n headers.update({\n- 'accept': 'application/json',\n- 'Authorization': 'Bearer {}'.format(token),\n- 'Content-Length': body_length,\n- 'Content-Type': content_type,\n+ 'Accept': 'application/json',\n+ 'Authorization': f'Bearer {token}',\n+ 'Content-Length': content_length,\n+ 'Content-Type': 'application/json',\n })\n \n- return await post(url, params=params, payload=body, headers=headers,\n- timeout=120, session=session)\n+ session = session or self.session\n+ response = await session.post(url, data=file_data, headers=headers,\n+ params=params, timeout=120)\n+ content = await response.json()\n+\n+ return response.status, content\n \n 
async def download_as_string(self, bucket, object_name, session=None):\n object_name = object_name.replace('/', '%2F')\n@@ -98,88 +100,3 @@ async def download_as_string(self, bucket, object_name, session=None):\n \n def get_bucket(self, bucket_name):\n return Bucket(self, bucket_name)\n-\n-\n-class Bucket:\n- def __init__(self, storage, name):\n- self.storage = storage\n- self.name = name\n-\n- async def get_blob(self, blob_name, session=None):\n- blob_name = blob_name.replace('/', '%2F')\n-\n- status, content = await self.storage.download(self.name, blob_name,\n- session=session)\n-\n- if status < 200 or status >= 300:\n- log.error('Could not download %s/%s: %s', self.name, blob_name,\n- content)\n- return\n-\n- content = ujson.loads(content)\n-\n- return Blob(self, blob_name, content)\n-\n- async def list_blobs(self, prefix='', session=None):\n- params = {'prefix': prefix}\n-\n- status, content = await self.storage.list_objects(self.name,\n- params=params,\n- session=session)\n-\n- if status < 200 or status >= 300:\n- log.error('Could not list %s/%s: %s', self.name, prefix, content)\n- return\n-\n- return [x['name'] for x in content.get('items', list())]\n-\n- def new_blob(self, blob_name):\n- return Blob(self, blob_name, {'size': 0})\n-\n-\n-class Blob:\n- def __init__(self, bucket, name, data):\n- self.__dict__.update(**data)\n-\n- self.bucket = bucket\n- self.name = name\n- self.size = int(self.size)\n-\n- @property\n- def chunk_size(self):\n- return self.size + (262144 - (self.size % 262144))\n-\n- async def download_as_string(self, session=None):\n- return await self.bucket.storage.download_as_string(self.bucket.name,\n- self.name,\n- session=session)\n-\n- async def upload_from_string(self, data, session=None):\n- status, content = await self.bucket.storage.upload(self.bucket.name,\n- self.name, data,\n- session=session)\n-\n- if status < 200 or status >= 300:\n- raise HttpError('{}: {}'.format(status, ujson.dumps(content)))\n-\n- self.__dict__.update(content)\n- return content\n-\n-\n-async def download(bucket, object_name):\n- blob = await bucket.get_blob(object_name)\n- if not blob:\n- raise Exception('No such object \"{}/{}\"'.format(bucket.name,\n- object_name))\n-\n- return await blob.download_as_string()\n-\n-\n-def make_download(project, service_file, bucket_name, session=None,\n- token=None):\n- token = token or Token(project, service_file, scopes=[READ_ONLY_SCOPE])\n-\n- storage = Storage(project, service_file, session=session, token=token)\n- bucket = storage.get_bucket(bucket_name)\n-\n- return functools.partial(download, bucket)\ndiff --git a/storage/gcloud/aio/storage/utils.py b/storage/gcloud/aio/storage/utils.py\nnew file mode 100644\n--- /dev/null\n+++ b/storage/gcloud/aio/storage/utils.py\n@@ -0,0 +1,25 @@\n+import functools\n+\n+from gcloud.aio.auth import Token\n+from gcloud.aio.storage.storage import Storage\n+\n+\n+READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/devstorage.read_only'\n+\n+\n+async def download(bucket, object_name):\n+ blob = await bucket.get_blob(object_name)\n+ if not blob:\n+ raise Exception(f'No such object \"{bucket.name}/{object_name}\"')\n+\n+ return await blob.download_as_string()\n+\n+\n+def make_download(project, service_file, bucket_name, session=None,\n+ token=None):\n+ token = token or Token(project, service_file, scopes=[READ_ONLY_SCOPE])\n+\n+ storage = Storage(project, service_file, session=session, token=token)\n+ bucket = storage.get_bucket(bucket_name)\n+\n+ return functools.partial(download, bucket)\ndiff --git 
a/storage/nox.py b/storage/nox.py\n--- a/storage/nox.py\n+++ b/storage/nox.py\n@@ -4,14 +4,14 @@\n import nox\n \n \n-LOCAL_DEPS = ('../core/', '../auth/')\n+LOCAL_DEPS = ('../auth/', )\n \n \n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def unit_tests(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'unit-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'unit-{python_version}'\n \n session.install('pytest', 'pytest-cov', *LOCAL_DEPS)\n session.install('-e', '.')\n@@ -34,8 +34,8 @@ def integration_tests(session, python_version):\n if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):\n session.skip('Credentials must be set via environment variable.')\n \n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'integration-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'integration-{python_version}'\n \n session.install('aiohttp', 'pytest', *LOCAL_DEPS)\n session.install('.')\n@@ -46,7 +46,7 @@ def integration_tests(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def lint_setup_py(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'setup'\n \n session.install('docutils', 'Pygments')\n@@ -61,7 +61,7 @@ def lint_setup_py(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def cover(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'cover'\n \n session.install('codecov', 'coverage', 'pytest-cov')\ndiff --git a/taskqueue/gcloud/aio/taskqueue/taskmanager.py b/taskqueue/gcloud/aio/taskqueue/taskmanager.py\n--- a/taskqueue/gcloud/aio/taskqueue/taskmanager.py\n+++ b/taskqueue/gcloud/aio/taskqueue/taskmanager.py\n@@ -59,9 +59,9 @@ async def stop(self):\n \n @staticmethod\n def autorenew(event, headers, task, lease_seconds):\n- url = '{}/{}:renewLease'.format(API_ROOT, task['name'])\n+ url = f'{API_ROOT}/{task[\"name\"]}:renewLease'\n body = {\n- 'leaseDuration': '{}s'.format(lease_seconds),\n+ 'leaseDuration': f'{lease_seconds}s',\n 'responseView': 'FULL',\n }\n \ndiff --git a/taskqueue/gcloud/aio/taskqueue/taskqueue.py b/taskqueue/gcloud/aio/taskqueue/taskqueue.py\n--- a/taskqueue/gcloud/aio/taskqueue/taskqueue.py\n+++ b/taskqueue/gcloud/aio/taskqueue/taskqueue.py\n@@ -28,21 +28,22 @@ def __init__(self, project, service_file, taskqueue, location=LOCATION,\n self.session = session or aiohttp.ClientSession(conn_timeout=10,\n read_timeout=10)\n \n- self.api_root = '{}/projects/{}/locations/{}/queues/{}'.format(\n- API_ROOT, project, location, taskqueue)\n+ self.api_root = (f'{API_ROOT}/projects/{project}/'\n+ f'locations/{location}/queues/{taskqueue}')\n \n self.token = token or Token(project, service_file, scopes=SCOPES,\n session=self.session)\n \n async def headers(self):\n+ token = await self.token.get()\n return {\n- 'Authorization': 'Bearer {}'.format(await self.token.get()),\n+ 'Authorization': f'Bearer {token}',\n 'Content-Type': 'application/json',\n }\n \n # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/acknowledge\n async def ack(self, task, session=None):\n- url = '{}/{}:acknowledge'.format(API_ROOT, 
task['name'])\n+ url = f'{API_ROOT}/{task[\"name\"]}:acknowledge'\n body = {\n 'scheduleTime': task['scheduleTime'],\n }\n@@ -55,7 +56,7 @@ async def ack(self, task, session=None):\n \n # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/cancelLease\n async def cancel(self, task, session=None):\n- url = '{}/{}:cancelLease'.format(API_ROOT, task['name'])\n+ url = f'{API_ROOT}/{task[\"name\"]}:cancelLease'\n body = {\n 'scheduleTime': task['scheduleTime'],\n 'responseView': 'BASIC',\n@@ -69,7 +70,7 @@ async def cancel(self, task, session=None):\n \n # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/delete\n async def delete(self, tname, session=None):\n- url = '{}/{}'.format(API_ROOT, tname)\n+ url = f'{API_ROOT}/{tname}'\n \n s = session or self.session\n resp = await retry(s.delete(url, headers=await self.headers()))\n@@ -84,7 +85,7 @@ async def drain(self):\n \n # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/get\n async def get(self, tname, full=False, session=None):\n- url = '{}/{}'.format(API_ROOT, tname)\n+ url = f'{API_ROOT}/{tname}'\n params = {\n 'responseView': 'FULL' if full else 'BASIC',\n }\n@@ -97,7 +98,7 @@ async def get(self, tname, full=False, session=None):\n \n # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/create\n async def insert(self, payload, tag=None, session=None):\n- url = '{}/tasks'.format(self.api_root)\n+ url = f'{self.api_root}/tasks'\n body = {\n 'task': {\n 'pullMessage': {\n@@ -117,10 +118,10 @@ async def insert(self, payload, tag=None, session=None):\n # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/lease\n async def lease(self, num_tasks=1, lease_seconds=60, task_filter=None,\n session=None):\n- url = '{}/tasks:lease'.format(self.api_root)\n+ url = f'{self.api_root}/tasks:lease'\n body = {\n 'maxTasks': min(num_tasks, 1000),\n- 'leaseDuration': '{}s'.format(lease_seconds),\n+ 'leaseDuration': f'{lease_seconds}s',\n 'responseView': 'FULL',\n }\n if task_filter:\n@@ -135,7 +136,7 @@ async def lease(self, num_tasks=1, lease_seconds=60, task_filter=None,\n # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/list\n async def list(self, full=False, page_size=1000, page_token='',\n session=None):\n- url = '{}/tasks'.format(self.api_root)\n+ url = f'{self.api_root}/tasks'\n params = {\n 'responseView': 'FULL' if full else 'BASIC',\n 'pageSize': page_size,\n@@ -150,10 +151,10 @@ async def list(self, full=False, page_size=1000, page_token='',\n \n # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/renewLease\n async def renew(self, task, lease_seconds=60, session=None):\n- url = '{}/{}:renewLease'.format(API_ROOT, task['name'])\n+ url = f'{API_ROOT}/{task[\"name\"]}:renewLease'\n body = {\n 'scheduleTime': task['scheduleTime'],\n- 'leaseDuration': '{}s'.format(lease_seconds),\n+ 'leaseDuration': f'{lease_seconds}s',\n 'responseView': 'FULL',\n }\n \ndiff --git a/taskqueue/nox.py b/taskqueue/nox.py\n--- a/taskqueue/nox.py\n+++ b/taskqueue/nox.py\n@@ -10,8 +10,8 @@\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def unit_tests(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'unit-' + python_version\n+ session.interpreter = 
f'python{python_version}'\n+ session.virtualenv_dirname = f'unit-{python_version}'\n \n session.install('pytest', 'pytest-cov', *LOCAL_DEPS)\n session.install('-e', '.')\n@@ -34,8 +34,8 @@ def integration_tests(session, python_version):\n if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):\n session.skip('Credentials must be set via environment variable.')\n \n- session.interpreter = 'python{}'.format(python_version)\n- session.virtualenv_dirname = 'integration-' + python_version\n+ session.interpreter = f'python{python_version}'\n+ session.virtualenv_dirname = f'integration-{python_version}'\n \n session.install('pytest', 'pytest-mock', *LOCAL_DEPS)\n session.install('.')\n@@ -46,7 +46,7 @@ def integration_tests(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def lint_setup_py(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'setup'\n \n session.install('docutils', 'Pygments')\n@@ -61,7 +61,7 @@ def lint_setup_py(session, python_version):\n @nox.session\n @nox.parametrize('python_version', ['3.6'])\n def cover(session, python_version):\n- session.interpreter = 'python{}'.format(python_version)\n+ session.interpreter = f'python{python_version}'\n session.virtualenv_dirname = 'cover'\n \n session.install('codecov', 'coverage', 'pytest-cov')\n", "test_patch": "diff --git a/core/tests/__init__.py b/core/tests/__init__.py\ndeleted file mode 100644\ndiff --git a/core/tests/unit/__init__.py b/core/tests/unit/__init__.py\ndeleted file mode 100644\ndiff --git a/core/tests/unit/aio_test.py b/core/tests/unit/aio_test.py\ndeleted file mode 100644\n--- a/core/tests/unit/aio_test.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-import gcloud.aio.core.aio as aio # pylint: disable=unused-import\n-\n-\n-def test_importable():\n- assert True\ndiff --git a/core/tests/unit/astate_test.py b/core/tests/unit/astate_test.py\ndeleted file mode 100644\n--- a/core/tests/unit/astate_test.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-import gcloud.aio.core.astate as astate # pylint: disable=unused-import\n-\n-\n-def test_importable():\n- assert True\ndiff --git a/core/tests/unit/http_test.py b/core/tests/unit/http_test.py\ndeleted file mode 100644\n--- a/core/tests/unit/http_test.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-import gcloud.aio.core.http as http # pylint: disable=unused-import\n-\n-\n-def test_importable():\n- assert True\ndiff --git a/datastore/tests/integration/smoke_test.py b/datastore/tests/integration/smoke_test.py\n--- a/datastore/tests/integration/smoke_test.py\n+++ b/datastore/tests/integration/smoke_test.py\n@@ -24,7 +24,7 @@ def test_item_lifecycle():\n creds = os.environ['GOOGLE_APPLICATION_CREDENTIALS']\n \n kind_name = 'gcloud-aio-test'\n- object_name = 'test_record_{}'.format(uuid.uuid4())\n+ object_name = f'test_record_{uuid.uuid4()}'\n \n loop = asyncio.get_event_loop()\n loop.run_until_complete(\ndiff --git a/storage/tests/integration/smoke_test.py b/storage/tests/integration/smoke_test.py\n--- a/storage/tests/integration/smoke_test.py\n+++ b/storage/tests/integration/smoke_test.py\n@@ -21,7 +21,7 @@ def test_object_is_downloaded():\n call_id = '07fbe0cc-7f87-1235-06b0-0cc47a392728'\n side = 'callee'\n link = 0\n- object_name = '{}/{}/{}/rtp.pcap.wav.ctm'.format(call_id, side, link)\n+ object_name = f'{call_id}/{side}/{link}/rtp.pcap.wav.ctm'\n \n loop = asyncio.get_event_loop()\n loop.run_until_complete(\ndiff --git a/storage/tests/unit/blob_test.py 
b/storage/tests/unit/blob_test.py\nnew file mode 100644\n--- /dev/null\n+++ b/storage/tests/unit/blob_test.py\n@@ -0,0 +1,5 @@\n+import gcloud.aio.storage.blob as blob # pylint: disable=unused-import\n+\n+\n+def test_importable():\n+ assert True\ndiff --git a/storage/tests/unit/bucket_test.py b/storage/tests/unit/bucket_test.py\nnew file mode 100644\n--- /dev/null\n+++ b/storage/tests/unit/bucket_test.py\n@@ -0,0 +1,5 @@\n+import gcloud.aio.storage.bucket as bucket # pylint: disable=unused-import\n+\n+\n+def test_importable():\n+ assert True\ndiff --git a/storage/tests/unit/storage_test.py b/storage/tests/unit/storage_test.py\n--- a/storage/tests/unit/storage_test.py\n+++ b/storage/tests/unit/storage_test.py\n@@ -1,4 +1,4 @@\n-import gcloud.aio.storage as storage # pylint: disable=unused-import\n+import gcloud.aio.storage.storage as storage # pylint: disable=unused-import\n \n \n def test_importable():\ndiff --git a/storage/tests/unit/utils_test.py b/storage/tests/unit/utils_test.py\nnew file mode 100644\n--- /dev/null\n+++ b/storage/tests/unit/utils_test.py\n@@ -0,0 +1,5 @@\n+import gcloud.aio.storage.utils as utils # pylint: disable=unused-import\n+\n+\n+def test_importable():\n+ assert True\ndiff --git a/taskqueue/tests/integration/taskqueue_test.py b/taskqueue/tests/integration/taskqueue_test.py\n--- a/taskqueue/tests/integration/taskqueue_test.py\n+++ b/taskqueue/tests/integration/taskqueue_test.py\n@@ -26,8 +26,7 @@ async def do_task_lifecycle(project, creds, task_queue):\n assert inserted\n \n # GET\n- got = await tq.get(inserted['name'], full=True)\n- assert got == inserted\n+ assert inserted == await tq.get(inserted['name'], full=True)\n \n # LIST\n listed = await tq.list(full=True)\n@@ -36,14 +35,12 @@ async def do_task_lifecycle(project, creds, task_queue):\n \n # LEASE\n leased = await tq.lease(num_tasks=1, lease_seconds=10,\n- task_filter='tag={}'.format(encode(tag)))\n+ task_filter=f'tag={encode(tag)}')\n assert leased.get('tasks') and len(leased['tasks']) == 1\n \n leased_message = leased['tasks'][0]['pullMessage']\n- leased_payload = json.loads(decode(leased_message['payload']))\n- leased_tag = decode(leased_message['tag'])\n- assert leased_payload == payload\n- assert leased_tag == tag\n+ assert payload == json.loads(decode(leased_message['payload']))\n+ assert tag == decode(leased_message['tag'])\n \n # RENEW\n renewed = await tq.renew(leased['tasks'][0], lease_seconds=10)\n@@ -57,8 +54,7 @@ async def do_task_lifecycle(project, creds, task_queue):\n # cancel?\n \n # DELETE\n- result = await tq.delete(renewed['name'])\n- assert not result\n+ assert not await tq.delete(renewed['name'])\n \n \n def test_task_lifecycle():\n", "problem_statement": "", "hints_text": "", "created_at": "2018-05-01T17:56:36Z"}
PythonDataset/test/google-cloud-python-task-instances.jsonl.all ADDED
The diff for this file is too large to render. See raw diff
 
PythonDataset/test/isso-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
 
 
+ {"repo": "isso-comments/isso", "pull_number": 485, "instance_id": "isso-comments__isso-485", "issue_numbers": "", "base_commit": "8e37a88d6d2b5fa3485f9aff39ff4b452ce2f578", "patch": "diff --git a/isso/utils/html.py b/isso/utils/html.py\n--- a/isso/utils/html.py\n+++ b/isso/utils/html.py\n@@ -6,61 +6,53 @@\n \n from distutils.version import LooseVersion as Version\n \n-HTML5LIB_VERSION = Version(pkg_resources.get_distribution(\"html5lib\").version)\n-HTML5LIB_SIMPLETREE = Version(\"0.95\")\n-\n-import html5lib\n-from html5lib.sanitizer import HTMLSanitizer\n-from html5lib.serializer import HTMLSerializer\n-\n+import bleach\n import misaka\n \n \n-def Sanitizer(elements, attributes):\n-\n- class Inner(HTMLSanitizer):\n+class Sanitizer(object):\n \n+ def __init__(self, elements, attributes):\n # attributes found in Sundown's HTML serializer [1]\n # except for <img> tag,\n # because images are not generated anyways.\n #\n # [1] https://github.com/vmg/sundown/blob/master/html/html.c\n- allowed_elements = [\"a\", \"p\", \"hr\", \"br\", \"ol\", \"ul\", \"li\",\n+ self.elements = [\"a\", \"p\", \"hr\", \"br\", \"ol\", \"ul\", \"li\",\n \"pre\", \"code\", \"blockquote\",\n \"del\", \"ins\", \"strong\", \"em\",\n \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\",\n \"table\", \"thead\", \"tbody\", \"th\", \"td\"] + elements\n \n # href for <a> and align for <table>\n- allowed_attributes = [\"align\", \"href\"] + attributes\n-\n- # remove disallowed tokens from the output\n- def disallowed_token(self, token, token_type):\n- return None\n+ self.attributes = [\"align\", \"href\"] + attributes\n \n- return Inner\n \n \n-def sanitize(tokenizer, document):\n+ def sanitize(self, text):\n+ clean_html = bleach.clean(text, tags=self.elements,\n+ attributes=self.attributes, strip=True)\n \n- parser = html5lib.HTMLParser(tokenizer=tokenizer)\n- domtree = parser.parseFragment(document)\n+ def set_links(attrs, new=False):\n+ href_key = (None, u'href')\n \n- if HTML5LIB_VERSION > HTML5LIB_SIMPLETREE:\n- builder = \"etree\"\n+ if href_key not in attrs:\n+ return attrs\n+ if attrs[href_key].startswith(u'mailto:'):\n+ return attrs\n \n- for link in domtree.findall(\".//{http://www.w3.org/1999/xhtml}a\"):\n- if link.get('href', None):\n- link.set(\"rel\", \"nofollow noopener\")\n+ rel_key = (None, u'rel')\n+ rel_values = [val for val in attrs.get(rel_key, u'').split(u' ') if val]\n \n- else:\n- builder = \"simpletree\"\n+ for value in [u'nofollow', u'noopener']:\n+ if value not in [rel_val.lower() for rel_val in rel_values]:\n+ rel_values.append(value)\n \n- stream = html5lib.treewalkers.getTreeWalker(builder)(domtree)\n- serializer = HTMLSerializer(\n- quote_attr_values=True, omit_optional_tags=False)\n+ attrs[rel_key] = u' '.join(rel_values)\n+ return attrs\n \n- return serializer.render(stream)\n+ linker = bleach.linkifier.Linker(callbacks=[set_links])\n+ return linker.linkify(clean_html)\n \n \n def Markdown(extensions=(\"strikethrough\", \"superscript\", \"autolink\",\n@@ -100,7 +92,7 @@ def __init__(self, conf):\n conf.getlist(\"allowed-elements\"),\n conf.getlist(\"allowed-attributes\"))\n \n- self._render = lambda text: sanitize(sanitizer, parser(text))\n+ self._render = lambda text: sanitizer.sanitize(parser(text))\n \n def render(self, text):\n return self._render(text)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,8 +5,8 @@\n \n from setuptools import setup, find_packages\n \n-requires = ['itsdangerous', 'Jinja2', 'misaka>=2.0,<3.0', 'html5lib<0.9999999',\n- 
'werkzeug>=0.9']\n+requires = ['itsdangerous', 'Jinja2', 'misaka>=2.0,<3.0', 'html5lib',\n+ 'werkzeug>=0.9', 'bleach']\n \n if sys.version_info < (2, 7):\n raise SystemExit(\"Python 2 versions < 2.7 are not supported.\")\n", "test_patch": "diff --git a/isso/tests/test_html.py b/isso/tests/test_html.py\n--- a/isso/tests/test_html.py\n+++ b/isso/tests/test_html.py\n@@ -59,7 +59,6 @@ def test_github_flavoured_markdown(self):\n print(\"Hello, World\")\n </code></pre>\"\"\")\n \n- @unittest.skipIf(html.HTML5LIB_VERSION <= html.HTML5LIB_SIMPLETREE, \"backport\")\n def test_sanitizer(self):\n sanitizer = html.Sanitizer(elements=[], attributes=[])\n examples = [\n@@ -73,11 +72,10 @@ def test_sanitizer(self):\n \n for (input, expected) in examples:\n if isinstance(expected, list):\n- self.assertIn(html.sanitize(sanitizer, input), expected)\n+ self.assertIn(sanitizer.sanitize(input), expected)\n else:\n- self.assertEqual(html.sanitize(sanitizer, input), expected)\n+ self.assertEqual(sanitizer.sanitize(input), expected)\n \n- @unittest.skipIf(html.HTML5LIB_VERSION <= html.HTML5LIB_SIMPLETREE, \"backport\")\n def test_sanitizer_extensions(self):\n sanitizer = html.Sanitizer(elements=[\"img\"], attributes=[\"src\"])\n examples = [\n@@ -85,7 +83,7 @@ def test_sanitizer_extensions(self):\n ('<script src=\"doge.js\"></script>', '')]\n \n for (input, expected) in examples:\n- self.assertEqual(html.sanitize(sanitizer, input), expected)\n+ self.assertEqual(sanitizer.sanitize(input), expected)\n \n def test_render(self):\n conf = config.new({\n", "problem_statement": "", "hints_text": "", "created_at": "2018-10-01T02:51:13Z"}
PythonDataset/test/libcloud-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
 
 
+ {"repo": "apache/libcloud", "pull_number": 1280, "instance_id": "apache__libcloud-1280", "issue_numbers": "", "base_commit": "e8735a71023a46c24b715830358ee59039b45704", "patch": "diff --git a/libcloud/common/google.py b/libcloud/common/google.py\n--- a/libcloud/common/google.py\n+++ b/libcloud/common/google.py\n@@ -24,7 +24,7 @@\n Both are initially set up from the Cloud Console Console -\n https://cloud.google.com/console\n \n-Setting up Service Account authentication (note that you need the PyCrypto\n+Setting up Service Account authentication (note that you need the cryptography\n package installed to use this):\n \n - Go to the Console\n@@ -89,16 +89,13 @@\n LibcloudError)\n \n try:\n- from Crypto.Hash import SHA256\n- from Crypto.PublicKey import RSA\n- from Crypto.Signature import PKCS1_v1_5\n- import Crypto.Random\n- Crypto.Random.atfork()\n+ from cryptography.hazmat.backends import default_backend\n+ from cryptography.hazmat.primitives import serialization\n+ from cryptography.hazmat.primitives.hashes import SHA256\n+ from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15\n except ImportError:\n- # The pycrypto library is unavailable\n+ # The cryptography library is unavailable\n SHA256 = None\n- RSA = None\n- PKCS1_v1_5 = None\n \n UTC_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'\n \n@@ -472,8 +469,8 @@ class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):\n \"\"\"Authentication class for \"Service Account\" authentication.\"\"\"\n def __init__(self, user_id, key, *args, **kwargs):\n \"\"\"\n- Check to see if PyCrypto is available, and convert key file path into a\n- key string if the key is in a file.\n+ Check to see if cryptography is available, and convert key file path\n+ into a key string if the key is in a file.\n \n :param user_id: Email address to be used for Service Account\n authentication.\n@@ -483,7 +480,7 @@ def __init__(self, user_id, key, *args, **kwargs):\n :type key: ``str``\n \"\"\"\n if SHA256 is None:\n- raise GoogleAuthError('PyCrypto library required for '\n+ raise GoogleAuthError('cryptography library required for '\n 'Service Account Authentication.')\n # Check to see if 'key' is a file and read the file if it is.\n if key.find(\"PRIVATE KEY---\") == -1:\n@@ -526,10 +523,17 @@ def get_new_token(self):\n # The message contains both the header and claim set\n message = b'.'.join((header_enc, claim_set_enc))\n # Then the message is signed using the key supplied\n- key = RSA.importKey(self.key)\n- hash_func = SHA256.new(message)\n- signer = PKCS1_v1_5.new(key)\n- signature = base64.urlsafe_b64encode(signer.sign(hash_func))\n+ key = serialization.load_pem_private_key(\n+ b(self.key),\n+ password=None,\n+ backend=default_backend()\n+ )\n+ signature = key.sign(\n+ data=b(message),\n+ padding=PKCS1v15(),\n+ algorithm=SHA256()\n+ )\n+ signature = base64.urlsafe_b64encode(signature)\n \n # Finally the message and signature are sent to get a token\n jwt = b'.'.join((message, signature))\ndiff --git a/libcloud/compute/drivers/softlayer.py b/libcloud/compute/drivers/softlayer.py\n--- a/libcloud/compute/drivers/softlayer.py\n+++ b/libcloud/compute/drivers/softlayer.py\n@@ -18,7 +18,9 @@\n \n import time\n try:\n- from Crypto.PublicKey import RSA\n+ from cryptography.hazmat.primitives.asymmetric import rsa\n+ from cryptography.hazmat.backends import default_backend\n+ from cryptography.hazmat.primitives import serialization\n crypto = True\n except ImportError:\n crypto = False\n@@ -386,17 +388,29 @@ def get_key_pair(self, name):\n def 
create_key_pair(self, name, ex_size=4096):\n if crypto is False:\n raise NotImplementedError('create_key_pair needs'\n- 'the pycrypto library')\n- key = RSA.generate(ex_size)\n+ 'the cryptography library')\n+ key = rsa.generate_private_key(\n+ public_exponent=65537,\n+ key_size=4096,\n+ backend=default_backend()\n+ )\n+ public_key = key.public_key().public_bytes(\n+ encoding=serialization.Encoding.OpenSSH,\n+ format=serialization.PublicFormat.OpenSSH\n+ )\n new_key = {\n- 'key': key.publickey().exportKey('OpenSSH'),\n+ 'key': public_key,\n 'label': name,\n 'notes': '',\n }\n result = self.connection.request(\n 'SoftLayer_Security_Ssh_Key', 'createObject', new_key\n ).object\n- result['private'] = key.exportKey('PEM')\n+ result['private'] = key.private_bytes(\n+ encoding=serialization.Encoding.PEM,\n+ format=serialization.PrivateFormat.TraditionalOpenSSL,\n+ encryption_algorithm=serialization.NoEncryption()\n+ )\n return self._to_key_pair(result)\n \n def import_key_pair_from_string(self, name, key_material):\ndiff --git a/libcloud/utils/publickey.py b/libcloud/utils/publickey.py\n--- a/libcloud/utils/publickey.py\n+++ b/libcloud/utils/publickey.py\n@@ -17,7 +17,7 @@\n import hashlib\n \n from libcloud.utils.py3 import hexadigits\n-from libcloud.utils.py3 import bchr\n+from libcloud.utils.py3 import b\n \n __all__ = [\n 'get_pubkey_openssh_fingerprint',\n@@ -26,11 +26,11 @@\n ]\n \n try:\n- from Crypto.Util.asn1 import DerSequence, DerObject\n- from Crypto.PublicKey.RSA import algorithmIdentifier, importKey\n- pycrypto_available = True\n+ from cryptography.hazmat.backends import default_backend\n+ from cryptography.hazmat.primitives import serialization\n+ cryptography_available = True\n except ImportError:\n- pycrypto_available = False\n+ cryptography_available = False\n \n \n def _to_md5_fingerprint(data):\n@@ -40,25 +40,33 @@ def _to_md5_fingerprint(data):\n \n def get_pubkey_openssh_fingerprint(pubkey):\n # We import and export the key to make sure it is in OpenSSH format\n- if not pycrypto_available:\n- raise RuntimeError('pycrypto is not available')\n- k = importKey(pubkey)\n- pubkey = k.exportKey('OpenSSH')[7:]\n- decoded = base64.decodestring(pubkey)\n- return _to_md5_fingerprint(decoded)\n+ if not cryptography_available:\n+ raise RuntimeError('cryptography is not available')\n+ public_key = serialization.load_ssh_public_key(\n+ b(pubkey),\n+ backend=default_backend()\n+ )\n+ pub_openssh = public_key.public_bytes(\n+ encoding=serialization.Encoding.OpenSSH,\n+ format=serialization.PublicFormat.OpenSSH,\n+ )[7:] # strip ssh-rsa prefix\n+ return _to_md5_fingerprint(base64.decodestring(pub_openssh))\n \n \n def get_pubkey_ssh2_fingerprint(pubkey):\n # This is the format that EC2 shows for public key fingerprints in its\n # KeyPair mgmt API\n- if not pycrypto_available:\n- raise RuntimeError('pycrypto is not available')\n- k = importKey(pubkey)\n- derPK = DerSequence([k.n, k.e])\n- bitmap = DerObject('BIT STRING')\n- bitmap.payload = bchr(0x00) + derPK.encode()\n- der = DerSequence([algorithmIdentifier, bitmap.encode()])\n- return _to_md5_fingerprint(der.encode())\n+ if not cryptography_available:\n+ raise RuntimeError('cryptography is not available')\n+ public_key = serialization.load_ssh_public_key(\n+ b(pubkey),\n+ backend=default_backend()\n+ )\n+ pub_der = public_key.public_bytes(\n+ encoding=serialization.Encoding.DER,\n+ format=serialization.PublicFormat.SubjectPublicKeyInfo,\n+ )\n+ return _to_md5_fingerprint(pub_der)\n \n \n def get_pubkey_comment(pubkey, default=None):\ndiff 
--git a/libcloud/utils/py3.py b/libcloud/utils/py3.py\n--- a/libcloud/utils/py3.py\n+++ b/libcloud/utils/py3.py\n@@ -126,7 +126,7 @@ def tostring(node):\n \n def hexadigits(s):\n # s needs to be a byte string.\n- return [format(x, \"x\") for x in s]\n+ return [format(x, \"02x\") for x in s]\n \n else:\n import httplib # NOQA\n", "test_patch": "diff --git a/libcloud/test/common/test_google.py b/libcloud/test/common/test_google.py\n--- a/libcloud/test/common/test_google.py\n+++ b/libcloud/test/common/test_google.py\n@@ -40,9 +40,9 @@\n from libcloud.utils.py3 import httplib\n \n \n-# Skip some tests if PyCrypto is unavailable\n+# Skip some tests if cryptography is unavailable\n try:\n- from Crypto.Hash import SHA256\n+ from cryptography.hazmat.primitives.hashes import SHA256\n except ImportError:\n SHA256 = None\n \ndiff --git a/libcloud/test/compute/test_softlayer.py b/libcloud/test/compute/test_softlayer.py\n--- a/libcloud/test/compute/test_softlayer.py\n+++ b/libcloud/test/compute/test_softlayer.py\n@@ -16,12 +16,6 @@\n import unittest\n import sys\n import pytest\n-try:\n- import Crypto\n- Crypto\n- crypto = True\n-except ImportError:\n- crypto = False\n \n from libcloud.common.types import InvalidCredsError\n \n@@ -170,17 +164,13 @@ def test_get_key_pair_does_not_exist(self):\n \n @pytest.mark.skip(reason=\"no way of currently testing this\")\n def test_create_key_pair(self):\n- if crypto:\n- key_pair = self.driver.create_key_pair(name='my-key-pair')\n- fingerprint = ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d'\n- ':37:2d:7d:b8:ca:9f:f5:f1:6f')\n-\n- self.assertEqual(key_pair.name, 'my-key-pair')\n- self.assertEqual(key_pair.fingerprint, fingerprint)\n- self.assertTrue(key_pair.private_key is not None)\n- else:\n- self.assertRaises(NotImplementedError, self.driver.create_key_pair,\n- name='my-key-pair')\n+ key_pair = self.driver.create_key_pair(name='my-key-pair')\n+ fingerprint = ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d'\n+ ':37:2d:7d:b8:ca:9f:f5:f1:6f')\n+\n+ self.assertEqual(key_pair.name, 'my-key-pair')\n+ self.assertEqual(key_pair.fingerprint, fingerprint)\n+ self.assertTrue(key_pair.private_key is not None)\n \n def test_delete_key_pair(self):\n success = self.driver.delete_key_pair('test1')\ndiff --git a/libcloud/test/test_utils.py b/libcloud/test/test_utils.py\n--- a/libcloud/test/test_utils.py\n+++ b/libcloud/test/test_utils.py\n@@ -47,6 +47,10 @@\n from libcloud.utils.networking import increment_ipv4_segments\n from libcloud.utils.decorators import wrap_non_libcloud_exceptions\n from libcloud.utils.connection import get_response_object\n+from libcloud.utils.publickey import (\n+ get_pubkey_openssh_fingerprint,\n+ get_pubkey_ssh2_fingerprint,\n+)\n from libcloud.common.types import LibcloudError\n from libcloud.storage.drivers.dummy import DummyIterator\n \n@@ -385,6 +389,26 @@ def test_increment_ipv4_segments(self):\n self.assertEqual(result, incremented_ip)\n \n \n+class TestPublicKeyUtils(unittest.TestCase):\n+\n+ PUBKEY = (\n+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDOfbWSXOlqvYjZmRO84/lIoV4gvuX+'\n+ 'P1lLg50MMg6jZjLZIlYY081XPRmuom0xY0+BO++J2KgLl7gxJ6xMsKK2VQ+TakdfAH20'\n+ 'XfMcTohd/zVCeWsbqZQvEhVXBo4hPIktcfNz0u9Ez3EtInO+kb7raLcRhOVi9QmOkOrC'\n+ 'WtQU9mS71AWJuqI9H0YAnTiI8Hs5bn2tpMIqmTXT3g2bwywC25x1Nx9Hy0/FP+KUL6Ag'\n+ 'vDXv47l+TgSDfTBEkvq+IF1ITrnaOG+nRE02oZC6cwHYTifM/IOollkujxIQmi2Z+j66'\n+ 'OHSrjnEQugr0FqGJF2ygKfIh/i2u3fVLM60qE2NN user@example'\n+ )\n+\n+ def test_pubkey_openssh_fingerprint(self):\n+ fp = get_pubkey_openssh_fingerprint(self.PUBKEY)\n+ self.assertEqual(fp, 
'35:22:13:5b:82:e2:5d:e1:90:8c:73:74:9f:ef:3b:d8')\n+\n+ def test_pubkey_ssh2_fingerprint(self):\n+ fp = get_pubkey_ssh2_fingerprint(self.PUBKEY)\n+ self.assertEqual(fp, '11:ad:5d:4c:5b:99:c9:80:7e:81:03:76:5a:25:9d:8c')\n+\n+\n def test_decorator():\n \n @wrap_non_libcloud_exceptions\n", "problem_statement": "", "hints_text": "", "created_at": "2019-03-11T14:45:18Z"}
PythonDataset/test/maintainer-tools-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
 
 
+ {"repo": "OCA/maintainer-tools", "pull_number": 262, "instance_id": "OCA__maintainer-tools-262", "issue_numbers": "", "base_commit": "405113684e304b2294782a3732618ff3a7ef5705", "patch": "diff --git a/tools/pypi_upload_wheels.py b/tools/pypi_upload_wheels.py\nnew file mode 100644\n--- /dev/null\n+++ b/tools/pypi_upload_wheels.py\n@@ -0,0 +1,182 @@\n+import anydbm\n+import contextlib\n+import logging\n+import os\n+import requests\n+import subprocess\n+from wheel.install import WheelFile\n+from ConfigParser import RawConfigParser\n+from pkg_resources import parse_version\n+\n+import click\n+\n+_logger = logging.getLogger(__name__)\n+\n+\n+def _split_wheelfilename(wheelfilename):\n+ wheelfile = WheelFile(wheelfilename)\n+ package_name = wheelfile.parsed_filename.group('name')\n+ package_name = package_name.replace('_', '-')\n+ package_ver = wheelfile.parsed_filename.group('ver')\n+ package_ver = parse_version(package_ver)\n+ return package_name, package_ver\n+\n+\n+class OcaPypi(object):\n+ \"\"\"A wrapper around twine, with caching\n+ to avoid multiple useless upload attempts for the same file.\"\"\"\n+\n+ def __init__(self, pypirc, repository, cache, dryrun):\n+ parser = RawConfigParser()\n+ parser.read(pypirc)\n+ self.pypirc = pypirc\n+ self.repository = repository\n+ self.repository_url = parser.get(repository, 'repository')\n+ self.cache = cache\n+ self.dryrun = dryrun\n+\n+ def _make_key(self, wheelfilename):\n+ return str(self.repository_url + '#' + os.path.basename(wheelfilename))\n+\n+ def _key_match(self, key):\n+ return key.startswith(self.repository_url + '#')\n+\n+ def _key_to_wheel(self, key):\n+ return key[len(self.repository_url) + 1:]\n+\n+ def _registered(self, wheelfilename):\n+ package_name, package_ver = _split_wheelfilename(wheelfilename)\n+ package_url = self.repository_url + '/' + package_name\n+ r = requests.head(package_url)\n+ return r.status_code == 200\n+\n+ def _register(self, wheelfilename):\n+ cmd = ['twine', 'register', '--config-file', self.pypirc,\n+ '-r', self.repository, wheelfilename]\n+ if not self.dryrun:\n+ try:\n+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n+ except subprocess.CalledProcessError as e:\n+ if \"HTTPError: 400 Client Error\" in e.output:\n+ return e.output\n+ raise\n+ else:\n+ _logger.info(\"dryrun: %s\", cmd)\n+\n+ def _upload(self, wheelfilename):\n+ cmd = ['twine', 'upload', '--config-file', self.pypirc,\n+ '-r', self.repository, '--skip-existing', wheelfilename]\n+ if not self.dryrun:\n+ try:\n+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n+ except subprocess.CalledProcessError as e:\n+ if \"HTTPError: 400 Client Error\" in e.output:\n+ return e.output\n+ raise\n+ else:\n+ _logger.info(\"dryrun: %s\", cmd)\n+\n+ def upload_wheel(self, wheelfilename):\n+ key = self._make_key(wheelfilename)\n+ with contextlib.closing(anydbm.open(self.cache, 'c')) as dbm:\n+ if key in dbm:\n+ value = dbm[key]\n+ detail = '' if not value else ' (with error)'\n+ _logger.debug(\"skipped %s: found in cache%s\",\n+ wheelfilename, detail)\n+ return\n+ if not self._registered(wheelfilename):\n+ _logger.info(\"registering %s to %s\",\n+ wheelfilename, self.repository_url)\n+ r = self._register(wheelfilename)\n+ if r:\n+ # registration failed, store the error in cache\n+ # so we don't try again, and do not try to upload\n+ _logger.error(\"registering %s to %s failed: %s\",\n+ wheelfilename, self.repository_url, r)\n+ if not self.dryrun:\n+ dbm[key] = r or ''\n+ return\n+ _logger.info(\"uploading %s to %s\",\n+ wheelfilename, 
self.repository_url)\n+ r = self._upload(wheelfilename)\n+ if r:\n+ _logger.error(\"uploading %s to %s failed: %s\",\n+ wheelfilename, self.repository_url, r)\n+ if not self.dryrun:\n+ dbm[key] = r or ''\n+\n+ def upload_wheels(self, wheelfilenames):\n+ to_upload = []\n+ for wheelfilename in wheelfilenames:\n+ if os.path.isfile(wheelfilename) and \\\n+ wheelfilename.lower().endswith('.whl'):\n+ to_upload.append(wheelfilename)\n+ else:\n+ _logger.warn(\"skipped %s: not a wheel file\", wheelfilename)\n+ for wheelfilename in sorted(to_upload, key=_split_wheelfilename):\n+ self.upload_wheel(wheelfilename)\n+\n+ def cache_print_errors(self):\n+ with contextlib.closing(anydbm.open(self.cache, 'r')) as dbm:\n+ for key, value in dbm.items():\n+ if not self._key_match(key):\n+ continue\n+ if value:\n+ wheel = self._key_to_wheel(key)\n+ click.echo(u\"{}: {}\".format(wheel, value))\n+\n+ def cache_rm_wheels(self, wheelfilenames):\n+ with contextlib.closing(anydbm.open(self.cache, 'w')) as dbm:\n+ for wheelfilename in wheelfilenames:\n+ wheelfilename = os.path.basename(wheelfilename)\n+ key = self._make_key(wheelfilename)\n+ if key in dbm:\n+ del dbm[key]\n+\n+\n+@click.group()\n+@click.option('--pypirc', required=True)\n+@click.option('--repository', required=True)\n+@click.option('--cache', required=True)\n+@click.option('--dryrun/--no-dryrun', default=False)\n+@click.option('--debug/--no-debug', default=False)\n+@click.pass_context\n+def cli(ctx, pypirc, repository, cache, dryrun, debug):\n+ if debug:\n+ level = logging.DEBUG\n+ else:\n+ level = logging.INFO\n+ logging.basicConfig(\n+ format='%(asctime)s:%(levelname)s:%(message)s',\n+ level=level)\n+ ctx.obj = OcaPypi(pypirc, repository, cache, dryrun)\n+\n+\n+@click.command()\n+@click.argument('wheels', nargs=-1)\n+@click.pass_context\n+def upload(ctx, wheels):\n+ ctx.obj.upload_wheels(wheels)\n+\n+\n+@click.command()\n+@click.pass_context\n+def cache_print_errors(ctx):\n+ ctx.obj.cache_print_errors()\n+\n+\n+@click.command()\n+@click.argument('wheels', nargs=-1)\n+@click.pass_context\n+def cache_rm_wheels(ctx, wheels):\n+ ctx.obj.cache_rm_wheels(wheels)\n+\n+\n+cli.add_command(upload)\n+cli.add_command(cache_print_errors)\n+cli.add_command(cache_rm_wheels)\n+\n+\n+if __name__ == '__main__':\n+ cli()\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2017-03-05T17:20:05Z"}
PythonDataset/test/mimic-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
 
 
+ {"repo": "rackerlabs/mimic", "pull_number": 538, "instance_id": "rackerlabs__mimic-538", "issue_numbers": "", "base_commit": "d3ae1a11db45e4c695f012e10762aaa0ec81941d", "patch": "diff --git a/mimic/model/clb_objects.py b/mimic/model/clb_objects.py\n--- a/mimic/model/clb_objects.py\n+++ b/mimic/model/clb_objects.py\n@@ -12,8 +12,6 @@\n \n import attr\n \n-from characteristic import attributes, Attribute\n-\n from six import text_type\n \n from toolz.dicttoolz import dissoc\n@@ -157,20 +155,40 @@ def full_json(self):\n return result\n \n \n-@attributes([\"keys\"])\n+@attr.s\n class BadKeysError(Exception):\n \"\"\"\n When trying to alter the settings of a load balancer, this exception will\n be raised if you attempt to alter an attribute which doesn't exist.\n \"\"\"\n+ keys = attr.ib()\n+ code = attr.ib(validator=attr.validators.instance_of(int), default=400)\n+\n+ def to_json(self):\n+ \"\"\"\n+ :return: a JSON dict representation of this error.\n+ \"\"\"\n+ return {'message': 'Attempt to alter a bad attribute',\n+ 'code': self.code}\n \n \n-@attributes([\"value\", \"accepted_values\"])\n+@attr.s\n class BadValueError(Exception):\n \"\"\"\n When trying to alter the settings of a load balancer, this exception will\n be raised if you attempt to set a valid attribute to an invalid setting.\n \"\"\"\n+ value = attr.ib()\n+ accepted_values = attr.ib()\n+ code = attr.ib(validator=attr.validators.instance_of(int), default=400)\n+\n+ def to_json(self):\n+ \"\"\"\n+ :return: a JSON dict representation of this error.\n+ \"\"\"\n+ return {'message': ('Unsupported status {0} not one of {1}'.format(\n+ self.value, self.accepted_values)),\n+ 'code': self.code}\n \n \n def node_feed_xml(events):\n@@ -296,7 +314,7 @@ def set_attributes(self, lb_id, kvpairs):\n if k not in supported_keys:\n badKeys.append(k)\n if len(badKeys) > 0:\n- raise BadKeysError(\"Attempt to alter a bad attribute\", keys=badKeys)\n+ raise BadKeysError(keys=badKeys)\n \n if \"status\" in kvpairs:\n supported_statuses = [\n@@ -305,9 +323,6 @@ def set_attributes(self, lb_id, kvpairs):\n s = kvpairs[\"status\"]\n if s not in supported_statuses:\n raise BadValueError(\n- \"Unsupported status {0} not one of {1}\".format(\n- s, supported_statuses\n- ),\n value=s, accepted_values=supported_statuses\n )\n \n@@ -624,8 +639,7 @@ def del_load_balancer(self, lb_id):\n return not_found_response(\"loadbalancer\"), 404\n \n \n-@attributes([\"clock\",\n- Attribute(\"regional_collections\", default_factory=dict)])\n+@attr.s\n class GlobalCLBCollections(object):\n \"\"\"\n A :obj:`GlobalCLBCollections` is a set of all the\n@@ -633,6 +647,8 @@ class GlobalCLBCollections(object):\n words, all the objects that a single tenant owns globally in a\n cloud load balancer service.\n \"\"\"\n+ clock = attr.ib()\n+ regional_collections = attr.ib(default=attr.Factory(dict))\n \n def collection_for_region(self, region_name):\n \"\"\"\ndiff --git a/mimic/model/customer_objects.py b/mimic/model/customer_objects.py\n--- a/mimic/model/customer_objects.py\n+++ b/mimic/model/customer_objects.py\n@@ -4,16 +4,19 @@\n \n from __future__ import absolute_import, division, unicode_literals\n \n-from characteristic import attributes, Attribute\n+import attr\n \n \n-@attributes([\"tenant_id\", \"email_address\", \"role\",\n- Attribute(\"first_name\", default_value=\"Test FirstName\"),\n- Attribute(\"last_name\", default_value=\"Test LastName\")])\n+@attr.s\n class Contact(object):\n \"\"\"\n A :obj:`Contact` is a representation for each contact for a tenant.\n \"\"\"\n+ 
tenant_id = attr.ib()\n+ email_address = attr.ib()\n+ role = attr.ib()\n+ first_name = attr.ib(default=\"Test FirstName\")\n+ last_name = attr.ib(default=\"Test LastName\")\n \n static_defaults = {\n \"firstName\": \"Pat\",\n@@ -72,11 +75,12 @@ def generate_contacts(self):\n return template\n \n \n-@attributes([Attribute(\"contacts_store\", default_factory=dict)])\n+@attr.s\n class ContactsStore(object):\n \"\"\"\n A collection of contact objects for a tenant.\n \"\"\"\n+ contacts_store = attr.ib(default=attr.Factory(dict))\n \n def add_to_contacts_store(self, tenant_id, contact_list):\n \"\"\"\ndiff --git a/mimic/model/flavor_collections.py b/mimic/model/flavor_collections.py\n--- a/mimic/model/flavor_collections.py\n+++ b/mimic/model/flavor_collections.py\n@@ -6,7 +6,7 @@\n \n from six import iteritems\n \n-from characteristic import attributes, Attribute\n+import attr\n from json import dumps\n from mimic.model.flavors import (\n RackspaceStandardFlavor, RackspaceComputeFlavor, RackspaceMemoryFlavor,\n@@ -16,14 +16,16 @@\n from mimic.model.nova_objects import not_found\n \n \n-@attributes(\n- [\"tenant_id\", \"region_name\", \"clock\",\n- Attribute(\"flavors_store\", default_factory=list)]\n-)\n+@attr.s\n class RegionalFlavorCollection(object):\n \"\"\"\n A collection of flavors, in a given region, for a given tenant.\n \"\"\"\n+ tenant_id = attr.ib()\n+ region_name = attr.ib()\n+ clock = attr.ib()\n+ flavors_store = attr.ib(default=attr.Factory(list))\n+\n def flavor_by_id(self, flavor_id):\n \"\"\"\n Retrieve a :obj:`Flavor` object by its ID.\n@@ -87,14 +89,16 @@ def get_flavor(self, http_get_request, flavor_id, absolutize_url):\n return dumps({\"flavor\": flavor.detailed_json(absolutize_url)})\n \n \n-@attributes([\"tenant_id\", \"clock\",\n- Attribute(\"regional_collections\", default_factory=dict)])\n+@attr.s\n class GlobalFlavorCollection(object):\n \"\"\"\n A :obj:`GlobalFlavorCollection` is a set of all the\n :obj:`RegionalFlavorCollection` objects owned by a given tenant. 
In other\n words, all the flavor objects that a single tenant owns globally.\n \"\"\"\n+ tenant_id = attr.ib()\n+ clock = attr.ib()\n+ regional_collections = attr.ib(default=attr.Factory(dict))\n \n def collection_for_region(self, region_name):\n \"\"\"\ndiff --git a/mimic/model/flavors.py b/mimic/model/flavors.py\n--- a/mimic/model/flavors.py\n+++ b/mimic/model/flavors.py\n@@ -4,14 +4,21 @@\n \n from __future__ import absolute_import, division, unicode_literals\n \n-from characteristic import attributes\n+import attr\n \n \n-@attributes(['flavor_id', 'tenant_id', 'name', 'ram', 'vcpus', 'rxtx', 'disk'])\n+@attr.s\n class Flavor(object):\n \"\"\"\n A Flavor object\n \"\"\"\n+ flavor_id = attr.ib()\n+ tenant_id = attr.ib()\n+ name = attr.ib()\n+ ram = attr.ib()\n+ vcpus = attr.ib()\n+ rxtx = attr.ib()\n+ disk = attr.ib()\n \n static_defaults = {\n \"swap\": \"\",\ndiff --git a/mimic/model/heat_objects.py b/mimic/model/heat_objects.py\n--- a/mimic/model/heat_objects.py\n+++ b/mimic/model/heat_objects.py\n@@ -2,23 +2,24 @@\n Model objects for the Heat mimic.\n \"\"\"\n \n-from characteristic import attributes, Attribute\n+import attr\n import json\n from random import randrange\n \n from mimic.model.behaviors import BehaviorRegistryCollection, EventDescription\n \n \n-@attributes(['collection', 'stack_name',\n- Attribute('stack_id',\n- default_factory=lambda: Stack.generate_stack_id()),\n- Attribute('action', default_factory=lambda: Stack.CREATE),\n- Attribute('status', default_factory=lambda: Stack.COMPLETE),\n- Attribute('tags', default_factory=list)])\n+@attr.s\n class Stack(object):\n \"\"\"\n A :obj:`Stack` is a representation of a Heat stack.\n \"\"\"\n+ collection = attr.ib()\n+ stack_name = attr.ib()\n+ stack_id = attr.ib(default=attr.Factory(lambda: Stack.generate_stack_id()))\n+ action = attr.ib(default=attr.Factory(lambda: Stack.CREATE))\n+ status = attr.ib(default=attr.Factory(lambda: Stack.COMPLETE))\n+ tags = attr.ib(default=attr.Factory(list))\n \n ACTIONS = (\n CREATE, DELETE, UPDATE, ROLLBACK, SUSPEND, RESUME, ADOPT,\n@@ -199,17 +200,17 @@ def default_delete_behavior(collection, request, stack_name, stack_id):\n return b''\n \n \n-@attributes(\n- [\"tenant_id\", \"region_name\",\n- Attribute(\"stacks\", default_factory=list),\n- Attribute(\n- \"behavior_registry_collection\",\n- default_factory=lambda: BehaviorRegistryCollection())]\n-)\n+@attr.s\n class RegionalStackCollection(object):\n \"\"\"\n A collection of :obj:`Stack` objects for a region.\n \"\"\"\n+ tenant_id = attr.ib()\n+ region_name = attr.ib()\n+ stacks = attr.ib(default=attr.Factory(list))\n+ behavior_registry_collection = attr.ib(default=attr.Factory(\n+ lambda: BehaviorRegistryCollection()))\n+\n def stack_by_id(self, stack_id):\n \"\"\"\n Retrieves a stack by its ID\n@@ -302,12 +303,14 @@ def request_deletion(self, request, stack_name, stack_id):\n stack_id=stack_id)\n \n \n-@attributes([\"tenant_id\",\n- Attribute(\"regional_collections\", default_factory=dict)])\n+@attr.s\n class GlobalStackCollections(object):\n \"\"\"\n A set of :obj:`RegionalStackCollection` objects owned by a tenant.\n \"\"\"\n+ tenant_id = attr.ib()\n+ regional_collections = attr.ib(default=attr.Factory(dict))\n+\n def collection_for_region(self, region_name):\n \"\"\"\n Retrieves a :obj:`RegionalStackCollection` for a region.\ndiff --git a/mimic/model/ironic_objects.py b/mimic/model/ironic_objects.py\n--- a/mimic/model/ironic_objects.py\n+++ b/mimic/model/ironic_objects.py\n@@ -4,7 +4,7 @@\n \n from __future__ import absolute_import, 
division, unicode_literals\n \n-from characteristic import attributes, Attribute\n+import attr\n from uuid import uuid4\n from json import dumps\n \n@@ -12,26 +12,26 @@\n from mimic.util.helper import random_hex_generator\n \n \n-@attributes([\"node_id\",\n- Attribute(\"chassis_uuid\", default_value=None),\n- Attribute(\"driver\", default_value=None),\n- Attribute(\"driver_info\", default_value=None),\n- Attribute(\"properties\", default_value=None),\n- Attribute(\"flavor_id\", default_value=\"onmetal-io1\"),\n- Attribute(\"power_state\", default_value=\"power on\"),\n- Attribute(\"provision_state\", default_value=\"available\"),\n- Attribute(\"instance_uuid\", default_value=None),\n- Attribute(\"maintenance\", default_value=False),\n- Attribute(\"cache_image_id\", default_value=None),\n- Attribute(\"memory_mb\", default_value=None),\n- Attribute(\"name\", default_value=None)\n- ])\n+@attr.s\n class Node(object):\n \"\"\"\n A :obj:`Node` is a representation of all the state associated with a ironic\n node. It can produce JSON-serializable objects for various pieces of\n state that are required for API responses.\n \"\"\"\n+ node_id = attr.ib()\n+ chassis_uuid = attr.ib(default=None)\n+ driver = attr.ib(default=None)\n+ driver_info = attr.ib(default=None)\n+ properties = attr.ib(default=None)\n+ flavor_id = attr.ib(default=\"onmetal-io1\")\n+ power_state = attr.ib(default=\"power on\")\n+ provision_state = attr.ib(default=\"available\")\n+ instance_uuid = attr.ib(default=None)\n+ maintenance = attr.ib(default=False)\n+ cache_image_id = attr.ib(default=None)\n+ memory_mb = attr.ib(default=None)\n+ name = attr.ib(default=None)\n \n static_defaults = {\n \"target_power_state\": None,\n@@ -178,11 +178,12 @@ def detail_json(self):\n return template\n \n \n-@attributes([Attribute(\"ironic_node_store\", default_factory=list)])\n+@attr.s\n class IronicNodeStore(object):\n \"\"\"\n A collection of ironic :obj:`Node` objects.\n \"\"\"\n+ ironic_node_store = attr.ib(default=attr.Factory(list))\n \n memory_to_flavor_map = {131072: \"onmetal-io1\",\n 32768: \"onmetal-compute1\",\ndiff --git a/mimic/model/keypair_objects.py b/mimic/model/keypair_objects.py\n--- a/mimic/model/keypair_objects.py\n+++ b/mimic/model/keypair_objects.py\n@@ -6,14 +6,17 @@\n \n import json\n \n-from characteristic import attributes, Attribute\n+import attr\n \n \n-@attributes(['name', 'public_key'])\n+@attr.s\n class KeyPair(object):\n \"\"\"\n A KeyPair object\n \"\"\"\n+ name = attr.ib()\n+ public_key = attr.ib()\n+\n fingerprint = \"aa:aa:aa:aa:aa:aa:aa:aa:aa:aa:aa:aa:aa:aa:aa:aa\"\n user_id = \"fake\"\n \n@@ -31,15 +34,16 @@ def key_json(self):\n }\n \n \n-@attributes(\n- [\"tenant_id\", \"region_name\", \"clock\",\n- Attribute(\"keypairs\", default_factory=list)]\n-)\n+@attr.s\n class RegionalKeyPairCollection(object):\n \"\"\"\n A :obj:`ReionalKeyPairCollection` is a collection of\n :obj:`KeyPair` objects owned by a given tenant for a region.\n \"\"\"\n+ tenant_id = attr.ib()\n+ region_name = attr.ib()\n+ clock = attr.ib()\n+ keypairs = attr.ib(default=attr.Factory(list))\n \n def create_keypair(self, keypair):\n \"\"\"\n@@ -82,14 +86,16 @@ def remove_keypair(self, name):\n self.keypairs.remove(kp_to_remove)\n \n \n-@attributes([\"tenant_id\", \"clock\",\n- Attribute(\"regional_collections\", default_factory=dict)])\n+@attr.s\n class GlobalKeyPairCollections(object):\n \"\"\"\n A :obj:`GlobalKeyPairCollections` is a set of all the\n :obj:`RegionalKeyPairCollection` objects owned by a given tenant. 
In other\n words, all the objects that a single tenant owns globally.\n \"\"\"\n+ tenant_id = attr.ib()\n+ clock = attr.ib()\n+ regional_collections = attr.ib(default=attr.Factory(dict))\n \n def collection_for_region(self, region_name):\n \"\"\"\ndiff --git a/mimic/model/mailgun_objects.py b/mimic/model/mailgun_objects.py\n--- a/mimic/model/mailgun_objects.py\n+++ b/mimic/model/mailgun_objects.py\n@@ -4,18 +4,23 @@\n \n from __future__ import absolute_import, division, unicode_literals\n \n+import attr\n import time\n-from characteristic import attributes, Attribute\n \n \n-@attributes([\"message_id\", \"to\", \"msg_from\", \"subject\", \"body\",\n- Attribute(\"custom_headers\", default_factory=dict)])\n+@attr.s\n class Message(object):\n \"\"\"\n A :obj:`Message` is a representation of an email in Mailgun.\n It can produce JSON-serializable objects for various pieces of\n state that are required for API responses.\n \"\"\"\n+ message_id = attr.ib()\n+ to = attr.ib()\n+ msg_from = attr.ib()\n+ subject = attr.ib()\n+ body = attr.ib()\n+ custom_headers = attr.ib(default=attr.Factory(dict))\n \n static_defaults = {\n \"tags\": [],\n@@ -76,11 +81,12 @@ def generate_events(self):\n return template\n \n \n-@attributes([Attribute(\"message_store\", default_factory=list)])\n+@attr.s\n class MessageStore(object):\n \"\"\"\n A collection of message objects.\n \"\"\"\n+ message_store = attr.ib(default=attr.Factory(list))\n \n def add_to_message_store(self, **attributes):\n \"\"\"\ndiff --git a/mimic/model/nova_image_collection.py b/mimic/model/nova_image_collection.py\n--- a/mimic/model/nova_image_collection.py\n+++ b/mimic/model/nova_image_collection.py\n@@ -4,7 +4,7 @@\n \n from __future__ import absolute_import, division, unicode_literals\n \n-from characteristic import attributes, Attribute\n+import attr\n from json import dumps\n from mimic.model.rackspace_images import OnMetalImage\n \n@@ -12,12 +12,16 @@\n from mimic.canned_responses.mimic_presets import get_presets\n \n \n-@attributes(\n- [\"tenant_id\", \"region_name\", \"clock\", \"image_store\"])\n+@attr.s\n class RegionalNovaImageCollection(object):\n \"\"\"\n A collection of nova images, in a given region, for a given tenant.\n \"\"\"\n+ tenant_id = attr.ib()\n+ region_name = attr.ib()\n+ clock = attr.ib()\n+ image_store = attr.ib()\n+\n def list_images(self, include_details, absolutize_url):\n \"\"\"\n Return a list of images.\n@@ -49,14 +53,16 @@ def get_image(self, http_get_request, image_id, absolutize_url):\n return dumps({\"image\": image.detailed_json(absolutize_url)})\n \n \n-@attributes([\"tenant_id\", \"clock\",\n- Attribute(\"regional_collections\", default_factory=dict)])\n+@attr.s\n class GlobalNovaImageCollection(object):\n \"\"\"\n A :obj:`GlobalNovaImageCollection` is a set of all the\n :obj:`RegionalNovaImageCollection` objects owned by a given tenant. 
In other\n words, all the image objects that a single tenant owns globally.\n \"\"\"\n+ tenant_id = attr.ib()\n+ clock = attr.ib()\n+ regional_collections = attr.ib(default=attr.Factory(dict))\n \n def collection_for_region(self, region_name, image_store):\n \"\"\"\ndiff --git a/mimic/model/nova_objects.py b/mimic/model/nova_objects.py\n--- a/mimic/model/nova_objects.py\n+++ b/mimic/model/nova_objects.py\n@@ -6,7 +6,7 @@\n \n import re\n import uuid\n-from characteristic import attributes, Attribute\n+import attr\n from random import randrange\n from json import loads, dumps\n from six.moves.urllib.parse import urlencode\n@@ -28,18 +28,20 @@\n from mimic.model.rackspace_images import RackspaceSavedImage\n \n \n-@attributes(['nova_message'])\n+@attr.s\n class LimitError(Exception):\n \"\"\"\n Error to be raised when a limit has been exceeded.\n \"\"\"\n+ nova_message = attr.ib()\n \n \n-@attributes(['nova_message'])\n+@attr.s\n class BadRequestError(Exception):\n \"\"\"\n Error to be raised when bad input has been received to Nova.\n \"\"\"\n+ nova_message = attr.ib()\n \n \n def _nova_error_message(msg_type, message, status_code, request):\n@@ -115,18 +117,29 @@ def conflicting(message, request):\n return _nova_error_message(\"conflictingRequest\", message, CONFLICT, request)\n \n \n-@attributes([\"collection\", \"server_id\", \"server_name\", \"metadata\",\n- \"creation_time\", \"update_time\", \"public_ips\", \"private_ips\",\n- \"status\", \"flavor_ref\", \"image_ref\", \"disk_config\", \"key_name\",\n- \"admin_password\", \"creation_request_json\",\n- Attribute('max_metadata_items', instance_of=int,\n- default_value=40)])\n+@attr.s\n class Server(object):\n \"\"\"\n A :obj:`Server` is a representation of all the state associated with a nova\n server. 
It can produce JSON-serializable objects for various pieces of\n state that are required for API responses.\n \"\"\"\n+ admin_password = attr.ib()\n+ collection = attr.ib()\n+ creation_request_json = attr.ib()\n+ creation_time = attr.ib()\n+ disk_config = attr.ib()\n+ flavor_ref = attr.ib()\n+ image_ref = attr.ib()\n+ key_name = attr.ib()\n+ metadata = attr.ib()\n+ private_ips = attr.ib()\n+ public_ips = attr.ib()\n+ server_id = attr.ib()\n+ server_name = attr.ib()\n+ status = attr.ib()\n+ update_time = attr.ib()\n+ max_metadata_items = attr.ib(validator=attr.validators.instance_of(int), default=40)\n \n static_defaults = {\n \"OS-EXT-STS:power_state\": 1,\n@@ -339,11 +352,12 @@ def from_creation_request_json(cls, collection, creation_json,\n return self\n \n \n-@attributes([\"address\"])\n+@attr.s\n class IPv4Address(object):\n \"\"\"\n An IPv4 address for a server.\n \"\"\"\n+ address = attr.ib()\n \n def json(self):\n \"\"\"\n@@ -352,11 +366,12 @@ def json(self):\n return {\"addr\": self.address, \"version\": 4}\n \n \n-@attributes([\"address\"])\n+@attr.s\n class IPv6Address(object):\n \"\"\"\n An IPv6 address for a server.\n \"\"\"\n+ address = attr.ib()\n \n def json(self):\n \"\"\"\n@@ -650,17 +665,17 @@ def metadata_to_creation_behavior(metadata):\n return None\n \n \n-@attributes(\n- [\"tenant_id\", \"region_name\", \"clock\",\n- Attribute(\"servers\", default_factory=list),\n- Attribute(\n- \"behavior_registry_collection\",\n- default_factory=lambda: BehaviorRegistryCollection())]\n-)\n+@attr.s\n class RegionalServerCollection(object):\n \"\"\"\n A collection of servers, in a given region, for a given tenant.\n \"\"\"\n+ tenant_id = attr.ib()\n+ region_name = attr.ib()\n+ clock = attr.ib()\n+ servers = attr.ib(default=attr.Factory(list))\n+ behavior_registry_collection = attr.ib(default=attr.Factory(\n+ lambda: BehaviorRegistryCollection()))\n \n def server_by_id(self, server_id):\n \"\"\"\n@@ -973,8 +988,7 @@ def request_action(self, http_action_request, server_id, absolutize_url,\n return dumps(bad_request(\"There is no such action currently supported\", http_action_request))\n \n \n-@attributes([\"tenant_id\", \"clock\",\n- Attribute(\"regional_collections\", default_factory=dict)])\n+@attr.s\n class GlobalServerCollections(object):\n \"\"\"\n A :obj:`GlobalServerCollections` is a set of all the\n@@ -982,6 +996,9 @@ class GlobalServerCollections(object):\n words, all the objects that a single tenant owns globally in a Nova\n service.\n \"\"\"\n+ tenant_id = attr.ib()\n+ clock = attr.ib()\n+ regional_collections = attr.ib(default=attr.Factory(dict))\n \n def collection_for_region(self, region_name):\n \"\"\"\ndiff --git a/mimic/model/rackspace_image_store.py b/mimic/model/rackspace_image_store.py\n--- a/mimic/model/rackspace_image_store.py\n+++ b/mimic/model/rackspace_image_store.py\n@@ -2,7 +2,7 @@\n An image store representing Rackspace specific images\n \"\"\"\n from __future__ import absolute_import, division, unicode_literals\n-from characteristic import attributes, Attribute\n+import attr\n from six import iteritems\n from mimic.model.rackspace_images import (RackspaceWindowsImage,\n RackspaceCentOSPVImage, RackspaceCentOSPVHMImage,\n@@ -19,12 +19,14 @@\n from mimic.model.rackspace_images import create_rackspace_images\n \n \n-@attributes([Attribute(\"image_list\", default_factory=list)])\n+@attr.s\n class RackspaceImageStore(object):\n \"\"\"\n A store for images to share between nova_api and glance_api\n :var image_list: list of Rackspace images\n \"\"\"\n+ image_list = 
attr.ib(default=attr.Factory(list))\n+\n def create_image_store(self, tenant_id):\n \"\"\"\n Generates the data for each image in each image class\ndiff --git a/mimic/model/rackspace_images.py b/mimic/model/rackspace_images.py\n--- a/mimic/model/rackspace_images.py\n+++ b/mimic/model/rackspace_images.py\n@@ -4,7 +4,7 @@\n \n from __future__ import absolute_import, division, unicode_literals\n \n-from characteristic import attributes, Attribute\n+import attr\n import uuid\n import random\n \n@@ -23,18 +23,7 @@ def random_image_size():\n return random.randint(250000, 80000000000)\n \n \n-@attributes([\n- # Prior to refactor; no default value\n- 'image_id', 'tenant_id', 'name', 'minDisk', 'minRam', 'image_size',\n- # Post-refactor; all have default values, but these should be slowly\n- # removed.\n- Attribute('flavor_classes', default_value=\"*\"),\n- Attribute('image_type', default_value=\"base\"),\n- Attribute('os_type', default_value=\"linux\"),\n- Attribute('os_distro', default_value=\"\"),\n- Attribute('vm_mode', default_value=\"hvm\"),\n- Attribute('auto_disk_config', default_value=\"disabled\"),\n-])\n+@attr.s\n class Image(object):\n \"\"\"\n A Image object\n@@ -54,6 +43,22 @@ class Image(object):\n with this image; possible values: \"True\", \"disabled\", and \"False\".\n (TODO: what does \"False\" mean?)\n \"\"\"\n+ # Prior to refactor; no default value\n+ image_id = attr.ib()\n+ tenant_id = attr.ib()\n+ name = attr.ib()\n+ minDisk = attr.ib()\n+ minRam = attr.ib()\n+ image_size = attr.ib()\n+\n+ # Post-refactor; all have default values, but these should be slowly\n+ # removed.\n+ flavor_classes = attr.ib(default=\"*\")\n+ image_type = attr.ib(default=\"base\")\n+ os_type = attr.ib(default=\"linux\")\n+ os_distro = attr.ib(default=\"\")\n+ vm_mode = attr.ib(default=\"hvm\")\n+ auto_disk_config = attr.ib(default=\"disabled\")\n \n is_default = False\n \n@@ -138,12 +143,25 @@ def detailed_json(self, absolutize_url):\n return template\n \n \n-@attributes(['image_id', 'tenant_id', 'name', 'minDisk', 'minRam', 'image_size', 'server_id', 'links',\n- 'flavor_classes', 'os_type', 'os_distro', 'vm_mode', 'disk_config'])\n+@attr.s\n class RackspaceSavedImage(object):\n \"\"\"\n A Rackspace saved image object representation\n \"\"\"\n+ image_id = attr.ib()\n+ tenant_id = attr.ib()\n+ name = attr.ib()\n+ minDisk = attr.ib()\n+ minRam = attr.ib()\n+ image_size = attr.ib()\n+ server_id = attr.ib()\n+ links = attr.ib()\n+ flavor_classes = attr.ib()\n+ os_type = attr.ib()\n+ os_distro = attr.ib()\n+ vm_mode = attr.ib()\n+ disk_config = attr.ib()\n+\n is_default = False\n \n def metadata_json(self):\n@@ -654,11 +672,17 @@ def metadata_json(self):\n }\n \n \n-@attributes(['image_id', 'tenant_id', 'name', 'minDisk', 'minRam', 'image_size'])\n+@attr.s\n class OnMetalImage(object):\n \"\"\"\n A Image object\n \"\"\"\n+ image_id = attr.ib()\n+ tenant_id = attr.ib()\n+ name = attr.ib()\n+ minDisk = attr.ib()\n+ minRam = attr.ib()\n+ image_size = attr.ib()\n \n is_default = False\n \ndiff --git a/mimic/model/valkyrie_objects.py b/mimic/model/valkyrie_objects.py\n--- a/mimic/model/valkyrie_objects.py\n+++ b/mimic/model/valkyrie_objects.py\n@@ -4,7 +4,7 @@\n \n from __future__ import absolute_import, division, unicode_literals\n \n-from characteristic import attributes, Attribute\n+import attr\n from json import dumps\n \n from mimic.util.helper import random_hex_generator\n@@ -67,7 +67,7 @@ def json(self):\n }\n \n \n-@attributes([Attribute(\"valkyrie_store\", default_factory=list)])\n+@attr.s\n class 
ValkyrieStore(object):\n \"\"\"\n \n@@ -85,6 +85,7 @@ class ValkyrieStore(object):\n http://localhost:8900/valkyrie/v2/account/123456/permissions/contacts/devices/by_contact/56/effective\n \n \"\"\"\n+ valkyrie_store = attr.ib(default=attr.Factory(list))\n \n permissions = []\n # Arguments are: account, contact, (direct) permission, item, item_type (1=account or 2=device)\ndiff --git a/mimic/rest/cloudfeeds.py b/mimic/rest/cloudfeeds.py\n--- a/mimic/rest/cloudfeeds.py\n+++ b/mimic/rest/cloudfeeds.py\n@@ -14,7 +14,7 @@\n from mimic.rest.mimicapp import MimicApp\n \n \n-from characteristic import attributes\n+import attr\n \n \n @implementer(IAPIMock, IPlugin)\n@@ -57,12 +57,14 @@ def resource_for_region(self, region, uri_prefix, session_store):\n \n \n @implementer(IAPIMock, IPlugin)\n-@attributes([\"cf_api\"])\n+@attr.s\n class CloudFeedsControlApi(object):\n \"\"\"\n This class registers the load balancer controller API in the service\n catalog.\n \"\"\"\n+ cf_api = attr.ib()\n+\n def catalog_entries(self, tenant_id):\n \"\"\"\n Cloud feeds controller endpoints.\n@@ -89,11 +91,15 @@ def resource_for_region(self, region, uri_prefix, session_store):\n return cfc_region.app.resource()\n \n \n-@attributes([\"api_mock\", \"uri_prefix\", \"session_store\", \"region\"])\n+@attr.s\n class CloudFeedsControlRegion(object):\n \"\"\"\n Klein routes for cloud feed's control API within a particular region.\n \"\"\"\n+ api_mock = attr.ib()\n+ uri_prefix = attr.ib()\n+ session_store = attr.ib()\n+ region = attr.ib()\n \n app = MimicApp()\n \ndiff --git a/mimic/rest/loadbalancer_api.py b/mimic/rest/loadbalancer_api.py\n--- a/mimic/rest/loadbalancer_api.py\n+++ b/mimic/rest/loadbalancer_api.py\n@@ -23,7 +23,7 @@\n \n from mimic.util.helper import invalid_resource, json_dump\n from mimic.util.helper import json_from_request\n-from characteristic import attributes\n+import attr\n \n \n @implementer(IAPIMock, IPlugin)\n@@ -78,12 +78,14 @@ def _get_session(self, session_store, tenant_id):\n \n \n @implementer(IAPIMock, IPlugin)\n-@attributes([\"lb_api\"])\n+@attr.s\n class LoadBalancerControlApi(object):\n \"\"\"\n This class registers the load balancer controller API in the service\n catalog.\n \"\"\"\n+ lb_api = attr.ib()\n+\n def catalog_entries(self, tenant_id):\n \"\"\"\n Cloud load balancer controller endpoints.\n@@ -108,11 +110,15 @@ def resource_for_region(self, region, uri_prefix, session_store):\n return lbc_region.app.resource()\n \n \n-@attributes([\"api_mock\", \"uri_prefix\", \"session_store\", \"region\"])\n+@attr.s\n class LoadBalancerControlRegion(object):\n \"\"\"\n Klein routes for load balancer's control API within a particular region.\n \"\"\"\n+ api_mock = attr.ib()\n+ uri_prefix = attr.ib()\n+ session_store = attr.ib()\n+ region = attr.ib()\n \n app = MimicApp()\n \n@@ -151,18 +157,9 @@ def set_attributes(self, request, tenant_id, clb_id):\n \n try:\n regional_lbs.set_attributes(clb_id, content)\n- except BadKeysError as bke:\n- request.setResponseCode(400)\n- return json.dumps({\n- \"message\": str(bke),\n- \"code\": 400,\n- })\n- except BadValueError as bve:\n- request.setResponseCode(400)\n- return json.dumps({\n- \"message\": str(bve),\n- \"code\": 400,\n- })\n+ except (BadKeysError, BadValueError) as err:\n+ request.setResponseCode(err.code)\n+ return json.dumps(err.to_json())\n else:\n request.setResponseCode(204)\n return b''\ndiff --git a/mimic/rest/maas_api.py b/mimic/rest/maas_api.py\n--- a/mimic/rest/maas_api.py\n+++ b/mimic/rest/maas_api.py\n@@ -14,7 +14,6 @@\n import attr\n 
from six import text_type\n \n-from characteristic import attributes\n from zope.interface import implementer\n \n from twisted.plugin import IPlugin\n@@ -1662,11 +1661,13 @@ def latest_alarm_states(self, request, tenant_id):\n \n \n @implementer(IAPIMock, IPlugin)\n-@attributes([\"maas_api\"])\n+@attr.s\n class MaasControlApi(object):\n \"\"\"\n This class registers the MaaS controller API in the service catalog.\n \"\"\"\n+ maas_api = attr.ib()\n+\n def catalog_entries(self, tenant_id):\n \"\"\"\n List catalog entries for the MaaS API.\n@@ -1693,11 +1694,14 @@ def resource_for_region(self, region, uri_prefix, session_store):\n return maas_controller.app.resource()\n \n \n-@attributes([\"api_mock\", \"session_store\", \"region\"])\n+@attr.s\n class MaasController(object):\n \"\"\"\n Klein routes for MaaS control API.\n \"\"\"\n+ api_mock = attr.ib()\n+ session_store = attr.ib()\n+ region = attr.ib()\n \n def _entity_cache_for_tenant(self, tenant_id):\n \"\"\"\ndiff --git a/mimic/rest/nova_api.py b/mimic/rest/nova_api.py\n--- a/mimic/rest/nova_api.py\n+++ b/mimic/rest/nova_api.py\n@@ -8,7 +8,7 @@\n from uuid import uuid4\n import json\n \n-from characteristic import attributes\n+import attr\n from six import text_type\n \n from zope.interface import implementer\n@@ -87,11 +87,12 @@ def _get_session(self, session_store, tenant_id):\n \n \n @implementer(IAPIMock, IPlugin)\n-@attributes([\"nova_api\"])\n+@attr.s\n class NovaControlApi(object):\n \"\"\"\n Rest endpoints for the Nova Control Api.\n \"\"\"\n+ nova_api = attr.ib()\n \n def catalog_entries(self, tenant_id):\n \"\"\"\n@@ -128,11 +129,16 @@ def resource_for_region(self, region, uri_prefix, session_store):\n \"\"\"\n \n \n-@attributes([\"api_mock\", \"uri_prefix\", \"session_store\", \"region\"])\n+@attr.s\n class NovaControlApiRegion(object):\n \"\"\"\n Klein resources for the Nova Control plane API\n \"\"\"\n+ api_mock = attr.ib()\n+ uri_prefix = attr.ib()\n+ session_store = attr.ib()\n+ region = attr.ib()\n+\n app = MimicApp()\n \n @app.route('/v2/<string:tenant_id>/behaviors', branch=True)\ndiff --git a/mimic/rest/rackconnect_v3_api.py b/mimic/rest/rackconnect_v3_api.py\n--- a/mimic/rest/rackconnect_v3_api.py\n+++ b/mimic/rest/rackconnect_v3_api.py\n@@ -11,7 +11,7 @@\n import json\n from uuid import uuid4, UUID\n \n-from characteristic import attributes, Attribute\n+import attr\n from six import text_type\n \n from twisted.plugin import IPlugin\n@@ -72,17 +72,7 @@ def resource_for_region(self, region, uri_prefix, session_store):\n default_pools=self.default_pools).app.resource()\n \n \n-@attributes(\n- [Attribute(\"id\", default_factory=lambda: text_type(uuid4()),\n- instance_of=text_type),\n- Attribute(\"name\", default_value=u\"default\", instance_of=text_type),\n- Attribute(\"port\", default_value=80, instance_of=int),\n- Attribute(\"status\", default_value=u\"ACTIVE\", instance_of=text_type),\n- Attribute(\"status_detail\", default_value=None),\n- Attribute(\"virtual_ip\", default_factory=random_ipv4,\n- instance_of=text_type),\n- Attribute('nodes', default_factory=list, instance_of=list)],\n- apply_with_cmp=False)\n+@attr.s(hash=False)\n class LoadBalancerPool(object):\n \"\"\"\n Represents a RackConnecvt v3 Load Balancer Pool.\n@@ -102,17 +92,25 @@ class LoadBalancerPool(object):\n :param text_type virtual_ip: The IP of the load balancer pool\n :param list nodes: :class:`LoadBalancerPoolNode`s\n \"\"\"\n+ id = attr.ib(default=attr.Factory(lambda: text_type(uuid4())),\n+ validator=attr.validators.instance_of(text_type))\n+ 
name = attr.ib(default=\"default\", validator=attr.validators.instance_of(text_type))\n+ port = attr.ib(default=80, validator=attr.validators.instance_of(int))\n+ status = attr.ib(default=\"ACTIVE\", validator=attr.validators.instance_of(text_type))\n+ status_detail = attr.ib(default=None)\n+ virtual_ip = attr.ib(default=attr.Factory(random_ipv4),\n+ validator=attr.validators.instance_of(text_type))\n+ nodes = attr.ib(default=attr.Factory(list), validator=attr.validators.instance_of(list))\n+\n def as_json(self):\n \"\"\"\n Create a JSON-serializable representation of the contents of this\n object, which can be used in a REST response for a request for the\n details of this particular object\n \"\"\"\n- # no dictionary comprehensions in py2.6\n- response = dict([\n- (attr.name, getattr(self, attr.name))\n- for attr in LoadBalancerPool.characteristic_attributes\n- if attr.name != \"nodes\"])\n+ response = {aa.name: getattr(self, aa.name)\n+ for aa in attr.fields(LoadBalancerPool)\n+ if aa.name != \"nodes\"}\n response['node_counts'] = {\n \"cloud_servers\": len(self.nodes),\n \"external\": 0,\n@@ -141,13 +139,7 @@ def node_by_id(self, node_id):\n return next((node for node in self.nodes if node.id == node_id), None)\n \n \n-@attributes([\"created\", \"load_balancer_pool\", \"cloud_server\",\n- Attribute(\"id\", default_factory=lambda: text_type(uuid4()),\n- instance_of=text_type),\n- Attribute(\"updated\", default_value=None),\n- Attribute(\"status\", default_value=text_type(\"ACTIVE\"),\n- instance_of=text_type),\n- Attribute(\"status_detail\", default_value=None)])\n+@attr.s\n class LoadBalancerPoolNode(object):\n \"\"\"\n Represents a Load Balancer Pool Node.\n@@ -173,6 +165,15 @@ class LoadBalancerPoolNode(object):\n in theory can also be some external (not a cloud server) resource,\n but that is not supported yet on the API.\n \"\"\"\n+ created = attr.ib()\n+ load_balancer_pool = attr.ib()\n+ cloud_server = attr.ib()\n+ id = attr.ib(default=attr.Factory(lambda: text_type(uuid4())),\n+ validator=attr.validators.instance_of(text_type))\n+ updated = attr.ib(default=None)\n+ status = attr.ib(default=\"ACTIVE\", validator=attr.validators.instance_of(text_type))\n+ status_detail = attr.ib(default=None)\n+\n def short_json(self):\n \"\"\"\n Create a short JSON-serializable representation of the contents of\n@@ -184,11 +185,9 @@ def short_json(self):\n GET /v3/{tenant_id}/load_balancer_pools/{load_balancer_pool_id}/nodes\n (list load balancer pool nodes)\n \"\"\"\n- # no dictionary comprehensions in py2.6\n- response = dict([\n- (attr.name, getattr(self, attr.name))\n- for attr in LoadBalancerPoolNode.characteristic_attributes\n- if attr.name not in ('load_balancer_pool', 'cloud_server')])\n+ response = {aa.name: getattr(self, aa.name)\n+ for aa in attr.fields(LoadBalancerPoolNode)\n+ if aa.name not in ('load_balancer_pool', 'cloud_server')}\n response['load_balancer_pool'] = {'id': self.load_balancer_pool.id}\n response['cloud_server'] = {'id': self.cloud_server}\n return response\n@@ -202,12 +201,17 @@ def update(self, now, status, status_detail=None):\n self.status_detail = status_detail\n \n \n-@attributes([\"iapi\", \"uri_prefix\", \"session_store\", \"region_name\",\n- \"default_pools\"])\n+@attr.s\n class RackConnectV3Region(object):\n \"\"\"\n A set of ``klein`` routes representing a RackConnect V3 endpoint.\n \"\"\"\n+ iapi = attr.ib()\n+ uri_prefix = attr.ib()\n+ session_store = attr.ib()\n+ region_name = attr.ib()\n+ default_pools = attr.ib()\n+\n app = MimicApp()\n \n 
@app.route(\"/v3/<string:tenant_id>/load_balancer_pools\", branch=True)\n@@ -232,12 +236,15 @@ def get_tenant_lb_pools(self, request, tenant_id):\n # exclude all the attributes from comparison so that equality has to be\n # determined by identity, since lbpools is mutable and we don't want to\n # compare clocks\n-@attributes([\"lbpools\", \"clock\"], apply_with_cmp=False)\n+@attr.s(hash=False)\n class LoadBalancerPoolsInRegion(object):\n \"\"\"\n A set of ``klein`` routes handling RackConnect V3 Load Balancer Pools\n collections.\n \"\"\"\n+ lbpools = attr.ib()\n+ clock = attr.ib()\n+\n app = MimicApp()\n \n def _pool_by_id(self, id):\n@@ -389,12 +396,14 @@ def delegate_to_one_pool_handler(self, request, id):\n return \"Load Balancer Pool {0} does not exist\".format(id)\n \n \n-@attributes([\"pool\"])\n+@attr.s\n class OneLoadBalancerPool(object):\n \"\"\"\n A set of ``klein`` routes handling the RackConnect V3 API for a single\n load balancer pool\n \"\"\"\n+ pool = attr.ib()\n+\n app = MimicApp()\n \n @app.route(\"/\", methods=[\"GET\"])\ndiff --git a/mimic/rest/swift_api.py b/mimic/rest/swift_api.py\n--- a/mimic/rest/swift_api.py\n+++ b/mimic/rest/swift_api.py\n@@ -9,7 +9,7 @@\n from uuid import uuid4, uuid5, NAMESPACE_URL\n from six import text_type\n \n-from characteristic import attributes, Attribute\n+import attr\n from json import dumps\n \n from mimic.imimic import IAPIMock\n@@ -79,12 +79,15 @@ def resource_for_region(self, region, uri_prefix, session_store):\n session_store=session_store).app.resource()\n \n \n-@attributes(\"api uri_prefix session_store\".split())\n+@attr.s\n class SwiftRegion(object):\n \"\"\"\n :obj:`SwiftRegion` is a set of klein routes and application representing a\n Swift endpoint.\n \"\"\"\n+ api = attr.ib()\n+ uri_prefix = attr.ib()\n+ session_store = attr.ib()\n \n app = MimicApp()\n \n@@ -99,12 +102,15 @@ def get_one_tenant_resource(self, request, tenant_id):\n SwiftTenantInRegion().app.resource()))\n \n \n-@attributes([\"name\", \"content_type\", \"data\"])\n+@attr.s\n class Object(object):\n \"\"\"\n A Python object (i.e. instance) representing a Swift object (i.e. 
bag of\n octets).\n \"\"\"\n+ name = attr.ib()\n+ content_type = attr.ib()\n+ data = attr.ib()\n \n def as_json(self):\n \"\"\"\n@@ -118,11 +124,13 @@ def as_json(self):\n }\n \n \n-@attributes([\"name\", Attribute(\"objects\", default_factory=dict)])\n+@attr.s\n class Container(object):\n \"\"\"\n A Swift container (collection of :obj:`Object`.)\n \"\"\"\n+ name = attr.ib()\n+ objects = attr.ib(default=attr.Factory(dict))\n \n \n class SwiftTenantInRegion(object):\ndiff --git a/mimic/session.py b/mimic/session.py\n--- a/mimic/session.py\n+++ b/mimic/session.py\n@@ -10,17 +10,21 @@\n from uuid import uuid4\n from datetime import datetime, timedelta\n \n-from characteristic import attributes, Attribute\n+import attr\n \n \n-@attributes(['username', 'token', 'tenant_id', 'expires',\n- Attribute('impersonator_session_map', default_factory=dict),\n- Attribute('_api_objects', default_factory=dict)])\n+@attr.s\n class Session(object):\n \"\"\"\n A mimic Session is a record of an authentication token for a particular\n username and tenant_id.\n \"\"\"\n+ username = attr.ib()\n+ token = attr.ib()\n+ tenant_id = attr.ib()\n+ expires = attr.ib()\n+ impersonator_session_map = attr.ib(default=attr.Factory(dict))\n+ _api_objects = attr.ib(default=attr.Factory(dict))\n \n @property\n def user_id(self):\n@@ -46,12 +50,13 @@ def data_for_api(self, api_mock, data_factory):\n return self._api_objects[api_mock]\n \n \n-@attributes([Attribute('session', instance_of=Session),\n- 'desired_tenant'])\n+@attr.s\n class NonMatchingTenantError(Exception):\n \"\"\"\n A session's tenant ID does not match the desired tenant ID.\n \"\"\"\n+ session = attr.ib(validator=attr.validators.instance_of(Session))\n+ desired_tenant = attr.ib()\n \n \n class SessionStore(object):\n", "test_patch": "diff --git a/mimic/test/test_rackconnect_v3.py b/mimic/test/test_rackconnect_v3.py\n--- a/mimic/test/test_rackconnect_v3.py\n+++ b/mimic/test/test_rackconnect_v3.py\n@@ -4,6 +4,7 @@\n \n from __future__ import absolute_import, division, unicode_literals\n \n+import attr\n import json\n from random import randint\n from uuid import uuid4\n@@ -201,14 +202,14 @@ def test_list_pools_default_one(self):\n pool_json = response_json[0]\n # has the right JSON\n self.assertTrue(all(\n- attr.name in pool_json\n- for attr in LoadBalancerPool.characteristic_attributes\n- if attr.name != \"nodes\"))\n+ aa.name in pool_json\n+ for aa in attr.fields(LoadBalancerPool)\n+ if aa.name != \"nodes\"))\n # Generated values\n self.assertTrue(all(\n- pool_json.get(attr.name)\n- for attr in LoadBalancerPool.characteristic_attributes\n- if attr.name not in (\"nodes\", \"status_detail\")))\n+ pool_json.get(aa.name)\n+ for aa in attr.fields(LoadBalancerPool)\n+ if aa.name not in (\"nodes\", \"status_detail\")))\n \n self.assertEqual(\n {\n", "problem_statement": "", "hints_text": "", "created_at": "2016-02-17T07:33:08Z"}
PythonDataset/test/mixpanel-python-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "mixpanel/mixpanel-python", "pull_number": 64, "instance_id": "mixpanel__mixpanel-python-64", "issue_numbers": ["63"], "base_commit": "40c98e0b285898384cc4aa6cc803d8d0f46f6218", "patch": "diff --git a/mixpanel/__init__.py b/mixpanel/__init__.py\n--- a/mixpanel/__init__.py\n+++ b/mixpanel/__init__.py\n@@ -345,6 +345,7 @@ def send(self, endpoint, json_message, api_key=None):\n :param endpoint: the Mixpanel API endpoint appropriate for the message\n :type endpoint: \"events\" | \"people\" | \"imports\"\n :param str json_message: a JSON message formatted for the endpoint\n+ :param str api_key: your Mixpanel project's API key\n :raises MixpanelException: if the endpoint doesn't exist, the server is\n unreachable, or the message cannot be processed\n \"\"\"\n@@ -412,6 +413,7 @@ def __init__(self, max_size=50, events_url=None, people_url=None, import_url=Non\n 'imports': [],\n }\n self._max_size = min(50, max_size)\n+ self._api_key = None\n \n def send(self, endpoint, json_message, api_key=None):\n \"\"\"Record an event or profile update.\n@@ -424,16 +426,22 @@ def send(self, endpoint, json_message, api_key=None):\n :param endpoint: the Mixpanel API endpoint appropriate for the message\n :type endpoint: \"events\" | \"people\" | \"imports\"\n :param str json_message: a JSON message formatted for the endpoint\n+ :param str api_key: your Mixpanel project's API key\n :raises MixpanelException: if the endpoint doesn't exist, the server is\n unreachable, or any buffered message cannot be processed\n+\n+ .. versionadded:: 4.3.2\n+ The *api_key* parameter.\n \"\"\"\n if endpoint not in self._buffers:\n raise MixpanelException('No such endpoint \"{0}\". Valid endpoints are one of {1}'.format(endpoint, self._buffers.keys()))\n \n buf = self._buffers[endpoint]\n buf.append(json_message)\n+ if api_key is not None:\n+ self._api_key = api_key\n if len(buf) >= self._max_size:\n- self._flush_endpoint(endpoint, api_key)\n+ self._flush_endpoint(endpoint)\n \n def flush(self):\n \"\"\"Immediately send all buffered messages to Mixpanel.\n@@ -444,13 +452,13 @@ def flush(self):\n for endpoint in self._buffers.keys():\n self._flush_endpoint(endpoint)\n \n- def _flush_endpoint(self, endpoint, api_key=None):\n+ def _flush_endpoint(self, endpoint):\n buf = self._buffers[endpoint]\n while buf:\n batch = buf[:self._max_size]\n batch_json = '[{0}]'.format(','.join(batch))\n try:\n- self._consumer.send(endpoint, batch_json, api_key)\n+ self._consumer.send(endpoint, batch_json, self._api_key)\n except MixpanelException as orig_e:\n mp_e = MixpanelException(orig_e)\n mp_e.message = batch_json\n", "test_patch": "diff --git a/test_mixpanel.py b/test_mixpanel.py\n--- a/test_mixpanel.py\n+++ b/test_mixpanel.py\n@@ -353,40 +353,32 @@ class TestBufferedConsumer:\n def setup_class(cls):\n cls.MAX_LENGTH = 10\n cls.consumer = mixpanel.BufferedConsumer(cls.MAX_LENGTH)\n- cls.mock = Mock()\n- cls.mock.read.return_value = six.b('{\"status\":1, \"error\": null}')\n+ cls.consumer._consumer = LogConsumer()\n+ cls.log = cls.consumer._consumer.log\n \n- def test_buffer_hold_and_flush(self):\n- with patch('six.moves.urllib.request.urlopen', return_value=self.mock) as urlopen:\n- self.consumer.send('events', '\"Event\"')\n- assert not self.mock.called\n- self.consumer.flush()\n+ def setup_method(self):\n+ del self.log[:]\n \n- assert urlopen.call_count == 1\n-\n- (call_args, kwargs) = urlopen.call_args\n- (request,) = call_args\n- timeout = kwargs.get('timeout', None)\n-\n- assert request.get_full_url() == 
'https://api.mixpanel.com/track'\n- assert qs(request.data) == qs('ip=0&data=WyJFdmVudCJd&verbose=1')\n- assert timeout is None\n+ def test_buffer_hold_and_flush(self):\n+ self.consumer.send('events', '\"Event\"')\n+ assert len(self.log) == 0\n+ self.consumer.flush()\n+ assert self.log == [('events', ['Event'])]\n \n def test_buffer_fills_up(self):\n- with patch('six.moves.urllib.request.urlopen', return_value=self.mock) as urlopen:\n- for i in range(self.MAX_LENGTH - 1):\n- self.consumer.send('events', '\"Event\"')\n- assert not self.mock.called\n-\n- self.consumer.send('events', '\"Last Event\"')\n+ for i in range(self.MAX_LENGTH - 1):\n+ self.consumer.send('events', '\"Event\"')\n+ assert len(self.log) == 0\n \n- assert urlopen.call_count == 1\n- ((request,), _) = urlopen.call_args\n- assert request.get_full_url() == 'https://api.mixpanel.com/track'\n- assert qs(request.data) == \\\n- qs('ip=0&data=WyJFdmVudCIsIkV2ZW50IiwiRXZlbnQiLCJFdmVudCIsIkV2ZW50IiwiRXZlbnQiLCJFdmVudCIsIkV2ZW50IiwiRXZlbnQiLCJMYXN0IEV2ZW50Il0%3D&verbose=1')\n+ self.consumer.send('events', '\"Last Event\"')\n+ assert len(self.log) == 1\n+ assert self.log == [('events', [\n+ 'Event', 'Event', 'Event', 'Event', 'Event',\n+ 'Event', 'Event', 'Event', 'Event', 'Last Event',\n+ ])]\n \n- def test_unknown_endpoint(self):\n+ def test_unknown_endpoint_raises_on_send(self):\n+ # Ensure the exception isn't hidden until a flush.\n with pytest.raises(mixpanel.MixpanelException):\n self.consumer.send('unknown', '1')\n \n@@ -394,17 +386,19 @@ def test_useful_reraise_in_flush_endpoint(self):\n error_mock = Mock()\n error_mock.read.return_value = six.b('{\"status\": 0, \"error\": \"arbitrary error\"}')\n broken_json = '{broken JSON'\n+ consumer = mixpanel.BufferedConsumer(2)\n with patch('six.moves.urllib.request.urlopen', return_value=error_mock):\n- self.consumer.send('events', broken_json)\n+ consumer.send('events', broken_json)\n with pytest.raises(mixpanel.MixpanelException) as excinfo:\n- self.consumer.flush()\n+ consumer.flush()\n assert excinfo.value.message == '[%s]' % broken_json\n assert excinfo.value.endpoint == 'events'\n \n- def test_import_data_receives_api_key(self):\n- # Ensure BufferedConsumer.send accepts the API_KEY parameter needed for\n- # import_data; see #62.\n+ def test_send_remembers_api_key(self):\n self.consumer.send('imports', '\"Event\"', api_key='MY_API_KEY')\n+ assert len(self.log) == 0\n+ self.consumer.flush()\n+ assert self.log == [('imports', ['Event'], 'MY_API_KEY')]\n \n \n class TestFunctional:\n", "problem_statement": "flush function for Buffered Consumer not working\nHi,\nin class BufferedConsumer the flush function in line 338 should change to \ndef flush (self,api_key=None) \n\nand then in line 444-445 should change to:\n for endpoint in self._buffers.keys():\n self._flush_endpoint(endpoint,api_key=api_key)\n\n", "hints_text": "+1\n\nI have the same issue. The exception is: \"Mixpanel error: token, missing or empty\" because of this bug.\n\n+1 I also just ran into this. Is it worth submitting a PR for this? I see 3 unmerged PRs that are a few years old.", "created_at": "2016-12-22T00:07:05Z"}
PythonDataset/test/monet-task-instances.jsonl.all ADDED
The diff for this file is too large to render. See raw diff
 
PythonDataset/test/msmtools-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "markovmodel/msmtools", "pull_number": 117, "instance_id": "markovmodel__msmtools-117", "issue_numbers": "", "base_commit": "e6ea742c9a17d57071d1651647915caf724d439f", "patch": "diff --git a/msmtools/estimation/sparse/effective_counts.py b/msmtools/estimation/sparse/effective_counts.py\n--- a/msmtools/estimation/sparse/effective_counts.py\n+++ b/msmtools/estimation/sparse/effective_counts.py\n@@ -199,12 +199,7 @@ def statistical_inefficiencies(dtrajs, lag, C=None, truncate_acf=True, mact=2.0,\n # compute inefficiencies\n I, J = C.nonzero()\n if n_jobs > 1:\n- try:\n- from multiprocess.pool import Pool, MapResult\n- except ImportError:\n- raise RuntimeError('using multiple jobs requires the multiprocess library. '\n- 'Install it with conda or pip')\n-\n+ from multiprocessing.pool import Pool, MapResult\n from contextlib import closing\n import tempfile\n \n", "test_patch": "diff --git a/msmtools/estimation/tests/test_effective_count_matrix.py b/msmtools/estimation/tests/test_effective_count_matrix.py\n--- a/msmtools/estimation/tests/test_effective_count_matrix.py\n+++ b/msmtools/estimation/tests/test_effective_count_matrix.py\n@@ -27,13 +27,6 @@\n \n \"\"\"Unit tests for the transition_matrix module\"\"\"\n \n-have_multiprocess_lib = True\n-try:\n- import multiprocess\n- del multiprocess\n-except ImportError:\n- have_multiprocess_lib = False\n-\n \n class TestEffectiveCountMatrix(unittest.TestCase):\n \n@@ -70,9 +63,7 @@ def test_multitraj(self):\n assert np.array_equal(C.nonzero(), Ceff.nonzero())\n assert np.all(Ceff.toarray() <= C.toarray())\n \n- @unittest.skipIf(not have_multiprocess_lib, 'multiprocess lib missing')\n def test_multitraj_njobs(self):\n- import _multiprocess\n dtrajs = [[1, 0, 1, 0, 1, 1, 0, 0, 0, 1], [2], [0, 1, 0, 1]]\n # lag 1\n C = count_matrix(dtrajs, 1)\n@@ -94,8 +85,8 @@ def test_multitraj_njobs(self):\n assert np.array_equal(Ceff2.shape, C.shape)\n assert np.array_equal(C.nonzero(), Ceff2.nonzero())\n assert np.all(Ceff2.toarray() <= C.toarray())\n-\n- @unittest.skipIf(os.getenv('CI', False), 'need physical processors >=2, dont have on CI')\n+ \n+ @unittest.skipIf(os.getenv('CI', False), 'need physical cores')\n def test_njobs_speedup(self):\n artificial_dtraj = [np.random.randint(0, 100, size=10000) for _ in range(10)]\n import time\n", "problem_statement": "", "hints_text": "", "created_at": "2018-11-09T13:28:34Z"}
PythonDataset/test/py-evm-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "ethereum/py-evm", "pull_number": 527, "instance_id": "ethereum__py-evm-527", "issue_numbers": "", "base_commit": "3f42b6dcf794d7200118e85e8251e4c17f2bca1c", "patch": "diff --git a/evm/db/backends/base.py b/evm/db/backends/base.py\n--- a/evm/db/backends/base.py\n+++ b/evm/db/backends/base.py\n@@ -8,6 +8,10 @@ class BaseDB(metaclass=ABCMeta):\n \n @abstractmethod\n def get(self, key):\n+ \"\"\"Return the value for the given key.\n+\n+ Raises KeyError if key doesn't exist.\n+ \"\"\"\n raise NotImplementedError(\n \"The `get` method must be implemented by subclasses of BaseDB\"\n )\n@@ -20,6 +24,7 @@ def set(self, key, value):\n \n @abstractmethod\n def exists(self, key):\n+ \"\"\"Return True if the key exists or False if it doesn't.\"\"\"\n raise NotImplementedError(\n \"The `exists` method must be implemented by subclasses of BaseDB\"\n )\ndiff --git a/evm/db/backends/level.py b/evm/db/backends/level.py\n--- a/evm/db/backends/level.py\n+++ b/evm/db/backends/level.py\n@@ -10,23 +10,24 @@ def __init__(self, db_path=None):\n if not db_path:\n raise TypeError(\"Please specifiy a valid path for your database.\")\n try:\n- import leveldb\n+ import plyvel\n except ImportError:\n- raise ImportError(\"LevelDB requires the leveldb \\\n+ raise ImportError(\"LevelDB requires the plyvel \\\n library which is not available for import.\")\n self.db_path = db_path\n- self.db = leveldb.LevelDB(db_path, create_if_missing=True, error_if_exists=False)\n+ self.db = plyvel.DB(db_path, create_if_missing=True, error_if_exists=False)\n \n def get(self, key):\n- # 'Get' Returns a bytearray which needs to be converted to straight bytes\n- return bytes(self.db.Get(key))\n+ v = self.db.get(key)\n+ if v is None:\n+ raise KeyError(key)\n+ return v\n \n def set(self, key, value):\n- self.db.Put(key, value)\n+ self.db.put(key, value)\n \n- # Returns False instead of KeyError if key doesn't exist\n def exists(self, key):\n- return bool(self.db.Get(key, default=False))\n+ return self.db.get(key) is not None\n \n def delete(self, key):\n- self.db.Delete(key)\n+ self.db.delete(key)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,10 +34,10 @@\n \"coincurve>=7.0.0,<8.0.0\",\n ],\n 'leveldb': [\n- \"leveldb>=0.194,<1.0.0\",\n+ \"plyvel==1.0.4\",\n ],\n 'trinity': [\n- \"leveldb>=0.194,<1.0.0\",\n+ \"plyvel==1.0.4\",\n \"coincurve>=7.0.0,<8.0.0\",\n \"web3>=4.0.0b11,<5.0.0\",\n ],\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2018-04-03T11:01:52Z"}
PythonDataset/test/py-trello-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "sarumont/py-trello", "pull_number": 52, "instance_id": "sarumont__py-trello-52", "issue_numbers": "", "base_commit": "81f93a2272a7855410c78b194316f64e757f6bf7", "patch": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,7 +4,7 @@\n \n setup(\n name = \"py-trello\",\n- version = \"0.1.5\",\n+ version = \"0.2.2\",\n \n description = 'Python wrapper around the Trello API',\n long_description = open('README.rst').read(),\n@@ -19,8 +19,12 @@\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n+ 'Programming Language :: Python 2',\n+ 'Programming Language :: Python 2.7',\n+ 'Programming Language :: Python 3',\n+ 'Programming Language :: Python 3.3',\n ],\n- install_requires = ['httplib2 >= 0.9', 'oauth2',],\n+ install_requires = [\"requests\", \"requests-oauthlib >= 0.4.1\",],\n packages = find_packages(),\n include_package_data = True,\n ) \ndiff --git a/trello/__init__.py b/trello/__init__.py\n--- a/trello/__init__.py\n+++ b/trello/__init__.py\n@@ -1,14 +1,7 @@\n-from httplib2 import Http\n-from urllib import urlencode\n from datetime import datetime\n-import exceptions\n import json\n-import oauth2 as oauth\n-import os\n-import random\n-import time\n-import urlparse\n-import urllib2\n+import requests\n+from requests_oauthlib import OAuth1\n \n \n class ResourceUnavailable(Exception):\n@@ -17,10 +10,10 @@ class ResourceUnavailable(Exception):\n def __init__(self, msg, http_response):\n Exception.__init__(self)\n self._msg = msg\n- self._status = http_response.status\n+ self._status = http_response.status_code\n \n def __str__(self):\n- return \"Resource unavailable: %s (HTTP status: %s)\" % (\n+ return \"%s (HTTP status: %s)\" % (\n self._msg, self._status)\n \n \n@@ -46,22 +39,18 @@ def __init__(self, api_key, api_secret=None, token=None, token_secret=None):\n :token_secret: the OAuth client secret for the given OAuth token\n \"\"\"\n \n- if api_key and api_secret and token and token_secret:\n- # oauth\n- self.oauth_consumer = oauth.Consumer(key=api_key, secret=api_secret)\n- self.oauth_token = oauth.Token(key=token, secret=token_secret)\n- self.client = oauth.Client(self.oauth_consumer, self.oauth_token)\n-\n- elif api_key:\n- self.client = Http()\n-\n- if token is None:\n- self.public_only = True\n+ # client key and secret for oauth1 session\n+ if api_key or token:\n+ self.oauth = OAuth1(client_key=api_key, client_secret=api_secret,\n+ resource_owner_key=token, resource_owner_secret=token_secret)\n else:\n- self.public_only = False\n+ self.oauth = None\n \n+ self.public_only = token is None\n self.api_key = api_key\n- self.auth_token = token\n+ self.api_secret = api_secret\n+ self.resource_owner_key = token\n+ self.resource_owner_secret = token_secret\n \n def info_for_all_boards(self, actions):\n \"\"\"\n@@ -81,33 +70,6 @@ def logout(self):\n \n raise NotImplementedError()\n \n- def build_url(self, path, query={}):\n- \"\"\"\n- Builds a Trello URL.\n-\n- :path: URL path\n- :params: dict of key-value pairs for the query string\n- \"\"\"\n- url = 'https://api.trello.com/1'\n- if path[0:1] != '/':\n- url += '/'\n- url += path\n-\n- if hasattr(self, 'oauth_token'):\n- url += '?'\n- url += \"key=\" + self.oauth_consumer.key\n- url += \"&token=\" + self.oauth_token.key\n- else:\n- url += '?'\n- url += \"key=\" + self.api_key\n- if self.public_only is False:\n- url += \"&token=\" + self.auth_token\n-\n- if len(query) > 0:\n- url += '&' + urlencode(query)\n-\n- return url\n-\n def 
list_boards(self):\n \"\"\"\n Returns all boards for your Trello user\n@@ -135,14 +97,14 @@ def get_board(self, board_id):\n def add_board(self, board_name):\n obj = self.fetch_json('/boards', http_method='POST',\n post_args={'name': board_name})\n- board = Board(self, obj['id'], name=obj['name'].encode('utf-8'))\n+ board = Board(self, obj['id'], name=obj['name'])\n board.closed = obj['closed']\n return board\n \n def get_list(self, list_id):\n obj = self.fetch_json('/lists/' + list_id)\n list = List(self.get_board(obj['idBoard']), obj['id'],\n- name=obj['name'].encode('utf-8'))\n+ name=obj['name'])\n list.closed = obj['closed']\n return list\n \n@@ -153,32 +115,43 @@ def fetch_json(\n self,\n uri_path,\n http_method='GET',\n- headers={},\n- query_params={},\n- post_args={}):\n+ headers=None,\n+ query_params=None,\n+ post_args=None):\n \"\"\" Fetch some JSON from Trello \"\"\"\n \n- if http_method in (\"POST\", \"PUT\", \"DELETE\"):\n- headers['Content-Type'] = 'application/json'\n+ # explicit values here to avoid mutable default values\n+ if headers is None:\n+ headers = {}\n+ if query_params is None:\n+ query_params = {}\n+ if post_args is None:\n+ post_args = {}\n \n+ # set content type and accept headers to handle JSON\n+ if http_method in (\"POST\", \"PUT\", \"DELETE\"):\n+ headers['Content-Type'] = 'application/json; charset=utf-8'\n headers['Accept'] = 'application/json'\n- url = self.build_url(uri_path, query_params)\n- response, content = self.client.request(\n- url,\n- http_method,\n- headers=headers,\n- body=json.dumps(post_args))\n \n- # error checking\n- if response.status == 401:\n- raise Unauthorized(url, response)\n- if response.status != 200:\n- raise ResourceUnavailable(url, response)\n- return json.loads(content)\n+ # construct the full URL without query parameters\n+ if uri_path[0] == '/':\n+ uri_path = uri_path[1:]\n+ url = 'https://api.trello.com/1/%s' % uri_path\n+\n+ # perform the HTTP requests, if possible uses OAuth authentication\n+ response = requests.request(http_method, url, params=query_params,\n+ headers=headers, data=json.dumps(post_args), auth=self.oauth)\n+\n+ if response.status_code == 401:\n+ raise Unauthorized(\"%s at %s\" % (response.text, url), response)\n+ if response.status_code != 200:\n+ raise ResourceUnavailable(\"%s at %s\" % (response.text, url), response)\n+\n+ return response.json()\n \n def _board_from_json(self, json):\n- board = Board(self, json['id'], name=json['name'].encode('utf-8'))\n- board.description = json.get('desc', '').encode('utf-8')\n+ board = Board(self, json['id'], name=json['name'])\n+ board.description = json.get('desc', '')\n board.closed = json['closed']\n board.url = json['url']\n return board\n@@ -188,13 +161,13 @@ def list_hooks(self, token=None):\n Returns a list of all hooks associated with a specific token. 
If you don't pass in a token,\n it tries to use the token associated with the TrelloClient object (if it exists)\n \"\"\"\n+ token = token or self.resource_owner_key\n \n- if token is None and self.auth_token is None:\n+ if token is None:\n raise TokenError(\"You need to pass an auth token in to list hooks.\")\n else:\n- using_token = token if self.auth_token is None else self.auth_token\n- url = \"/tokens/%s/webhooks\" % using_token\n- return self._existing_hook_objs(self.fetch_json(url), using_token)\n+ url = \"/tokens/%s/webhooks\" % token\n+ return self._existing_hook_objs(self.fetch_json(url), token)\n \n def _existing_hook_objs(self, hooks, token):\n \"\"\"\n@@ -216,29 +189,22 @@ def create_hook(self, callback_url, id_model, desc=None, token=None):\n There seems to be some sort of bug that makes you unable to create a\n hook using httplib2, so I'm using urllib2 for that instead.\n \"\"\"\n+ token = token or self.resource_owner_key\n \n- if token is None and self.auth_token is None:\n- raise TokenError(\n- \"You need to pass an auth token in to create a hook.\")\n+ if token is None:\n+ raise TokenError(\"You need to pass an auth token in to create a hook.\")\n+\n+ url = \"https://trello.com/1/tokens/%s/webhooks/\" % token\n+ data = {'callbackURL': callback_url, 'idModel': id_model,\n+ 'description': desc}\n+\n+ response = requests.post(url, data=data, auth=self.oauth)\n+\n+ if response.status_code == 200:\n+ hook_id = response.json()['id']\n+ return WebHook(self, token, hook_id, desc, id_model, callback_url, True)\n else:\n- using_token = token if self.auth_token is None else self.auth_token\n- url = \"https://trello.com/1/tokens/%s/webhooks/?key=%s\" % (\n- using_token, self.api_key)\n- data = urlencode({'callbackURL': callback_url, 'idModel': id_model,\n- \"description\": desc})\n-\n- # TODO - error checking for invalid responses\n- # Before spending too much time doing that with urllib2, might be worth trying\n- # and getting it working with urllib2 for consistency\n- req = urllib2.Request(url, data)\n- response = urllib2.urlopen(req)\n-\n- if response.code == 200:\n- hook_id = json.loads(response.read())['id']\n- return WebHook(self, using_token, hook_id, desc, id_model,\n- callback_url, True)\n- else:\n- return False\n+ return False\n \n \n class Board(object):\n@@ -263,7 +229,7 @@ def __repr__(self):\n def fetch(self):\n \"\"\"Fetch all attributes for this board\"\"\"\n json_obj = self.client.fetch_json('/boards/' + self.id)\n- self.name = json_obj['name'].encode('utf-8')\n+ self.name = json_obj['name']\n self.description = json_obj.get('desc', '')\n self.closed = json_obj['closed']\n self.url = json_obj['url']\n@@ -298,7 +264,7 @@ def get_lists(self, list_filter):\n query_params={'cards': 'none', 'filter': list_filter})\n lists = list()\n for obj in json_obj:\n- l = List(self, obj['id'], name=obj['name'].encode('utf-8'))\n+ l = List(self, obj['id'], name=obj['name'])\n l.closed = obj['closed']\n lists.append(l)\n \n@@ -314,7 +280,7 @@ def add_list(self, name):\n '/lists',\n http_method='POST',\n post_args={'name': name, 'idBoard': self.id}, )\n- list = List(self, obj['id'], name=obj['name'].encode('utf-8'))\n+ list = List(self, obj['id'], name=obj['name'])\n list.closed = obj['closed']\n return list\n \n@@ -358,9 +324,9 @@ def get_cards(self, filters=None):\n cards = list()\n for card_json in json_obj:\n card = Card(self, card_json['id'],\n- name=card_json['name'].encode('utf-8'))\n+ name=card_json['name'])\n \n- for card_key, card_val in card_json.iteritems():\n+ for card_key, 
card_val in card_json.items():\n if card_key in ['id', 'name']:\n continue\n \n@@ -400,7 +366,7 @@ def __repr__(self):\n def fetch(self):\n \"\"\"Fetch all attributes for this list\"\"\"\n json_obj = self.client.fetch_json('/lists/' + self.id)\n- self.name = json_obj['name'].encode('utf-8')\n+ self.name = json_obj['name']\n self.closed = json_obj['closed']\n \n def list_cards(self):\n@@ -408,8 +374,8 @@ def list_cards(self):\n json_obj = self.client.fetch_json('/lists/' + self.id + '/cards')\n cards = list()\n for c in json_obj:\n- card = Card(self, c['id'], name=c['name'].encode('utf-8'))\n- card.description = c.get('desc', '').encode('utf-8')\n+ card = Card(self, c['id'], name=c['name'])\n+ card.description = c.get('desc', '')\n card.closed = c['closed']\n card.url = c['url']\n card.member_ids = c['idMembers']\n@@ -506,14 +472,14 @@ def fetch(self):\n json_obj = self.client.fetch_json(\n '/cards/' + self.id,\n query_params={'badges': False})\n- self.name = json_obj['name'].encode('utf-8')\n+ self.name = json_obj['name']\n self.description = json_obj.get('desc', '')\n self.closed = json_obj['closed']\n self.url = json_obj['url']\n- self.member_ids = json_obj['idMembers']\n- self.short_id = json_obj['idShort']\n- self.list_id = json_obj['idList']\n- self.board_id = json_obj['idBoard']\n+ self.idMembers = json_obj['idMembers']\n+ self.idShort = json_obj['idShort']\n+ self.idList = json_obj['idList']\n+ self.idBoard = json_obj['idBoard']\n self.labels = json_obj['labels']\n self.badges = json_obj['badges']\n self.due = json_obj['due']\n@@ -600,7 +566,7 @@ def change_board(self, board_id, list_id=None):\n http_method='PUT',\n post_args=args)\n \n- def add_checklist(self, title, items, itemstates=[]):\n+ def add_checklist(self, title, items, itemstates=None):\n \n \"\"\"Add a checklist to this card\n \n@@ -609,6 +575,9 @@ def add_checklist(self, title, items, itemstates=[]):\n :itemstates: a list of the state (True/False) of each item\n :return: the checklist\n \"\"\"\n+ if itemstates is None:\n+ itemstates = []\n+\n json_obj = self.client.fetch_json(\n '/cards/' + self.id + '/checklists',\n http_method='POST',\n@@ -649,13 +618,13 @@ def fetch(self):\n json_obj = self.client.fetch_json(\n '/members/' + self.id,\n query_params={'badges': False})\n- self.status = json_obj['status'].encode('utf-8')\n+ self.status = json_obj['status']\n self.id = json_obj.get('id', '')\n self.bio = json_obj.get('bio', '')\n self.url = json_obj.get('url', '')\n- self.username = json_obj['username'].encode('utf-8')\n- self.full_name = json_obj['fullName'].encode('utf-8')\n- self.initials = json_obj['initials'].encode('utf-8')\n+ self.username = json_obj['username']\n+ self.full_name = json_obj['fullName']\n+ self.initials = json_obj['initials']\n return self\n \n \ndiff --git a/trello/util.py b/trello/util.py\n--- a/trello/util.py\n+++ b/trello/util.py\n@@ -1,10 +1,8 @@\n import os\n-import urlparse\n \n-import oauth2 as oauth\n+from requests_oauthlib import OAuth1Session\n \n-\n-def create_oauth_token():\n+def create_oauth_token(expiration=None, scope=None, key=None, secret=None):\n \"\"\"\n Script to obtain an OAuth token from Trello.\n \n@@ -19,44 +17,46 @@ def create_oauth_token():\n authorize_url = 'https://trello.com/1/OAuthAuthorizeToken'\n access_token_url = 'https://trello.com/1/OAuthGetAccessToken'\n \n- expiration = os.environ.get('TRELLO_EXPIRATION', None)\n- scope = os.environ.get('TRELLO_SCOPE', 'read,write')\n- trello_key = os.environ['TRELLO_API_KEY']\n- trello_secret = 
os.environ['TRELLO_API_SECRET']\n-\n- consumer = oauth.Consumer(trello_key, trello_secret)\n- client = oauth.Client(consumer)\n+ expiration = expiration or os.environ.get('TRELLO_EXPIRATION', \"30days\")\n+ scope = scope or os.environ.get('TRELLO_SCOPE', 'read,write')\n+ trello_key = key or os.environ['TRELLO_API_KEY']\n+ trello_secret = secret or os.environ['TRELLO_API_SECRET']\n \n # Step 1: Get a request token. This is a temporary token that is used for\n # having the user authorize an access token and to sign the request to obtain\n # said access token.\n \n- resp, content = client.request(request_token_url, \"GET\")\n- if resp['status'] != '200':\n- raise Exception(\"Invalid response %s.\" % resp['status'])\n-\n- request_token = dict(urlparse.parse_qsl(content))\n+ session = OAuth1Session(client_key=trello_key, client_secret=trello_secret)\n+ response = session.fetch_request_token(request_token_url)\n+ resource_owner_key, resource_owner_secret = response.get('oauth_token'), response.get('oauth_token_secret')\n \n- print \"Request Token:\"\n- print \" - oauth_token = %s\" % request_token['oauth_token']\n- print \" - oauth_token_secret = %s\" % request_token['oauth_token_secret']\n- print\n+ print(\"Request Token:\")\n+ print(\" - oauth_token = %s\" % resource_owner_key)\n+ print(\" - oauth_token_secret = %s\" % resource_owner_secret)\n+ print(\"\")\n \n # Step 2: Redirect to the provider. Since this is a CLI script we do not\n # redirect. In a web application you would redirect the user to the URL\n # below.\n \n- print \"Go to the following link in your browser:\"\n- print \"{authorize_url}?oauth_token={oauth_token}&scope={scope}&expiration={expiration}\".format(\n+ print(\"Go to the following link in your browser:\")\n+ print(\"{authorize_url}?oauth_token={oauth_token}&scope={scope}&expiration={expiration}\".format(\n authorize_url=authorize_url,\n- oauth_token=request_token['oauth_token'],\n+ oauth_token=resource_owner_key,\n expiration=expiration,\n scope=scope,\n- )\n+ ))\n \n # After the user has granted access to you, the consumer, the provider will\n # redirect you to whatever URL you have told them to redirect to. You can\n # usually define this in the oauth_callback argument as well.\n+\n+ # Python 3 compatibility (raw_input was renamed to input)\n+ try:\n+ raw_input\n+ except NameError:\n+ raw_input = input\n+\n accepted = 'n'\n while accepted.lower() == 'n':\n accepted = raw_input('Have you authorized me? (y/n) ')\n@@ -67,20 +67,17 @@ def create_oauth_token():\n # request token to sign this request. After this is done you throw away the\n # request token and use the access token returned. 
You should store this\n # access token somewhere safe, like a database, for future use.\n- token = oauth.Token(request_token['oauth_token'],\n- request_token['oauth_token_secret'])\n- token.set_verifier(oauth_verifier)\n- client = oauth.Client(consumer, token)\n-\n- resp, content = client.request(access_token_url, \"POST\")\n- access_token = dict(urlparse.parse_qsl(content))\n-\n- print \"Access Token:\"\n- print \" - oauth_token = %s\" % access_token['oauth_token']\n- print \" - oauth_token_secret = %s\" % access_token['oauth_token_secret']\n- print\n- print \"You may now access protected resources using the access tokens above.\"\n- print\n+ session = OAuth1Session(client_key=trello_key, client_secret=trello_secret,\n+ resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret,\n+ verifier=oauth_verifier)\n+ access_token = session.fetch_access_token(access_token_url)\n+\n+ print(\"Access Token:\")\n+ print(\" - oauth_token = %s\" % access_token['oauth_token'])\n+ print(\" - oauth_token_secret = %s\" % access_token['oauth_token_secret'])\n+ print(\"\")\n+ print(\"You may now access protected resources using the access tokens above.\")\n+ print(\"\")\n \n if __name__ == '__main__':\n create_oauth_token()\n", "test_patch": "diff --git a/test/test_trello.py b/test/test_trello.py\n--- a/test/test_trello.py\n+++ b/test/test_trello.py\n@@ -2,133 +2,163 @@\n import unittest\n import os\n \n-class TrelloClientTestCase(unittest.TestCase):\n \n-\t\"\"\"\n+class TrelloClientTestCase(unittest.TestCase):\n+ \"\"\"\n \tTests for TrelloClient API. Note these test are in order to preserve dependencies, as an API\n \tintegration cannot be tested independently.\n \t\"\"\"\n \n-\tdef setUp(self):\n-\t\tself._trello = TrelloClient(os.environ['TRELLO_API_KEY'],\n+ def setUp(self):\n+ self._trello = TrelloClient(os.environ['TRELLO_API_KEY'],\n token=os.environ['TRELLO_TOKEN'])\n \n-\tdef test01_list_boards(self):\n-\t\tself.assertEquals(\n-\t\t\t\tlen(self._trello.list_boards()),\n-\t\t\t\tint(os.environ['TRELLO_TEST_BOARD_COUNT']))\n-\n-\tdef test10_board_attrs(self):\n-\t\tboards = self._trello.list_boards()\n-\t\tfor b in boards:\n-\t\t\tself.assertIsNotNone(b.id, msg=\"id not provided\")\n-\t\t\tself.assertIsNotNone(b.name, msg=\"name not provided\")\n-\t\t\tself.assertIsNotNone(b.description, msg=\"description not provided\")\n-\t\t\tself.assertIsNotNone(b.closed, msg=\"closed not provided\")\n-\t\t\tself.assertIsNotNone(b.url, msg=\"url not provided\")\n-\n-\tdef test20_board_all_lists(self):\n-\t\tboards = self._trello.list_boards()\n-\t\tfor b in boards:\n-\t\t\ttry:\n-\t\t\t\tb.all_lists()\n-\t\t\texcept Exception as e:\n-\t\t\t\tself.fail(\"Caught Exception getting lists\")\n-\n-\tdef test21_board_open_lists(self):\n-\t\tboards = self._trello.list_boards()\n-\t\tfor b in boards:\n-\t\t\ttry:\n-\t\t\t\tb.open_lists()\n-\t\t\texcept Exception as e:\n-\t\t\t\tself.fail(\"Caught Exception getting open lists\")\n-\n-\tdef test22_board_closed_lists(self):\n-\t\tboards = self._trello.list_boards()\n-\t\tfor b in boards:\n-\t\t\ttry:\n-\t\t\t\tb.closed_lists()\n-\t\t\texcept Exception as e:\n-\t\t\t\tself.fail(\"Caught Exception getting closed lists\")\n-\n-\tdef test30_list_attrs(self):\n-\t\tboards = self._trello.list_boards()\n-\t\tfor b in boards:\n-\t\t\tfor l in b.all_lists():\n-\t\t\t\tself.assertIsNotNone(l.id, msg=\"id not provided\")\n-\t\t\t\tself.assertIsNotNone(l.name, msg=\"name not provided\")\n-\t\t\t\tself.assertIsNotNone(l.closed, msg=\"closed not 
provided\")\n-\t\t\tbreak # only need to test one board's lists\n-\n-\tdef test40_list_cards(self):\n-\t\tboards = self._trello.list_boards()\n-\t\tfor b in boards:\n-\t\t\tfor l in b.all_lists():\n-\t\t\t\tfor c in l.list_cards():\n-\t\t\t\t\tself.assertIsNotNone(c.id, msg=\"id not provided\")\n-\t\t\t\t\tself.assertIsNotNone(c.name, msg=\"name not provided\")\n-\t\t\t\t\tself.assertIsNotNone(c.description, msg=\"description not provided\")\n-\t\t\t\t\tself.assertIsNotNone(c.closed, msg=\"closed not provided\")\n-\t\t\t\t\tself.assertIsNotNone(c.url, msg=\"url not provided\")\n-\t\t\t\tbreak\n-\t\t\tbreak\n-\t\tpass\n-\n-\tdef test50_add_card(self):\n-\t\tboards = self._trello.list_boards()\n-\t\tboard_id = None\n-\t\tfor b in boards:\n-\t\t\tif b.name != os.environ['TRELLO_TEST_BOARD_NAME']:\n-\t\t\t\tcontinue\n-\n-\t\t\tfor l in b.open_lists():\n-\t\t\t\ttry:\n-\t\t\t\t\tname = \"Testing from Python - no desc\"\n-\t\t\t\t\tcard = l.add_card(name)\n-\t\t\t\texcept Exception as e:\n-\t\t\t\t\tprint str(e)\n-\t\t\t\t\tself.fail(\"Caught Exception adding card\")\n-\n-\t\t\t\tself.assertIsNotNone(card, msg=\"card is None\")\n-\t\t\t\tself.assertIsNotNone(card.id, msg=\"id not provided\")\n-\t\t\t\tself.assertEquals(card.name, name)\n-\t\t\t\tself.assertIsNotNone(card.closed, msg=\"closed not provided\")\n-\t\t\t\tself.assertIsNotNone(card.url, msg=\"url not provided\")\n-\t\t\t\tbreak\n-\t\t\tbreak\n-\t\tif not card:\n-\t\t\tself.fail(\"No card created\")\n-\n-\tdef test51_add_card(self):\n-\t\tboards = self._trello.list_boards()\n-\t\tboard_id = None\n-\t\tfor b in boards:\n-\t\t\tif b.name != os.environ['TRELLO_TEST_BOARD_NAME']:\n-\t\t\t\tcontinue\n-\n-\t\t\tfor l in b.open_lists():\n-\t\t\t\ttry:\n-\t\t\t\t\tname = \"Testing from Python\"\n-\t\t\t\t\tdescription = \"Description goes here\"\n-\t\t\t\t\tcard = l.add_card(name, description)\n-\t\t\t\texcept Exception as e:\n-\t\t\t\t\tprint str(e)\n-\t\t\t\t\tself.fail(\"Caught Exception adding card\")\n-\n-\t\t\t\tself.assertIsNotNone(card, msg=\"card is None\")\n-\t\t\t\tself.assertIsNotNone(card.id, msg=\"id not provided\")\n-\t\t\t\tself.assertEquals(card.name, name)\n-\t\t\t\tself.assertEquals(card.description, description)\n-\t\t\t\tself.assertIsNotNone(card.closed, msg=\"closed not provided\")\n-\t\t\t\tself.assertIsNotNone(card.url, msg=\"url not provided\")\n-\t\t\t\tbreak\n-\t\t\tbreak\n-\t\tif not card:\n-\t\t\tself.fail(\"No card created\")\n+ def test01_list_boards(self):\n+ self.assertEquals(\n+ len(self._trello.list_boards()),\n+ int(os.environ['TRELLO_TEST_BOARD_COUNT']))\n+\n+ def test10_board_attrs(self):\n+ boards = self._trello.list_boards()\n+ for b in boards:\n+ self.assertIsNotNone(b.id, msg=\"id not provided\")\n+ self.assertIsNotNone(b.name, msg=\"name not provided\")\n+ self.assertIsNotNone(b.description, msg=\"description not provided\")\n+ self.assertIsNotNone(b.closed, msg=\"closed not provided\")\n+ self.assertIsNotNone(b.url, msg=\"url not provided\")\n+\n+ def test20_board_all_lists(self):\n+ boards = self._trello.list_boards()\n+ for b in boards:\n+ try:\n+ b.all_lists()\n+ except Exception as e:\n+ self.fail(\"Caught Exception getting lists\")\n+\n+ def test21_board_open_lists(self):\n+ boards = self._trello.list_boards()\n+ for b in boards:\n+ try:\n+ b.open_lists()\n+ except Exception as e:\n+ self.fail(\"Caught Exception getting open lists\")\n+\n+ def test22_board_closed_lists(self):\n+ boards = self._trello.list_boards()\n+ for b in boards:\n+ try:\n+ b.closed_lists()\n+ except Exception as e:\n+ 
self.fail(\"Caught Exception getting closed lists\")\n+\n+ def test30_list_attrs(self):\n+ boards = self._trello.list_boards()\n+ for b in boards:\n+ for l in b.all_lists():\n+ self.assertIsNotNone(l.id, msg=\"id not provided\")\n+ self.assertIsNotNone(l.name, msg=\"name not provided\")\n+ self.assertIsNotNone(l.closed, msg=\"closed not provided\")\n+ break # only need to test one board's lists\n+\n+ def test40_list_cards(self):\n+ boards = self._trello.list_boards()\n+ for b in boards:\n+ for l in b.all_lists():\n+ for c in l.list_cards():\n+ self.assertIsNotNone(c.id, msg=\"id not provided\")\n+ self.assertIsNotNone(c.name, msg=\"name not provided\")\n+ self.assertIsNotNone(c.description, msg=\"description not provided\")\n+ self.assertIsNotNone(c.closed, msg=\"closed not provided\")\n+ self.assertIsNotNone(c.url, msg=\"url not provided\")\n+ break\n+ break\n+ pass\n+\n+\n+ def test50_add_card(self):\n+ boards = self._trello.list_boards()\n+ board_id = None\n+ for b in boards:\n+ if b.name != os.environ['TRELLO_TEST_BOARD_NAME']:\n+ continue\n+\n+ for l in b.open_lists():\n+ try:\n+ name = \"Testing from Python - no desc\"\n+ card = l.add_card(name)\n+ except Exception as e:\n+ print(str(e))\n+ self.fail(\"Caught Exception adding card\")\n+\n+ self.assertIsNotNone(card, msg=\"card is None\")\n+ self.assertIsNotNone(card.id, msg=\"id not provided\")\n+ self.assertEquals(card.name, name)\n+ self.assertIsNotNone(card.closed, msg=\"closed not provided\")\n+ self.assertIsNotNone(card.url, msg=\"url not provided\")\n+ break\n+ break\n+ if not card:\n+ self.fail(\"No card created\")\n+\n+ def test51_add_card(self):\n+ boards = self._trello.list_boards()\n+ board_id = None\n+ for b in boards:\n+ if b.name != os.environ['TRELLO_TEST_BOARD_NAME']:\n+ continue\n+\n+ for l in b.open_lists():\n+ try:\n+ name = \"Testing from Python\"\n+ description = \"Description goes here\"\n+ card = l.add_card(name, description)\n+ except Exception as e:\n+ print(str(e))\n+ self.fail(\"Caught Exception adding card\")\n+\n+ self.assertIsNotNone(card, msg=\"card is None\")\n+ self.assertIsNotNone(card.id, msg=\"id not provided\")\n+ self.assertEquals(card.name, name)\n+ self.assertEquals(card.description, description)\n+ self.assertIsNotNone(card.closed, msg=\"closed not provided\")\n+ self.assertIsNotNone(card.url, msg=\"url not provided\")\n+ break\n+ break\n+ if not card:\n+ self.fail(\"No card created\")\n+\n+\n+ def test52_get_cards(self):\n+ boards = [board for board in self._trello.list_boards() if board.name == os.environ['TRELLO_TEST_BOARD_NAME']]\n+ self.assertEquals(len(boards), 1, msg=\"Test board not found\")\n+\n+ board = boards[0]\n+ cards = board.get_cards()\n+ self.assertEqual(len(cards), 2, msg=\"Unexpected number of cards in testboard\")\n+\n+ for card in cards:\n+ if card.name == 'Testing from Python':\n+ self.assertEqual(card.description, 'Description goes here')\n+ elif card.name == 'Testing from Python - no desc':\n+ self.assertEqual(card.description, '')\n+ else:\n+ self.fail(msg='Unexpected card found')\n+\n+\n+ def test60_delete_cards(self):\n+ boards = [board for board in self._trello.list_boards() if board.name == os.environ['TRELLO_TEST_BOARD_NAME']]\n+ self.assertEquals(len(boards), 1, msg=\"Test board not found\")\n+\n+ board = boards[0]\n+ cards = board.get_cards()\n+ for card in cards:\n+ card.delete()\n+\n \n def suite():\n-\ttests = ['test01_list_boards', 'test10_board_attrs', 'test20_add_card']\n-\treturn unittest.TestSuite(map(TrelloClientTestCase, tests))\n+ tests = 
['test01_list_boards', 'test10_board_attrs', 'test20_add_card']\n+ return unittest.TestSuite(map(TrelloClientTestCase, tests))\n+\n \n if __name__ == \"__main__\":\n-\tunittest.main()\n+ unittest.main()\n", "problem_statement": "", "hints_text": "", "created_at": "2014-07-11T14:34:33Z"}
PythonDataset/test/python-gvm-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "greenbone/python-gvm", "pull_number": 198, "instance_id": "greenbone__python-gvm-198", "issue_numbers": "", "base_commit": "5a85d62b76db8cc726864e981e4a1f1fbd6bb3f9", "patch": "diff --git a/gvm/__init__.py b/gvm/__init__.py\n--- a/gvm/__init__.py\n+++ b/gvm/__init__.py\n@@ -18,23 +18,7 @@\n \"\"\"\n Main module of python-gvm.\n \"\"\"\n-from pathlib import Path\n-\n-from pkg_resources import safe_version\n-\n-import toml\n-\n-\n-def get_version_from_pyproject_toml() -> str:\n- path = Path(__file__)\n- pyproject_toml_path = path.parent.parent / 'pyproject.toml'\n-\n- if pyproject_toml_path.exists():\n- pyproject_toml = toml.loads(pyproject_toml_path.read_text())\n- if 'tool' in pyproject_toml and 'poetry' in pyproject_toml['tool']:\n- return pyproject_toml['tool']['poetry']['version']\n-\n- raise RuntimeError('Version information not found in pyproject.toml file.')\n+from .__version__ import __version__\n \n \n def get_version() -> str:\n@@ -47,5 +31,4 @@ def get_version() -> str:\n .. _PEP440:\n https://www.python.org/dev/peps/pep-0440\n \"\"\"\n- str_version = get_version_from_pyproject_toml()\n- return safe_version(str_version)\n+ return __version__\ndiff --git a/gvm/__version__.py b/gvm/__version__.py\nnew file mode 100644\n--- /dev/null\n+++ b/gvm/__version__.py\n@@ -0,0 +1,5 @@\n+# pylint: disable=invalid-name\n+\n+# THIS IS AN AUTOGENERATED FILE. DO NOT TOUCH!\n+\n+__version__ = \"20.4.dev1\"\ndiff --git a/gvm/utils.py b/gvm/utils.py\n--- a/gvm/utils.py\n+++ b/gvm/utils.py\n@@ -1,5 +1,5 @@\n # -*- coding: utf-8 -*-\n-# Copyright (C) 2018 - 2019 Greenbone Networks GmbH\n+# Copyright (C) 2018 - 2020 Greenbone Networks GmbH\n #\n # SPDX-License-Identifier: GPL-3.0-or-later\n #\n@@ -21,25 +21,3 @@\n \n def deprecation(message: str):\n warnings.warn(message, DeprecationWarning, stacklevel=2)\n-\n-\n-def get_version_string(version: tuple) -> str:\n- \"\"\"Create a version string from a version tuple\n-\n- Arguments:\n- version: version as tuple e.g. (1, 2, 0, dev, 5)\n-\n- Returns:\n- The version tuple converted into a string representation\n- \"\"\"\n- if len(version) > 4:\n- ver = \".\".join(str(x) for x in version[:4])\n- ver += str(version[4])\n-\n- if len(version) > 5:\n- # support (1, 2, 3, 'beta', 2, 'dev', 1)\n- ver += \".{0}{1}\".format(str(version[5]), str(version[6]))\n-\n- return ver\n- else:\n- return \".\".join(str(x) for x in version)\ndiff --git a/gvm/version.py b/gvm/version.py\nnew file mode 100644\n--- /dev/null\n+++ b/gvm/version.py\n@@ -0,0 +1,280 @@\n+# -*- coding: utf-8 -*-\n+# Copyright (C) 2020 Greenbone Networks GmbH\n+#\n+# SPDX-License-Identifier: GPL-3.0-or-later\n+#\n+# This program is free software: you can redistribute it and/or modify\n+# it under the terms of the GNU General Public License as published by\n+# the Free Software Foundation, either version 3 of the License, or\n+# (at your option) any later version.\n+#\n+# This program is distributed in the hope that it will be useful,\n+# but WITHOUT ANY WARRANTY; without even the implied warranty of\n+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n+# GNU General Public License for more details.\n+#\n+# You should have received a copy of the GNU General Public License\n+# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n+\n+import argparse\n+import re\n+import sys\n+\n+from pathlib import Path\n+\n+import tomlkit\n+\n+from packaging.version import Version, InvalidVersion\n+\n+\n+from gvm import get_version\n+\n+\n+def strip_version(version: str) -> str:\n+ \"\"\"\n+ Strips a leading 'v' from a version string\n+\n+ E.g. v1.2.3 will be converted to 1.2.3\n+ \"\"\"\n+ if version and version[0] == 'v':\n+ return version[1:]\n+\n+ return version\n+\n+\n+def safe_version(version: str) -> str:\n+ \"\"\"\n+ Returns the version as a string in `PEP440`_ compliant\n+ format.\n+\n+ .. _PEP440:\n+ https://www.python.org/dev/peps/pep-0440\n+ \"\"\"\n+ try:\n+ return str(Version(version))\n+ except InvalidVersion:\n+ version = version.replace(' ', '.')\n+ return re.sub('[^A-Za-z0-9.]+', '-', version)\n+\n+\n+def get_version_from_pyproject_toml(pyproject_toml_path: Path = None) -> str:\n+ \"\"\"\n+ Return the version information from the [tool.poetry] section of the\n+ pyproject.toml file. The version may be in non standardized form.\n+ \"\"\"\n+ if not pyproject_toml_path:\n+ path = Path(__file__)\n+ pyproject_toml_path = path.parent.parent / 'pyproject.toml'\n+\n+ if not pyproject_toml_path.exists():\n+ raise RuntimeError('pyproject.toml file not found.')\n+\n+ pyproject_toml = tomlkit.parse(pyproject_toml_path.read_text())\n+ if (\n+ 'tool' in pyproject_toml\n+ and 'poetry' in pyproject_toml['tool']\n+ and 'version' in pyproject_toml['tool']['poetry']\n+ ):\n+ return pyproject_toml['tool']['poetry']['version']\n+\n+ raise RuntimeError('Version information not found in pyproject.toml file.')\n+\n+\n+def get_version_string(version: tuple) -> str:\n+ \"\"\"Create a version string from a version tuple\n+\n+ Arguments:\n+ version: version as tuple e.g. 
(1, 2, 0, dev, 5)\n+\n+ Returns:\n+ The version tuple converted into a string representation\n+ \"\"\"\n+ if len(version) > 4:\n+ ver = \".\".join(str(x) for x in version[:4])\n+ ver += str(version[4])\n+\n+ if len(version) > 5:\n+ # support (1, 2, 3, 'beta', 2, 'dev', 1)\n+ ver += \".{0}{1}\".format(str(version[5]), str(version[6]))\n+\n+ return ver\n+ else:\n+ return \".\".join(str(x) for x in version)\n+\n+\n+def print_version(pyproject_toml_path: Path = None) -> None:\n+ pyproject_version = get_version_from_pyproject_toml(\n+ pyproject_toml_path=pyproject_toml_path\n+ )\n+\n+ print(pyproject_version)\n+\n+\n+def versions_equal(new_version: str, old_version: str) -> bool:\n+ \"\"\"\n+ Checks if new_version and old_version are equal\n+ \"\"\"\n+ return safe_version(old_version) == safe_version(new_version)\n+\n+\n+def is_version_pep440_compliant(version: str) -> bool:\n+ \"\"\"\n+ Checks if the provided version is a PEP 440 compliant version string\n+ \"\"\"\n+ return version == safe_version(version)\n+\n+\n+def update_pyproject_version(\n+ new_version: str, pyproject_toml_path: Path,\n+) -> None:\n+ \"\"\"\n+ Update the version in the pyproject.toml file\n+ \"\"\"\n+ version = safe_version(new_version)\n+\n+ pyproject_toml = tomlkit.parse(pyproject_toml_path.read_text())\n+\n+ if 'tool' not in pyproject_toml:\n+ tool_table = tomlkit.table()\n+ pyproject_toml['tool'] = tool_table\n+\n+ if 'poetry' not in pyproject_toml['tool']:\n+ poetry_table = tomlkit.table()\n+ pyproject_toml['tool'].add('poetry', poetry_table)\n+\n+ pyproject_toml['tool']['poetry']['version'] = version\n+\n+ pyproject_toml_path.write_text(tomlkit.dumps(pyproject_toml))\n+\n+\n+def update_version_file(new_version: str, version_file_path: Path) -> None:\n+ \"\"\"\n+ Update the version file with the new version\n+ \"\"\"\n+ version = safe_version(new_version)\n+\n+ text = \"\"\"# pylint: disable=invalid-name\n+\n+# THIS IS AN AUTOGENERATED FILE. 
DO NOT TOUCH!\n+\n+__version__ = \"{}\"\\n\"\"\".format(\n+ version\n+ )\n+ version_file_path.write_text(text)\n+\n+\n+def _update_python_gvm_version(\n+ new_version: str, pyproject_toml_path: Path, *, force: bool = False\n+):\n+ if not pyproject_toml_path.exists():\n+ sys.exit(\n+ 'Could not find pyproject.toml file in the current working dir.'\n+ )\n+\n+ cwd_path = Path.cwd()\n+ python_gvm_version = get_version()\n+ pyproject_version = get_version_from_pyproject_toml(\n+ pyproject_toml_path=pyproject_toml_path\n+ )\n+ version_file_path = cwd_path / 'gvm' / '__version__.py'\n+\n+ if not pyproject_toml_path.exists():\n+ sys.exit(\n+ 'Could not find __version__.py file at {}.'.format(\n+ version_file_path\n+ )\n+ )\n+\n+ if not force and versions_equal(new_version, python_gvm_version):\n+ print('Version is already up-to-date.')\n+ sys.exit(0)\n+\n+ update_pyproject_version(\n+ new_version=new_version, pyproject_toml_path=pyproject_toml_path\n+ )\n+\n+ update_version_file(\n+ new_version=new_version, version_file_path=version_file_path,\n+ )\n+\n+ print(\n+ 'Updated version from {} to {}'.format(\n+ pyproject_version, safe_version(new_version)\n+ )\n+ )\n+\n+\n+def _verify_version(version: str, pyproject_toml_path: Path) -> None:\n+ python_gvm_version = get_version()\n+ pyproject_version = get_version_from_pyproject_toml(\n+ pyproject_toml_path=pyproject_toml_path\n+ )\n+ if not is_version_pep440_compliant(python_gvm_version):\n+ sys.exit(\"The version in gvm/__version__.py is not PEP 440 compliant.\")\n+\n+ if pyproject_version != python_gvm_version:\n+ sys.exit(\n+ \"The version set in the pyproject.toml file \\\"{}\\\" doesn't \"\n+ \"match the python-gvm version \\\"{}\\\"\".format(\n+ pyproject_version, python_gvm_version\n+ )\n+ )\n+\n+ if version != 'current':\n+ provided_version = strip_version(version)\n+ if provided_version != python_gvm_version:\n+ sys.exit(\n+ \"Provided version \\\"{}\\\" does not match the python-gvm \"\n+ \"version \\\"{}\\\"\".format(provided_version, python_gvm_version)\n+ )\n+\n+ print('OK')\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(\n+ description='Version handling utilities for python-gvm.', prog='version'\n+ )\n+\n+ subparsers = parser.add_subparsers(\n+ title='subcommands',\n+ description='valid subcommands',\n+ help='additional help',\n+ dest='command',\n+ )\n+\n+ verify_parser = subparsers.add_parser('verify')\n+ verify_parser.add_argument('version', help='version string to compare')\n+\n+ subparsers.add_parser('show')\n+\n+ update_parser = subparsers.add_parser('update')\n+ update_parser.add_argument('version', help='version string to use')\n+ update_parser.add_argument(\n+ '--force',\n+ help=\"don't check if version is already set\",\n+ action=\"store_true\",\n+ )\n+\n+ args = parser.parse_args()\n+\n+ if not getattr(args, 'command', None):\n+ parser.print_usage()\n+ sys.exit(0)\n+\n+ pyproject_toml_path = Path.cwd() / 'pyproject.toml'\n+\n+ if args.command == 'update':\n+ _update_python_gvm_version(\n+ args.version,\n+ pyproject_toml_path=pyproject_toml_path,\n+ force=args.force,\n+ )\n+ elif args.command == 'show':\n+ print_version(pyproject_toml_path=pyproject_toml_path)\n+ elif args.command == 'verify':\n+ _verify_version(args.version, pyproject_toml_path=pyproject_toml_path)\n+\n+\n+if __name__ == '__main__':\n+ main()\n", "test_patch": "diff --git a/tests/version/__init__.py b/tests/version/__init__.py\nnew file mode 100644\ndiff --git a/tests/version/test_get_version_from_pyproject_toml.py 
b/tests/version/test_get_version_from_pyproject_toml.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/version/test_get_version_from_pyproject_toml.py\n@@ -0,0 +1,79 @@\n+# -*- coding: utf-8 -*-\n+# Copyright (C) 2020 Greenbone Networks GmbH\n+#\n+# SPDX-License-Identifier: GPL-3.0-or-later\n+#\n+# This program is free software: you can redistribute it and/or modify\n+# it under the terms of the GNU General Public License as published by\n+# the Free Software Foundation, either version 3 of the License, or\n+# (at your option) any later version.\n+#\n+# This program is distributed in the hope that it will be useful,\n+# but WITHOUT ANY WARRANTY; without even the implied warranty of\n+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n+# GNU General Public License for more details.\n+#\n+# You should have received a copy of the GNU General Public License\n+# along with this program. If not, see <http://www.gnu.org/licenses/>.\n+\n+import unittest\n+\n+from pathlib import Path\n+from unittest.mock import MagicMock\n+\n+from gvm.version import get_version_from_pyproject_toml\n+\n+\n+class GetVersionFromPyprojectTomlTestCase(unittest.TestCase):\n+ def test_pyproject_toml_file_not_exists(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n+ fake_path.exists.return_value = False\n+\n+ with self.assertRaisesRegex(\n+ RuntimeError, 'pyproject.toml file not found'\n+ ):\n+ get_version_from_pyproject_toml(fake_path)\n+\n+ fake_path.exists.assert_called_with()\n+\n+ def test_no_poerty_section(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n+ fake_path.exists.return_value = True\n+ fake_path.read_text.return_value = ''\n+\n+ with self.assertRaisesRegex(\n+ RuntimeError, 'Version information not found in pyproject.toml file'\n+ ):\n+ get_version_from_pyproject_toml(fake_path)\n+\n+ fake_path.exists.assert_called_with()\n+ fake_path.read_text.assert_called_with()\n+\n+ def test_empty_poerty_section(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n+ fake_path.exists.return_value = True\n+ fake_path.read_text.return_value = '[tool.poetry]'\n+\n+ with self.assertRaisesRegex(\n+ RuntimeError, 'Version information not found in pyproject.toml file'\n+ ):\n+ get_version_from_pyproject_toml(fake_path)\n+\n+ fake_path.exists.assert_called_with()\n+ fake_path.read_text.assert_called_with()\n+\n+ def test_get_version(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n+ fake_path.exists.return_value = True\n+ fake_path.read_text.return_value = '[tool.poetry]\\nversion = \"1.2.3\"'\n+\n+ version = get_version_from_pyproject_toml(fake_path)\n+\n+ self.assertEqual(version, '1.2.3')\n+\n+ fake_path.exists.assert_called_with()\n+ fake_path.read_text.assert_called_with()\ndiff --git a/tests/utils/test_get_version_string.py b/tests/version/test_get_version_string.py\nsimilarity index 97%\nrename from tests/utils/test_get_version_string.py\nrename to tests/version/test_get_version_string.py\n--- a/tests/utils/test_get_version_string.py\n+++ b/tests/version/test_get_version_string.py\n@@ -18,7 +18,7 @@\n \n import unittest\n \n-from gvm.utils import get_version_string\n+from gvm.version import get_version_string\n \n \n class TestGetVersionString(unittest.TestCase):\ndiff --git a/tests/version/test_is_version_pep440_compliant.py b/tests/version/test_is_version_pep440_compliant.py\nnew file mode 100644\n--- /dev/null\n+++ 
b/tests/version/test_is_version_pep440_compliant.py\n@@ -0,0 +1,45 @@\n+# -*- coding: utf-8 -*-\n+# Copyright (C) 2020 Greenbone Networks GmbH\n+#\n+# SPDX-License-Identifier: GPL-3.0-or-later\n+#\n+# This program is free software: you can redistribute it and/or modify\n+# it under the terms of the GNU General Public License as published by\n+# the Free Software Foundation, either version 3 of the License, or\n+# (at your option) any later version.\n+#\n+# This program is distributed in the hope that it will be useful,\n+# but WITHOUT ANY WARRANTY; without even the implied warranty of\n+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n+# GNU General Public License for more details.\n+#\n+# You should have received a copy of the GNU General Public License\n+# along with this program. If not, see <http://www.gnu.org/licenses/>.\n+\n+import unittest\n+\n+from gvm.version import is_version_pep440_compliant\n+\n+\n+class IsVersionPep440CompliantTestCase(unittest.TestCase):\n+ def test_is_compliant(self):\n+ self.assertTrue(is_version_pep440_compliant('1.2.3.dev1'))\n+ self.assertTrue(is_version_pep440_compliant('1.2.3.dev0'))\n+ self.assertTrue(is_version_pep440_compliant('20.4'))\n+ self.assertTrue(is_version_pep440_compliant('1.2'))\n+ self.assertTrue(is_version_pep440_compliant('1.2.0a0'))\n+ self.assertTrue(is_version_pep440_compliant('1.2.0a1'))\n+ self.assertTrue(is_version_pep440_compliant('1.2.0b0'))\n+ self.assertTrue(is_version_pep440_compliant('1.2.0b1'))\n+\n+ def test_is_not_compliant(self):\n+ self.assertFalse(is_version_pep440_compliant('1.2.3dev1'))\n+ self.assertFalse(is_version_pep440_compliant('1.2.3dev'))\n+ self.assertFalse(is_version_pep440_compliant('1.2.3dev0'))\n+ self.assertFalse(is_version_pep440_compliant('1.2.3alpha'))\n+ self.assertFalse(is_version_pep440_compliant('1.2.3alpha0'))\n+ self.assertFalse(is_version_pep440_compliant('1.2.3.a0'))\n+ self.assertFalse(is_version_pep440_compliant('1.2.3beta'))\n+ self.assertFalse(is_version_pep440_compliant('1.2.3beta0'))\n+ self.assertFalse(is_version_pep440_compliant('1.2.3.b0'))\n+ self.assertFalse(is_version_pep440_compliant('20.04'))\ndiff --git a/tests/version/test_safe_version.py b/tests/version/test_safe_version.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/version/test_safe_version.py\n@@ -0,0 +1,55 @@\n+# -*- coding: utf-8 -*-\n+# Copyright (C) 2020 Greenbone Networks GmbH\n+#\n+# SPDX-License-Identifier: GPL-3.0-or-later\n+#\n+# This program is free software: you can redistribute it and/or modify\n+# it under the terms of the GNU General Public License as published by\n+# the Free Software Foundation, either version 3 of the License, or\n+# (at your option) any later version.\n+#\n+# This program is distributed in the hope that it will be useful,\n+# but WITHOUT ANY WARRANTY; without even the implied warranty of\n+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n+# GNU General Public License for more details.\n+#\n+# You should have received a copy of the GNU General Public License\n+# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n+\n+import unittest\n+\n+from gvm.version import safe_version\n+\n+\n+class SafeVersionTestCase(unittest.TestCase):\n+ def test_dev_versions(self):\n+ self.assertEqual(safe_version('1.2.3dev'), '1.2.3.dev0')\n+ self.assertEqual(safe_version('1.2.3dev1'), '1.2.3.dev1')\n+ self.assertEqual(safe_version('1.2.3.dev'), '1.2.3.dev0')\n+\n+ def test_alpha_versions(self):\n+ self.assertEqual(safe_version('1.2.3alpha'), '1.2.3a0')\n+ self.assertEqual(safe_version('1.2.3.alpha'), '1.2.3a0')\n+ self.assertEqual(safe_version('1.2.3a'), '1.2.3a0')\n+ self.assertEqual(safe_version('1.2.3.a1'), '1.2.3a1')\n+ self.assertEqual(safe_version('1.2.3a1'), '1.2.3a1')\n+\n+ def test_beta_versions(self):\n+ self.assertEqual(safe_version('1.2.3beta'), '1.2.3b0')\n+ self.assertEqual(safe_version('1.2.3.beta'), '1.2.3b0')\n+ self.assertEqual(safe_version('1.2.3b'), '1.2.3b0')\n+ self.assertEqual(safe_version('1.2.3.b1'), '1.2.3b1')\n+ self.assertEqual(safe_version('1.2.3b1'), '1.2.3b1')\n+\n+ def test_caldav_versions(self):\n+ self.assertEqual(safe_version('22.04'), '22.4')\n+ self.assertEqual(safe_version('22.4'), '22.4')\n+ self.assertEqual(safe_version('22.10'), '22.10')\n+ self.assertEqual(safe_version('22.04dev1'), '22.4.dev1')\n+ self.assertEqual(safe_version('22.10dev1'), '22.10.dev1')\n+\n+ def test_release_versions(self):\n+ self.assertEqual(safe_version('1'), '1')\n+ self.assertEqual(safe_version('1.2'), '1.2')\n+ self.assertEqual(safe_version('1.2.3'), '1.2.3')\n+ self.assertEqual(safe_version('22.4'), '22.4')\ndiff --git a/tests/version/test_strip_version.py b/tests/version/test_strip_version.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/version/test_strip_version.py\n@@ -0,0 +1,31 @@\n+# -*- coding: utf-8 -*-\n+# Copyright (C) 2020 Greenbone Networks GmbH\n+#\n+# SPDX-License-Identifier: GPL-3.0-or-later\n+#\n+# This program is free software: you can redistribute it and/or modify\n+# it under the terms of the GNU General Public License as published by\n+# the Free Software Foundation, either version 3 of the License, or\n+# (at your option) any later version.\n+#\n+# This program is distributed in the hope that it will be useful,\n+# but WITHOUT ANY WARRANTY; without even the implied warranty of\n+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n+# GNU General Public License for more details.\n+#\n+# You should have received a copy of the GNU General Public License\n+# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n+\n+import unittest\n+\n+from gvm.version import strip_version\n+\n+\n+class StripVersionTestCase(unittest.TestCase):\n+ def test_version_string_without_v(self):\n+ self.assertEqual(strip_version('1.2.3'), '1.2.3')\n+ self.assertEqual(strip_version('1.2.3dev'), '1.2.3dev')\n+\n+ def test_version_string_with_v(self):\n+ self.assertEqual(strip_version('v1.2.3'), '1.2.3')\n+ self.assertEqual(strip_version('v1.2.3dev'), '1.2.3dev')\ndiff --git a/tests/version/test_update_pyproject_version.py b/tests/version/test_update_pyproject_version.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/version/test_update_pyproject_version.py\n@@ -0,0 +1,79 @@\n+# -*- coding: utf-8 -*-\n+# Copyright (C) 2020 Greenbone Networks GmbH\n+#\n+# SPDX-License-Identifier: GPL-3.0-or-later\n+#\n+# This program is free software: you can redistribute it and/or modify\n+# it under the terms of the GNU General Public License as published by\n+# the Free Software Foundation, either version 3 of the License, or\n+# (at your option) any later version.\n+#\n+# This program is distributed in the hope that it will be useful,\n+# but WITHOUT ANY WARRANTY; without even the implied warranty of\n+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n+# GNU General Public License for more details.\n+#\n+# You should have received a copy of the GNU General Public License\n+# along with this program. If not, see <http://www.gnu.org/licenses/>.\n+\n+import unittest\n+\n+from pathlib import Path\n+from unittest.mock import MagicMock\n+\n+import tomlkit\n+from gvm.version import update_pyproject_version\n+\n+\n+class UpdatePyprojectVersionTestCase(unittest.TestCase):\n+ def test_empty_pyproject_toml(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n+ fake_path.read_text.return_value = \"\"\n+\n+ update_pyproject_version('20.04dev1', pyproject_toml_path=fake_path)\n+\n+ text = fake_path.write_text.call_args[0][0]\n+\n+ toml = tomlkit.parse(text)\n+\n+ self.assertEqual(toml['tool']['poetry']['version'], '20.4.dev1')\n+\n+ def test_empty_tool_section(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n+ fake_path.read_text.return_value = \"[tool]\"\n+\n+ update_pyproject_version('20.04dev1', pyproject_toml_path=fake_path)\n+\n+ text = fake_path.write_text.call_args[0][0]\n+\n+ toml = tomlkit.parse(text)\n+\n+ self.assertEqual(toml['tool']['poetry']['version'], '20.4.dev1')\n+\n+ def test_empty_tool_poetry_section(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n+ fake_path.read_text.return_value = \"[tool.poetry]\"\n+\n+ update_pyproject_version('20.04dev1', pyproject_toml_path=fake_path)\n+\n+ text = fake_path.write_text.call_args[0][0]\n+\n+ toml = tomlkit.parse(text)\n+\n+ self.assertEqual(toml['tool']['poetry']['version'], '20.4.dev1')\n+\n+ def test_override_existing_version(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n+ fake_path.read_text.return_value = '[tool.poetry]\\nversion = \"1.2.3\"'\n+\n+ update_pyproject_version('20.04dev1', pyproject_toml_path=fake_path)\n+\n+ text = fake_path.write_text.call_args[0][0]\n+\n+ toml = tomlkit.parse(text)\n+\n+ self.assertEqual(toml['tool']['poetry']['version'], '20.4.dev1')\ndiff --git a/verify-version.py b/tests/version/test_update_version_file.py\nsimilarity index 57%\nrename from verify-version.py\nrename to tests/version/test_update_version_file.py\n--- 
a/verify-version.py\n+++ b/tests/version/test_update_version_file.py\n@@ -16,32 +16,23 @@\n # You should have received a copy of the GNU General Public License\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n \n-import sys\n+import unittest\n \n-from gvm import get_version\n+from pathlib import Path\n+from unittest.mock import MagicMock\n \n+from gvm.version import update_version_file\n \n-def strip_version(version: str) -> str:\n- if not version:\n- return version\n \n- if version[0] == 'v':\n- return version[1:]\n+class UpdateVersionFileTestCase(unittest.TestCase):\n+ def test_update_version_file(self):\n+ fake_path_class = MagicMock(spec=Path)\n+ fake_path = fake_path_class.return_value\n \n+ update_version_file('22.04dev1', fake_path)\n \n-def main():\n- if len(sys.argv) < 2:\n- sys.exit('Missing argument for version.')\n- return\n+ text = fake_path.write_text.call_args[0][0]\n \n- p_version = strip_version(sys.argv[1])\n- version = get_version()\n- if p_version != version:\n- sys.exit(\n- \"Provided version: {} does not match the python-gvm \"\n- \"version: {}\".format(p_version, version)\n- )\n+ *_, version_line, _last_line = text.split('\\n')\n \n-\n-if __name__ == '__main__':\n- main()\n+ self.assertEqual(version_line, '__version__ = \"22.4.dev1\"')\ndiff --git a/tests/version/test_versions_equal.py b/tests/version/test_versions_equal.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/version/test_versions_equal.py\n@@ -0,0 +1,37 @@\n+# -*- coding: utf-8 -*-\n+# Copyright (C) 2020 Greenbone Networks GmbH\n+#\n+# SPDX-License-Identifier: GPL-3.0-or-later\n+#\n+# This program is free software: you can redistribute it and/or modify\n+# it under the terms of the GNU General Public License as published by\n+# the Free Software Foundation, either version 3 of the License, or\n+# (at your option) any later version.\n+#\n+# This program is distributed in the hope that it will be useful,\n+# but WITHOUT ANY WARRANTY; without even the implied warranty of\n+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n+# GNU General Public License for more details.\n+#\n+# You should have received a copy of the GNU General Public License\n+# along with this program. If not, see <http://www.gnu.org/licenses/>.\n+\n+import unittest\n+\n+from gvm.version import versions_equal\n+\n+\n+class VersionEqualTestCase(unittest.TestCase):\n+ def test_version_equal(self):\n+ self.assertTrue(versions_equal('1.2.3', '1.2.3'))\n+ self.assertTrue(versions_equal('1.2.3a', '1.2.3a0'))\n+ self.assertTrue(versions_equal('1.2.3a0', '1.2.3.a0'))\n+ self.assertTrue(versions_equal('1.2.3dev1', '1.2.3.dev1'))\n+\n+ def test_version_not_equal(self):\n+ self.assertFalse(versions_equal('1.2.3', '1.2'))\n+ self.assertFalse(versions_equal('1.2.3a', '1.2.3a1'))\n+ self.assertFalse(versions_equal('1.2.3a0', '1.2.3.a1'))\n+ self.assertFalse(versions_equal('1.2.3dev', '1.2.3dev1'))\n+ self.assertFalse(versions_equal('1.2.3dev', '1.2.3.dev1'))\n+ self.assertFalse(versions_equal('1.2.3.dev1', '1.2.3.dev2'))\n", "problem_statement": "", "hints_text": "", "created_at": "2020-04-01T13:55:00Z"}
PythonDataset/test/python-slack-sdk-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "slackapi/python-slack-sdk", "pull_number": 836, "instance_id": "slackapi__python-slack-sdk-836", "issue_numbers": "", "base_commit": "2defee516b105112af592dc545a376a893345002", "patch": "diff --git a/tutorial/PythOnBoardingBot/app.py b/tutorial/PythOnBoardingBot/app.py\n--- a/tutorial/PythOnBoardingBot/app.py\n+++ b/tutorial/PythOnBoardingBot/app.py\n@@ -1,7 +1,7 @@\n import os\n import logging\n from flask import Flask\n-from slack import WebClient\n+from slack_sdk.web import WebClient\n from slackeventsapi import SlackEventAdapter\n from onboarding_tutorial import OnboardingTutorial\n \ndiff --git a/tutorial/PythOnBoardingBot/async_app.py b/tutorial/PythOnBoardingBot/async_app.py\n--- a/tutorial/PythOnBoardingBot/async_app.py\n+++ b/tutorial/PythOnBoardingBot/async_app.py\n@@ -4,7 +4,8 @@\n import ssl as ssl_lib\n \n import certifi\n-import slack\n+from slack_sdk.web import WebClient\n+from slack_sdk.rtm import RTMClient\n \n from onboarding_tutorial import OnboardingTutorial\n \n@@ -15,7 +16,7 @@\n onboarding_tutorials_sent = {}\n \n \n-async def start_onboarding(web_client: slack.WebClient, user_id: str, channel: str):\n+async def start_onboarding(web_client: WebClient, user_id: str, channel: str):\n # Create a new onboarding tutorial.\n onboarding_tutorial = OnboardingTutorial(channel)\n \n@@ -39,7 +40,7 @@ async def start_onboarding(web_client: slack.WebClient, user_id: str, channel: s\n # ================ Team Join Event =============== #\n # When the user first joins a team, the type of the event will be 'team_join'.\n # Here we'll link the onboarding_message callback to the 'team_join' event.\n-@slack.RTMClient.run_on(event=\"team_join\")\n+@RTMClient.run_on(event=\"team_join\")\n async def onboarding_message(**payload):\n \"\"\"Create and send an onboarding welcome message to new users. Save the\n time stamp of this message so we can update this message in the future.\n@@ -62,7 +63,7 @@ async def onboarding_message(**payload):\n # When a users adds an emoji reaction to the onboarding message,\n # the type of the event will be 'reaction_added'.\n # Here we'll link the update_emoji callback to the 'reaction_added' event.\n-@slack.RTMClient.run_on(event=\"reaction_added\")\n+@RTMClient.run_on(event=\"reaction_added\")\n async def update_emoji(**payload):\n \"\"\"Update the onboarding welcome message after receiving a \"reaction_added\"\n event from Slack. Update timestamp for welcome message as well.\n@@ -91,7 +92,7 @@ async def update_emoji(**payload):\n # =============== Pin Added Events ================ #\n # When a users pins a message the type of the event will be 'pin_added'.\n # Here we'll link the update_pin callback to the 'reaction_added' event.\n-@slack.RTMClient.run_on(event=\"pin_added\")\n+@RTMClient.run_on(event=\"pin_added\")\n async def update_pin(**payload):\n \"\"\"Update the onboarding welcome message after receiving a \"pin_added\"\n event from Slack. 
Update timestamp for welcome message as well.\n@@ -120,7 +121,7 @@ async def update_pin(**payload):\n # ============== Message Events ============= #\n # When a user sends a DM, the event type will be 'message'.\n # Here we'll link the message callback to the 'message' event.\n-@slack.RTMClient.run_on(event=\"message\")\n+@RTMClient.run_on(event=\"message\")\n async def message(**payload):\n \"\"\"Display the onboarding welcome message after receiving a message\n that contains \"start\".\n@@ -143,7 +144,7 @@ async def message(**payload):\n slack_token = os.environ[\"SLACK_BOT_TOKEN\"]\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n- rtm_client = slack.RTMClient(\n+ rtm_client = RTMClient(\n token=slack_token, ssl=ssl_context, run_async=True, loop=loop\n )\n loop.run_until_complete(rtm_client.start())\ndiff --git a/tutorial/PythOnBoardingBot/onboarding_tutorial.py b/tutorial/PythOnBoardingBot/onboarding_tutorial.py\n--- a/tutorial/PythOnBoardingBot/onboarding_tutorial.py\n+++ b/tutorial/PythOnBoardingBot/onboarding_tutorial.py\n@@ -1,9 +1,6 @@\n class OnboardingTutorial:\n \"\"\"Constructs the onboarding message and stores the state of which tasks were completed.\"\"\"\n \n- # TODO: Create a better message builder:\n- # https://github.com/slackapi/python-slackclient/issues/392\n- # https://github.com/slackapi/python-slackclient/pull/400\n WELCOME_BLOCK = {\n \"type\": \"section\",\n \"text\": {\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2020-10-07T06:40:16Z"}
PythonDataset/test/python-sshpubkeys-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "ojarva/python-sshpubkeys", "pull_number": 39, "instance_id": "ojarva__python-sshpubkeys-39", "issue_numbers": "", "base_commit": "2a3cce903ddedddd6f0f3cb231730e8c7f875430", "patch": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n \n setup(\n name='sshpubkeys',\n- version='2.2.0',\n+ version='2.3.0',\n description='SSH public key parser',\n long_description=long_description,\n url='https://github.com/ojarva/python-sshpubkeys',\n@@ -37,7 +37,7 @@\n keywords='ssh pubkey public key openssh ssh-rsa ssh-dss ssh-ed25519',\n packages=[\"sshpubkeys\"],\n test_suite=\"tests\",\n- install_requires=['pycrypto>=2.6', 'ecdsa>=0.13'],\n+ install_requires=['cryptography>=2.1.4', 'ecdsa>=0.13'],\n \n extras_require={\n 'dev': ['twine', 'wheel'],\ndiff --git a/sshpubkeys/keys.py b/sshpubkeys/keys.py\n--- a/sshpubkeys/keys.py\n+++ b/sshpubkeys/keys.py\n@@ -22,8 +22,9 @@\n import sys\n import warnings\n import ecdsa\n-\n-from Crypto.PublicKey import RSA, DSA\n+from cryptography.hazmat.backends import default_backend\n+from cryptography.hazmat.primitives.asymmetric.dsa import DSAPublicNumbers, DSAParameterNumbers\n+from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n \n from .exceptions import * # pylint:disable=wildcard-import,unused-wildcard-import\n \n@@ -43,7 +44,7 @@ class SSHKey(object): # pylint:disable=too-many-instance-attributes\n DSA_MIN_LENGTH_STRICT = 1024\n DSA_MAX_LENGTH_STRICT = 1024\n DSA_MIN_LENGTH_LOOSE = 1\n- DSA_MAX_LENGTH_LOOSE = 16384\n+ DSA_MAX_LENGTH_LOOSE = 3072\n \n DSA_N_LENGTH = 160\n \n@@ -274,8 +275,8 @@ def _process_ssh_rsa(self, data):\n unpacked_e = self._parse_long(raw_e)\n unpacked_n = self._parse_long(raw_n)\n \n- self.rsa = RSA.construct((unpacked_n, unpacked_e))\n- self.bits = self.rsa.size() + 1\n+ self.rsa = RSAPublicNumbers(unpacked_e, unpacked_n).public_key(default_backend())\n+ self.bits = self.rsa.key_size\n \n if self.strict_mode:\n min_length = self.RSA_MIN_LENGTH_STRICT\n@@ -297,10 +298,8 @@ def _process_ssh_dss(self, data):\n current_position, value = self._unpack_by_int(data, current_position)\n data_fields[item] = self._parse_long(value)\n \n- self.dsa = DSA.construct((data_fields[\"y\"], data_fields[\"g\"], data_fields[\"p\"], data_fields[\"q\"]))\n- self.bits = self.dsa.size() + 1\n-\n q_bits = self._bits_in_number(data_fields[\"q\"])\n+ p_bits = self._bits_in_number(data_fields[\"p\"])\n if q_bits != self.DSA_N_LENGTH:\n raise InvalidKeyError(\"Incorrect DSA key parameters: bits(p)=%s, q=%s\" % (self.bits, q_bits))\n if self.strict_mode:\n@@ -309,10 +308,15 @@ def _process_ssh_dss(self, data):\n else:\n min_length = self.DSA_MIN_LENGTH_LOOSE\n max_length = self.DSA_MAX_LENGTH_LOOSE\n- if self.bits < min_length:\n- raise TooShortKeyError(\"%s key can not be shorter than %s bits (was %s)\" % (self.key_type, min_length, self.bits))\n- if self.bits > max_length:\n- raise TooLongKeyError(\"%s key data can not be longer than %s bits (was %s)\" % (self.key_type, max_length, self.bits))\n+ if p_bits < min_length:\n+ raise TooShortKeyError(\"%s key can not be shorter than %s bits (was %s)\" % (self.key_type, min_length, p_bits))\n+ if p_bits > max_length:\n+ raise TooLongKeyError(\"%s key data can not be longer than %s bits (was %s)\" % (self.key_type, max_length, p_bits))\n+\n+ dsa_parameters = DSAParameterNumbers(data_fields[\"p\"], data_fields[\"q\"], data_fields[\"g\"])\n+ self.dsa = DSAPublicNumbers(data_fields[\"y\"], dsa_parameters).public_key(default_backend())\n+ self.bits = 
self.dsa.key_size\n+\n return current_position\n \n def _process_ecdsa_sha(self, data):\n", "test_patch": "diff --git a/tests/invalid_keys.py b/tests/invalid_keys.py\n--- a/tests/invalid_keys.py\n+++ b/tests/invalid_keys.py\n@@ -7,7 +7,8 @@\n [\"ssh-dss AAAAB3NzaC1yc2EAAAADAQABAAAEAgDGrGaNv7i+sGSelzf+7JsCECa9a0sqSg8q4foGkjeV6RkS2tWvKXoT9rICjEdXXodj0CCVhe/V7dmAO0AK8KM0mcvPfTSC8zH1ZBsqaFFTWwmBD01fbH9axrrg3hM0f+AL4bMMWUdxdNrVo90s8PKU6k/HmUNLVx4gC6uQ4A6YczvOVZkuJ4f7HDYK/v1LNTRNeAkw94YpSIZVAoTOZN943+fRCE9cm155pwmFsS+wfzK9+jjhGXNEK0xooiVBRwQM7qetN076vV5FiiM0LO1qYi5JrIqK/70ske86x2mMhMkOe6jqQQbt32PFVmYqYJWcAYXz+bhcQw6oru0c6gNq53aGOnuqI0uh/zV2XH+cN4c8ABcOplzH5YQEUepNVzxylkvpWxdg/ZzR1pvyu5C8RkJWrE3AlCwpix1ak2xTDzgc3rwTTggNSYqvzmYq0mYJhZk2VWsLVxUgdxfwC3LvIHMXSTU9iU2Aqrlhy7bJAqxQFKWy05wsIOI6raPBLqZnPmJ76Ld9aXTrhBFfIDiigr9ZVsVAdOvmyAGCIj4x3Xnlol/3lN0M2+OSV1SU/5ZrS6dIlXPZDak/OXHU0iIFIODhYU5r8EI1M6BI/jsgQ8HatXmOJkfnIkVP0HxD1KvoAFKjVG5sM9KG12LqsnzfD1KL6PzxpOOgoVgznpOjSzVmPKAkU8N/r6R4VIAmZqxpF8Hlzqg/Gfh5kf6CJXXx8OQt1Z/DAsfnl3LvHFNuE8GgXgrUE022W9pV4oONgojc97JSgBXaFkK885UnJKTceAdGQvChEhsU1j3TiyKPox6ICGpoC2nGONJoDE8VQ8dE/YiZmqkZ1lJWX07EwevrIcnz1UBHFaR72aiAADRYClsitLA5+1mnydVstkQ8XQuuKNOFT7miaWUzRHwj9BYGb7oGhNd9oi1VTVjH/5Yq1UiHHESGaIjeLi5uG2KguDFpcvy2ngtUy3ZbvDj+DVOLL+3vAlycRPjN0nBE4e/J6UqdpLg0DbG56zNj86aU0ZgL8kL8NRkFHyV+5zG5iLFkGklbm4nwCxSW/bVT0PFD1is6JbtIk5i+liS+hiuzSF6NGouSuxDy95yWSG8/84fgPDFtvXtOD7Kl4P7EpEAL+VBZnremT9I8tRl1wOHxJKe7jbEcWC2zkuHNlju0Nv5SFijF9c+krRbHDYEzsxPpdqlI4gPtDFdkKwaKN6BrsxBsz9u+PhS1AloUYcxKRqWbqHuDBrKmxnhOgFqJ9ITX0RajtrApt1LfkSBXcFrVEx2nhQkGa6VwjcX/zw2I2iuJFOCQmc9udHIlyaCCSe1PqOIbOlOk5h/Gl1QvRNwSIgf9dZ05lZr6dc+VX8YGdyHsjQ==\", InvalidTypeError, \"rsa_key_no_match\", [\"loose\", \"strict\"]],\n [\"ssh-dss AAAAB3NzaC1kc3MAAACAwQBAoKCAoECggSAgAOAAwOCg4AEAIMBAgMBAYSBg4GBggKAgwOBAwEDBAMCgwECg4GCBAAEhAABhIOCBIIEAQECg4GEAoQEgQQBgAGEAYQEAgCDAoCDgoIDBICAAAOCgwGDhAAAAwQEBIKCAYEBA4ODg4EDggSAgAGEgAQCAAOAAAAAgAAAAAAAAAAAAAAAAAACEAYKAgICCAIMAAYCBAYICAoAAAACAgKEgQCEhIKDgISCAAIDgwGEgASEg4GAgwQDAoCEAAEDBICDhAAABACCgIACBAIBAwOEgQEAA4KBBAOAAIIDAIQAgoGBgYGCgYCCAYEEgoEEgAABAIACAIAChAKCgoKAhIMDAgMEBIIDgAQEgwCDAAMBA4KAgACBAgCCgIGBAoGAAAACAwKAgAIBgIIBhAIBgQSAAgMAAQICggQDg4SCgoACgASCAwKCAYSAAoMCAYAEAoKBgoQEBIAAgICBAoMDBAMEgYIAgYKDggSDAoQDgQEAgAOBgQAAggQCAoCAgwAEBIEDgYABAgOEAwECAQECAAQDAAECBIGBAgEBA4AAgQSEBAKA=\", InvalidKeyError, \"invalid_q_for_dsa\", [\"loose\", \"strict\"]],\n [\"ssh-dss AAAAB3NzaC1kc3MAAACAgIBAAIBgQJAwgJCAQCBwICAwYFAwcEBQMBBwUHAgcCAAkEAQQECQkBAgkGAwcEAAkIAAMAAwAJAAcABQQCAwIHAwIIAwYAAwICCQkAAAUHAwIBCAMBBQcCAQAFBggEBAQDAgIHAwUJAwkFCQUDBwkIAQEGAwICAQADAwcJAAEJAAAAAgAAAAAAAAAAAAAAAAhIIAggODBAAEgwIAhIGChIICBAAAAACAwQJBAcECQQBAQIGBAEHBQEGBgYCAAQGCAIECQMHBwAIAAIDBAMIBgEEAQADBwIDAgcABAYHBwMDAgcABQgABwAHAAkBBwYAAwYJBwgAAwQFCQICAQgFAQICAgAIAwQHBQYJBQADAgAIAgMHBwkJAQEFBAMGAQAGBQQHAggBAQIA=\", MalformedDataError, \"missing_y_from_dsa\", [\"loose\", \"strict\"]],\n-\n+ ['ssh-dss 
AAAAB3NzaC1kc3MAAAIBANmIbHMDdrRAyd9rMdYoAAxxTIIke8xxS7txH16etsax8YlUEojvIEKRS/C5hulXw+fZYjdz0vjhTamoRo76ML2qz4X2lh96tJxoHVMVtXTdSDKfra4VuTp1mkYr6ycH9h7PEop+9WuB/Jx9ua/OCQ6cB6QePvg/XEaoKETHqrH7IilfJfBeLGgZc76q6+xjw5Qwtmu5nyImd17dWCa4ECdGw7NlbuuZHUs0/TfcJ/UHHuBgj1FKHEINBu3bcZdYKsyOm0VmxRw9Kkvh7qobBMXaNN9l3YvySTsSSbEWboK7jcFEC904i+hHxuYRa/K1x4v8SDaiOkxuJHGOCbfK/TaQdnyUBA2OEgt3MIuQGzc0xSXE5rEkLcjjkadkLA79Z/kajTNWzx54PT1qv6jP8kaa9OCzI+9aml+Lw2dtyDdBN3UDdSZj/823ABEm/zEgifziFrJbz32y3cdza9Jj/EcHCxDiN66Ghj2uLBERhXDxBWPZPoPlKeH+Bk/X/2CqA2H51RgLHtsVvqdrIWSXpAYYpGDaLnsG0yfsSrjGGIs6S9My+1ZSJPE0x568g6vbsTkuLJ70HwY4nhb/yuT0CuzPXsHnzKifzWMZldJki2KDowbJXye2ZJkz6PoknxC1ygT76AFxFvnZUHBMQQxrzV7j3op9jVhsvutyxueHB0efAAAAFQD8bGMGSx8iUNR+mdFoociqqhTuHQAAAgBEg7s1ZDHOuD04RlPNl9dMKOVZpDTdvVI0sKyWvWoxbImwQIBdE+DGZnrWD7/IxosU43J0hE+HcoADou2uJLGcxks8lcRBvdH4d4bVisT/+DsOwPuJKbHNwkV4De9ZmdGEvahQcHFI7KO9HNWWOWMl7opb6KUjRFYWkzF77vVRUw+FdZe4k3zrdHLrv3/+nLPuI7qxqyr+4HpntA4udx9mMVvKBL6ZgaDiXjET43NaFGmnIg9XXoiVj1xtk+dpFDc02gvskHr3zl2G79+FRBYHhMjzlJMrH85bmIIhGPZwFsCWBOupbeXTkDhzDBjcn5NYBhTHmpIcISv5N/anscs4ptUBnS9QbaZNaLTC/4IaNgLvI+iEvGxOml2uMHLu5njJENhB/T1nlDIiTMCk/8cfT8+Rnx/AUjdT3cOxnygYwHeFAc935EmPzh/li2CY+3p43Ihbd5V30MNWTvGGQzs4+7eCqOe+bhkh8EefCnh9yGCKkFZSH0JEoqkTr8842vXBhaQG4cT9e5UTKSkDigJdMb2Fp26jI3EdRbAWD46/Zp7CQkbKrQJxo7OzWUC7UY0oM2ka4jXvrf+SIL7/ith2gILOwzIH5MSM7A9AgQ6TdEvFQ7mQoWnCB2kTjB831qrPFKO21EP+iUrtCslDri2KfSw91l8Fv2Aro8A4pC7/wwAAAgB/hMtSA+nXw7691YNE7howbEEBdUS+5xP5KygfzSkruA6NvzQxC/V+6hHJ8i5oCgdhJ8KvVx2QeXe7M5QHDu3Tj3PWRi9oINwMlN1ZKRHY3FPoHBHje3uvL9hgOZUOqWTY7iuqEuX9TaL5Mu9dXsdBkjWuQYKfh1rnhnRWaH+ViPpQxv8ZMVQ0+tWSQWVpkM72lZIyZtgVOuHcRyImN1zHeps6ibNpfJ7UaHY1IA7DczWD0qGR8yawyhCVQ0KtbVVBscA18SO5c4VroJI/je9/ls61d190BAWEJnacEXHEtAuVJRbUWIzldlWy6NM4E+mg1R7Wy9FESxvviXUap2jAh+60og6TVvpgOHhw+BGc6pBgzkxhY46MvgOuOSr7V9ikk6h11IFTn2wHTB14NibS+fA8NOadAHB6OklY7pEMgadSgFC3kOzKpHeoEPosnHr7TgtiGwOhfAVlr/DbsgQnoQaMHdMAaJ1iTAMhwDZNpFSzgU5WjUnMQjydJKkDo/3OUksrMyEXR4kHt0vbmLBKmUVSX02MpQqEzXNPqxEbjfg7jYezZfjbsN36bD87BLpjZhlxkDECvTKPD45Af3di0Cams7tlc0RJxnj8TvMhG9NTjsIjaqLGCLZHeF0rnWE9nteKO8JLGBXgv/12nOUy66NwIbiTohTJvsIYaIyXCA==',InvalidKeyLengthError, \"too_long_dsa_4096\", [\"loose\", \"strict\"]],\n+ ['ssh-dss 
AAAAB3NzaC1kc3MAAAgBAP9jN4kCDNCxQ6hKKIswzKPtiP2HTA/yHDOe4Vs4xfXINs464QV3ah0gIgx4i0uNidZL07msPbXihrmJT97fPhT/6Qa9SsqKQ3k9fxJIe1CyEQ+/2dJtGO1uwrLkymTSqa+PyQJ8WS+bVEgYfmGGvGyxClYkK1Qcny0h9+k5GEHnUzVKe0giwWk2DGFHBZqjaskTNJtcgGVdm9jYbZDnSPC+ovJ04rTnzgPwHY/GqBSt/lk8MLidEqgZ5RFN7Ytlx1/hZIw1jpiQmQms9lrLVXN1PqA+WSoIPjhLKOUoG6HMBNvH3XSdA00dxRJ92F4QX+hGKUt/I5fLf2JluObjRqodfTua5iU254TTQ9XNk0JvVmty2FvJ+8E2MPd2VQ5KnTSghkn0JodBBxLoEbt9EatZn4wCzWw82rtvxzPnRS50fgw6UDbt4H9JEIlVOMKzxLY9C4G3+Es0qe7MlWzEEZsGQdFU9jjSBxj1Tg5Wz8ZzsCDZ9nZtAY9crhknFOj7/NAW56n+i1TZTmbEK0WtMO8Vjcl2MXOyQxo8PhAzshNBcX4KHcnkRjZOFGnKBAo02K9WUg1ATKvhGKhXOC6BGQ1a+7vdaqcZ+YeV9ByDK+TbnN5Jv8f2JvNzwFwIjTScmZ+nVY+NmANOEu71JDy4YzndpxodHjtT6ewT1oYXQGBi4w8XiC6R6tGTCLu3ho9dm7egeF2J1Llrf1HhHGTudYh+dbg3OoN9VLpYSA2erD3m5+2LQ2zzYmYoMWL0RUk00dPdosrkuoRTEYG6SXILFlXoar83VOhn01Kdh6mMzRFUvBX3wGFU5t7G9PR6KzPCreLpFDvuBmNrMwgkVspOhn4palbsbshm4zhfL7W1tovrfhkXk81LYYHj7Ir+uv0sDwOY2kgH/5RUxaz2SH5zQwbyc21PftDPIyNGvGBUm6Srn4slaeC9egHn/oKCZ0PZwb/FIBafl8I9QJ7rATX03Aq1QSF5/iNvJDeEZTuK6WzvW4CSHgDM+TfYJzUHo/YfUS8SQ1/HeES99rpB+7qdmmT5WSbN4Nr4+CGWEpdAnIknB6jaf8zZBDHUPOMnwjReHxIe/cWDBua/+PTwPGp5/314gKYz+iQJWI6WDHoX87YiWZNOq43QCxX8KRHfYB8e3Bg1ftEIz8bg0VrH1ucsnuJ3L31eoHGmgr3J2qIzctwA74zzNR+BparwxgHxKD21UzIHekmA0kkC5A1Pt7+HLW570WFphMMmh1xBpKEiP0R/h76NYkbfA1FYpsAVvwUGdLNI0qC/UrTAXpw1B6I0oyqH5hypityc0ys+LG2RSa9n0kyx6+EbtCTep5nzaBO53jTJCzqnUeloFW9fM76yWg7Sgm9Upt/g+Fw1XDpZZF+54EnRfLj6ajBRcDN+o9gQ9oBXGv4fiUtRipapEwdjlQyhoWe6Wt49pYyA8ai+ASK3DAuA6czVpbltlbVt7ouv4wawxLvFd+MapvH4R28Uyb2BM3Ceq9afY9mPOyWUv26BpBJcq6LRnYlz5asaw2nsJ6v5y8Y6POjKfAb6q9qvyNxVC8AtIesMLLheL3RBUFTY0Iz7B1oRCtLEjOi4KEc+440c7jrNUxPM4Gw9hZbosuCR2yzIafjRfvmPfmFgGVFrPTVW1pP8yKSdrpnYkZbUeCI+QENbqJHe55hB0kacCsLChNRuGUPg84F7spGlLV/l7f554kjoDgaI+rO3Vb59mDCnr1zGVzdsPCfeyHz+s9LL1+KwnXCX0DDJfET90axiPCZ36IIhaBdCRJJwkmxN6koHSMf6Jbcby6IWOuwJDjK/502Z59Isx7r6R7VLiomi4lkOfk/j8Sg6dmH1bb9jAXduQSSsofxRRFX2rRbcGr7HNWbWmat4a4X4a+2dRwm/JlHBtgiDeuWNSIz8laNJCnuFvbFds+sB4WczL4s1h/VAdMiMFXXwVfoyMAh2CsYPnsUbKTy4/J3AfIleX+5kdkipAed4a9EWU+auyIdqpSiNItv/ewQvbcnknbG82TmsG5whsO4A0D37OasbRE23P2w9rMqD6Wg8w5O4bKabX5AHdMOWn1kVNThsCsTar5eC9PIOMgic17Vofq4p31GE78VxS4tQlgzCFp3x9HxiFZGmkmJqBn+CaWIQxHaujRr4eQ1GVhmMgrxck+Oe0cAx7Cd1BWPHGRps1LO3EetG3JJE6qrGtkJmp4Dvq9abmKY1m3qXDxEJfMLFe6et+y7tPrmtsm/zMkpLFF8qQCHTetwWzkHzZcfyQMLdzJMb1IWmbn7KXS4AsU18KDfa2V7PZhiUn2fHMZFMjGdJmBs9LoX1aZ4Wf3UuTv5nU76xE3MXxQmVzM3itWs/yhmeFwkHFdA93Vlr/70uxiZSBf+Uz/9o6xJZi+urdPw9TBfYRvfHEP0Ifcl/TDCA5ZF9M/X2LGimGXRhlMIMXK+ng2sa9OTpYgiEzP8etpGK2kB8GvTdm5ymdUXm21uZ8Q6VjGViOohVz9V8R2pBrcNKQN6Z5eJ5eyR9tPyseye0OAXWFrkSztx+IrghbzalHC5udjkJdO7tYPJqpK0oxdLWTttJxw5a1ZprF4s7spvimGEGlXiBZbs2gMWY8UHj+tAwFZstdhGWeCYt9QaVP7pn5+FoROJy5Zmki2WWWaJAhwDn/Q7N0guGzvjpioprlMGgAfhGicyMZvbcLfNt+65Deozu9ztQoZpCiJRHZSUesbt7hF1fAAAAFQCwLEFVVTPnDmAyuuZ3DbxwYtiggQAACAEAp/cjRofm8fa1N+vcONr/FV1z0b9TabyWsjmw8OKFlYhvAZ+l9iHw2VYK8SR9IBOiUDAz4WsF01wT5UKNW1M4HGDoNDIawTqjBERb+81vbTr5lmU0pMaCcJD3Y1iku1ioNQsKV/tnRClNnQ4SvV+hmCwjEJGTFAISn1I5yn7WbwElC9BzpGH3OaShl+t8twsXx/H4uenegZIZBoRyrd7DlRo3epIcSsFx3xS3XAeu6yIcPcf9xONm87ZElLNEBQ+TqZq5w9reH4HFD0GgYq7ZcJ8Ux2zaPPnPEiCsPkZN0aVfOumM6qnrGijlEBOSuqmSPNoRboM3LSfgnSECgiLCVvbRlAIGqEIaCGbnw1p/DcprelAHf7bwSHfehjYf0FVTq7vvXFDz/vO2+8EojLbmhnE3ZFi/0opLa54X16Dcpe25prkIv92VIWLuMsPbHCazroNujejv3U+1jvQ4kJn+jATx5LSo1k9rlJtIQsxRvu5000Ac+HtBZq7dM8IahItp4ltEcHWUVhpQUhoRpz2NDqW4CgosKMnTwRKArbgEXy0wlbVtI3E9vQqvAcklaP4kaiLazJaOqNYP/V+WTCuan3weArOu+GC30QfCaSGtUDVj0TxHkGWjGW4l8rYnyktW1AtGGcHiVXM/aAV6WAAPzYHdJF3RFO79aJnVaXEbW38etFy5A2I2Va5zyJwMduIFKK5yLCG30B9BtG8NpPzcTd4cq7UeFcSQX79Fw/ifpZvzj3b4nT/JmudVDfi
mLSAgO5Q5oSbCHVHkZEvFzWBtue0KinoGs/ykqLmXN+q4BZIGucYsU/CNBmrgWPoXHAJEmHr3GzVH6lvAmuve5Ye1xco89CPHouUAezHuliJ6+7yXJ3+AP0y3zH3AcdLRIvz7yGDXKCD/m+VZpo8eq0WM0BWHT8Zze15Kf6iMLS60vHRanutOiLumh+AWQDwDc0abtG6tYOPGw5lKcmou/HuDOXJUyQYeba2l/Y761kVzVLC8FluqZovu7WaM9Wzl/Ofr/H4aNOMWwALvWDsd6wn+Hzc+wwpiZiGL59anNOZMecgye6nMZMoIIx8b2h0tR5TJffX2tCDwlGAudTCCU8Tna/+IzWytS7zUdQEE5UBfwi7pAeIfu4P/KlYP4LLocEN5kM+xxas1xM/MIQxXbvhr4LBq8paHl/Ed2LzjxLnix1C+i0REkyX7CADvsw8uvxDk3VqAVOO2hVyFOmtmaqex/lDa0ga486OJWw+i9Ei4Cxb/c8sno2C20ABjiHnXbZVKJWV7bSvMqgq4VBcDyhH9s/OUVIz3sdZw7BFok2RzQ8GtGAXNmDrtqUT3zGo+4NeYucIMnZ4zamhdz+krS+aMuNEcth1xsnX0omN4COMtIkIQKuPrYDfi220KGFUP4sUJ3zxkzVENFiF+c4bh6aMZrh096sZx+Sf+ZNWm/8ZpuOEJOuhpcvaGsLmbN2FjuyJoa0ZcsUi7w0NpClJzVHnnaXrWOmaVGAeNRQ0YIQHiXmAmlX6ZqCwiWD1rNJHkDunhwRDerexExdy7qkORMKwHQOenyy+L6YUflT4rj8lw9exHA6MpPpFvLCv9rN31sDO26WzI1i4Iekz8WGyMgsoY4a8iSrNFbh59dku+NmVc/MrQ3LCgXQ2NZTZwmGwxfQfRxDnBVy8aMZga9OQQ372GWPJeYnGpOSeTMQOidT6khTEoraY32Cq9jqQpCuRdAOjPSBu3uufxORjZHBFbCetT7khHwfsqQlcXpsDfy06leNbPT2tXKlcGzmq1zElTZq0Vdlx9dOX3NwrJBMmYJ8hR6TJeTfMPABwg8OqU/RPDAsKfC6QziDt+iTkH1JFkobrG9x3C48ydzB3FZi/6ulPO558kpA3xkVFDx1V7yQyXu+8z96qvgrpig84wzCQbVIAr4P6dVyPBGrkc6Z/mElo2wTRRM4bYoGdkBFA0H4mMXSDUPc4QbgAafbFiI9aS/dV6cx4LQrpp/XwpyMlhT9ZFF0pj3/6mev44h2qq/P2fGS2B6ntZZXrdQ5a7q3AZ4bz+ggvOC3qJHsUl9MD158ABf29rSzUU2YcP3C1snKNrMw1aWCN+aT9tWzb0WztupAeHqEqGxiWEPxZNaQkU/BEqL8QkzhmV3xuqxQ9Jc72HDptQJthIPtrHnDzGoj7x3pu24W/PbHa8iDZz8dHf8U068M0QsopuOSY59TwDKUkwsBrHbgglos/2D57duWkq+xo36LK+uiKenvoEGqry7Vgz8xEMpQqvyY/9m1H3J/NLQCeE/si/o2bUPNaBYIO6acjqCk+JDObhL1RDJeeN1h602hQ0r5+FqEJDwwZxllYBM2bPPMZVD51yPm1GcbXHWA0JTKuJyLqk0XtLiUokHCqCezLm4Uwr44lS3nqNOMyyG0GdBItJms1X1ZPSR6pnIwbc4OF9fCWJQMN1BNRFpoOpFxvsPhAi6Y8MgHrKxBtR4xUEWVXJ1oJIDhDRtXsBgOUmi0kPWMZ7yCaql30OlAq/rSpkIm4wTQhMQO7kJrRzB8qmvWGzjjC85aMxbXMnOYu0xYlhoeqspnrqUXGJcgB+4o0w/olkbEKp0XNSEWcB8DRVhvsUaK2hCc+DpQeg4te0Hf3SPhGbl2aKeFBl3TnYmpMa+2H87QK4+9Vhs29Gsd7MYmSNWby36lOmc6Y7m1QWnWdn7srFzhLppumIWKWjRdcWjewW0zGaPjdX/cNik/oAAAgBANkBODfTinYNlvuuzT5FGsG1ZSu1YWgbrPw959qGRpuBgMUtFsf1PKe5UM76P9EbTNBMPCt4k+PFFwtpEtLZKeQ3Ieng+XPvXGhMISa5aV/7mB+/JGY0dd3F9Vux15sNqM8mrCniGjCps+lE6nkm6suQJB2SVnmfdM4UJ6FnR+4yDlmc6eZVyz1Qk8gh5ujz6/o/scwEp5VfUspj9u34TP7bSzp1CDjsI//3c3rgoaMwIiFH7SVenxI4c7a3e0Yxj+NRDuOruswTNwSmcSAvhiro0ZboALQRirwCLA/0JUckWYOYd1WM5H7+PQZAuyFHXe/hK61fP8pjFf3hcG7lLfLEk6gki+u91l0ZXrWb2AgMMHxNf3myUqK1SbIGm3q05JbXfCPPk7rra26gPKa48b9J81AIWrVbS5AK7Pe1Hp+mmZaGFBgBcZR/5sZxkb5BNeyipI71Vtq4EW1duBeVQ++Z+1wgIg/NDvW1JS1TrCvvwBqcZ02KWrD3XKtnfICoXhAUCeof+QbxM36GVwnF4OUnxhV/7+SOyl3xh+nGW/28Ir9400UPWIhu2B5CPuW8SUelpWJG3Mkn13yLwBC1bcHspi3lSTdS8nETarAiBZqq5l0ZftqaJ5pRWayPtWI0oq4XNoQrAEW3lqtUDOPUXO/d7hJuYTNOigs3o6bAjzsj4VhGp6Q92icjR3neeRtshN76cb2hX+Hang+WYgme1r6OGhsifr0uHwslVuMYHyUAWEaKTylz9MzY8lqz9+gtKXFy1AcII0KnCMIYggH2QFHC9hnlXyWNkshne/euNj4tI7cxzZNyHsJp6zxqmfLI+JvpM3pVBz/ANJ/cQMbRQsGEuqgiPOP2Pbje4KMdRLDAFI43F4U9Bkjd/3Qe/6oK7/ZrQ/nfxGpdX97z0Ek4kfxdHu3HLZeegVZgz5s1rgZfAqAMnXoc7UEZHtKYicwiWg5Suzwhsbb8H4z6maJt/6o8tVHbxqSqYBszIdciBGYebQXvJDCCjIclNdHY224ejeEIB6zNm4lcin7yLxW2cuxoYsNeWUT3oBt1x3cvAjQdx+839yIONUKbTmUyKrcrIRYIp2jqdQQx+UqeyhflSkbySNCNv1V2YSy4WkbukzjQ/w9CNhYm01kHAZDDd5nDm5tkrDZJxKxwwqHazukaLOk0FgjnYtTieMrATNFS9H61u0NdYdkgLfJ8ap9qhJZsZ3I+LJOJAuu4rP5YCUrgRaWjAzhDINhfucorSHOu/qXs5wwc7MnhxaeAkhGMqm2V/t5GDo1sl5LkH47ER4jG3h2l+KDlYcH2AGyQfV9CQSt/Tyq5z6beXhWCouJZp1HhFVyZEwKXsVYxReWjz5h3dvAlENY9QFI1yeSWLzuJYdvr+WPiYbWvr2zDhQK5s7YEcbVLDZTI51gjL6emvEr3mg5g2PHAkYl2lPx5xRxexcAL9uk8JUGUeJCulso2RSK/hFYM2dG7FW+RcROW2H9xtVHtDSVxbyX520BB4oh9aaMLK7PK4gA3fyKaQBEzxUaKbpdpsU8lu066DV7xQIYVOG4HRl1RCBJgjFm8D/XiSYLUsa
2iuF64zthagWkeRCLyr+MQ8MThL5SP3ryTYqxOsFiLIJGsI7zEdMuaoq2BI6N0KqTlb2iVa+BgOcFPE0QGe50ICkHXy3jda/5h1oH/TjvS7DsWoAcMQFZt+XP5KJOCz33uvVQrVsDDyCoCk5Iyj2jWzSxyO8lhYrdGUezY41sWaHQM4bbqARRrnZFymA5Bl3kBxfkKvtXATbIcBtR8OET5VxUeUaUfjvka2hoFSTf8JyqNUb8/iNie3jr1alo5/rXL8QaC2X3mJ3liCuyen1EoQvQqzbGuHg238+gVznlGjlGIMBw1MgNuvzOziTZd8Oh0NtIwMn4FMcMk6sx3EbYIHvfVaSkceAylMf5zpMXymuNvZzXccBtwkzMGT4i3XazdC9a9tdd5SzAMKrjkaviM9+Z8gA2SQgLUR75gozqeh9QsuOnOZxmkOB+YVfnRUej53cm5e7fbUm4a6pv2+r7/Xpi9GCXIeX6Sfdk5jmr9jTQ++7f5WeAhPPgGJEHkP1NhAldK5NpnBo5nYRpvxrNkAhk1sHlDoptdplbXaMoMjSgqk4MrWN3WBHmqtJkGC9RJmj+N2QZMqaXTd1X/LOzP0skHbZ4r/OI4cCOFdRWlTDKrCXdLLtSW6OMTyWj94feWw0i6nwXGzgFbhos4Jsr5+WrYxo1pVat8k+b4j5acvUdpL543IRV3fNJgTyGgn/maw7skokyK2jFXGkHfaE2E4cYlX6qohFNTs3YJbBBvw7NcPmBtAGnvjb6UpJPlB3Fl/bmSwilO7knFd+8EV8SfmXhyF2Izvj2he9ncxd0P9Q1MhRSGmnfsULj6bT4nvzIr9l61GUaaEN6A55g6s8px1xmnyXVSZcW1rP1df2P4RhlgbfQ7Q9beZbO8CRdOFRzAjLjn87K4QDZK3EyzfGz9ZqcDYjfesByqAptLY9s0yzdNdfFS7RqC5v9MklMJLgOYXHBYPLNJzrKjvbaKYEacA3vNwRvFOTxKJ8lha9Ir1o8hu4urWUE6Mu3GGttCmFenF7jVE6Qf4Fgpt8OKSk4cVXPP/2w+vpeAki2hCBNDhpVmOWVsc0uvibEQjoFB62TYkUeSR1e9P2bjCVsXei526CLAogC497trRRTYlQiviNjUcB+26JshoF1nmwuw', InvalidKeyLengthError, \"too_long_dsa_16384\", [\"loose\", \"strict\"]],\n [\"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAEAgDGrGaNv7i+sGSelzf+7JsCECa9a0sqSg8q4foGkjeV6RkS2tWvKXoT9rICjEdXXodj0CCVhe/V7dmAO0AK8KM0mcvPfTSC8zH1ZBsqaFFTWwmBD01fbH9axrrg3hM0f+AL4bMMWUdxdNrVo90s8PKU6k/HmUNLVx4gC6uQ4A6YczvOVZkuJ4f7HDYK/v1LNTRNeAkw94YpSIZVAoTOZN943+fRCE9cm155pwmFsS+wfzK9+jjhGXNEK0xooiVBRwQM7qetN076vV5FiiM0LO1qYi5JrIqK/70ske86x2mMhMkOe6jqQQbt32PFVmYqYJWcAYXz+bhcQw6oru0c6gNq53aGOnuqI0uh/zV2XH+cN4c8ABcOplzH5YQEUepNVzxylkvpWxdg/ZzR1pvyu5C8RkJWrE3AlCwpix1ak2xTDzgc3rwTTggNSYqvzmYq0mYJhZk2VWsLVxUgdxfwC3LvIHMXSTU9iU2Aqrlhy7bJAqxQFKWy05wsIOI6raPBLqZnPmJ76Ld9aXTrhBFfIDiigr9ZVsVAdOvmyAGCIj4x3Xnlol/3lN0M2+OSV1SU/5ZrS6dIlXPZDak/OXHU0iIFIODhYU5r8EI1M6BI/jsgQ8HatXmOJkfnIkVP0HxD1KvoAFKjVG5sM9KG12LqsnzfD1KL6PzxpOOgoVgznpOjSzVmPKAkU8N/r6R4VIAmZqxpF8Hlzqg/Gfh5kf6CJXXx8OQt1Z/DAsfnl3LvHFNuE8GgXgrUE022W9pV4oONgojc97JSgBXaFkK885UnJKTceAdGQvChEhsU1j3TiyKPox6ICGpoC2nGONJoDE8VQ8dE/YiZmqkZ1lJWX07EwevrIcnz1UBHFaR72aiAADRYClsitLA5+1mnydVstkQ8XQuuKNOFT7miaWUzRHwj9BYGb7oGhNd9oi1VTVjH/5Yq1UiHHESGaIjeLi5uG2KguDFpcvy2ngtUy3ZbvDj+DVOLL+3vAlycRPjN0nBE4e/J6UqdpLg0DbG56zNj86aU0ZgL8kL8NRkFHyV+5zG5iLFkGklbm4nwCxSW/bVT0PFD1is6JbtIk5i+liS+hiuzSF6NGouSuxDy95yWSG8/84fgPDFtvXtOD7Kl4P7EpEAL+VBZnremT9I8tRl1wOHxJKe7jbEcWC2zkuHNlju0Nv5SFijF9c+krRbHDYEzsxPpdqlI4gPtDFdkKwaKN6BrsxBsz9u+PhS1AloUYcxKRqWbqHuDBrKmxnhOgFqJ9ITX0RajtrApt1LfkSBXcFrVEx2nhQkGa6VwjcX/zw2I2iuJFOCQmc9udHIlyaCCSe1PqOIbOlOk5h/Gl1QvRNwSIgf9dZ05lZr6dc+VX8YGdyHsjQ=\", InvalidKeyError, \"broken_rsa_base64\", [\"loose\", \"strict\"]],\n \n [\"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAH0GODBKRjsFB/1v3pDRGpA6xR+QpOJg9vat0brlbUNA=\",\ndiff --git a/tests/valid_keys.py b/tests/valid_keys.py\n--- a/tests/valid_keys.py\n+++ b/tests/valid_keys.py\n@@ -23,18 +23,6 @@\n 'SHA256:sgF5bepZnGBURoJcg+ON18F1LwvAmnUYlHoJUgYZVKs',\n 'dsa_3072',\n [\"loose\"]],\n- ['ssh-dss 
AAAAB3NzaC1kc3MAAAIBANmIbHMDdrRAyd9rMdYoAAxxTIIke8xxS7txH16etsax8YlUEojvIEKRS/C5hulXw+fZYjdz0vjhTamoRo76ML2qz4X2lh96tJxoHVMVtXTdSDKfra4VuTp1mkYr6ycH9h7PEop+9WuB/Jx9ua/OCQ6cB6QePvg/XEaoKETHqrH7IilfJfBeLGgZc76q6+xjw5Qwtmu5nyImd17dWCa4ECdGw7NlbuuZHUs0/TfcJ/UHHuBgj1FKHEINBu3bcZdYKsyOm0VmxRw9Kkvh7qobBMXaNN9l3YvySTsSSbEWboK7jcFEC904i+hHxuYRa/K1x4v8SDaiOkxuJHGOCbfK/TaQdnyUBA2OEgt3MIuQGzc0xSXE5rEkLcjjkadkLA79Z/kajTNWzx54PT1qv6jP8kaa9OCzI+9aml+Lw2dtyDdBN3UDdSZj/823ABEm/zEgifziFrJbz32y3cdza9Jj/EcHCxDiN66Ghj2uLBERhXDxBWPZPoPlKeH+Bk/X/2CqA2H51RgLHtsVvqdrIWSXpAYYpGDaLnsG0yfsSrjGGIs6S9My+1ZSJPE0x568g6vbsTkuLJ70HwY4nhb/yuT0CuzPXsHnzKifzWMZldJki2KDowbJXye2ZJkz6PoknxC1ygT76AFxFvnZUHBMQQxrzV7j3op9jVhsvutyxueHB0efAAAAFQD8bGMGSx8iUNR+mdFoociqqhTuHQAAAgBEg7s1ZDHOuD04RlPNl9dMKOVZpDTdvVI0sKyWvWoxbImwQIBdE+DGZnrWD7/IxosU43J0hE+HcoADou2uJLGcxks8lcRBvdH4d4bVisT/+DsOwPuJKbHNwkV4De9ZmdGEvahQcHFI7KO9HNWWOWMl7opb6KUjRFYWkzF77vVRUw+FdZe4k3zrdHLrv3/+nLPuI7qxqyr+4HpntA4udx9mMVvKBL6ZgaDiXjET43NaFGmnIg9XXoiVj1xtk+dpFDc02gvskHr3zl2G79+FRBYHhMjzlJMrH85bmIIhGPZwFsCWBOupbeXTkDhzDBjcn5NYBhTHmpIcISv5N/anscs4ptUBnS9QbaZNaLTC/4IaNgLvI+iEvGxOml2uMHLu5njJENhB/T1nlDIiTMCk/8cfT8+Rnx/AUjdT3cOxnygYwHeFAc935EmPzh/li2CY+3p43Ihbd5V30MNWTvGGQzs4+7eCqOe+bhkh8EefCnh9yGCKkFZSH0JEoqkTr8842vXBhaQG4cT9e5UTKSkDigJdMb2Fp26jI3EdRbAWD46/Zp7CQkbKrQJxo7OzWUC7UY0oM2ka4jXvrf+SIL7/ith2gILOwzIH5MSM7A9AgQ6TdEvFQ7mQoWnCB2kTjB831qrPFKO21EP+iUrtCslDri2KfSw91l8Fv2Aro8A4pC7/wwAAAgB/hMtSA+nXw7691YNE7howbEEBdUS+5xP5KygfzSkruA6NvzQxC/V+6hHJ8i5oCgdhJ8KvVx2QeXe7M5QHDu3Tj3PWRi9oINwMlN1ZKRHY3FPoHBHje3uvL9hgOZUOqWTY7iuqEuX9TaL5Mu9dXsdBkjWuQYKfh1rnhnRWaH+ViPpQxv8ZMVQ0+tWSQWVpkM72lZIyZtgVOuHcRyImN1zHeps6ibNpfJ7UaHY1IA7DczWD0qGR8yawyhCVQ0KtbVVBscA18SO5c4VroJI/je9/ls61d190BAWEJnacEXHEtAuVJRbUWIzldlWy6NM4E+mg1R7Wy9FESxvviXUap2jAh+60og6TVvpgOHhw+BGc6pBgzkxhY46MvgOuOSr7V9ikk6h11IFTn2wHTB14NibS+fA8NOadAHB6OklY7pEMgadSgFC3kOzKpHeoEPosnHr7TgtiGwOhfAVlr/DbsgQnoQaMHdMAaJ1iTAMhwDZNpFSzgU5WjUnMQjydJKkDo/3OUksrMyEXR4kHt0vbmLBKmUVSX02MpQqEzXNPqxEbjfg7jYezZfjbsN36bD87BLpjZhlxkDECvTKPD45Af3di0Cams7tlc0RJxnj8TvMhG9NTjsIjaqLGCLZHeF0rnWE9nteKO8JLGBXgv/12nOUy66NwIbiTohTJvsIYaIyXCA==',\n- 4096,\n- 'MD5:ca:b0:46:93:9e:0b:25:73:50:29:85:48:a4:b0:64:a6',\n- 'SHA256:yYiuSoa3x+MDucep2aaFSb+rAULOPUB0WLll6ppyaws',\n- 'dsa_4096',\n- [\"loose\"]],\n- ['ssh-dss 
AAAAB3NzaC1kc3MAAAgBAP9jN4kCDNCxQ6hKKIswzKPtiP2HTA/yHDOe4Vs4xfXINs464QV3ah0gIgx4i0uNidZL07msPbXihrmJT97fPhT/6Qa9SsqKQ3k9fxJIe1CyEQ+/2dJtGO1uwrLkymTSqa+PyQJ8WS+bVEgYfmGGvGyxClYkK1Qcny0h9+k5GEHnUzVKe0giwWk2DGFHBZqjaskTNJtcgGVdm9jYbZDnSPC+ovJ04rTnzgPwHY/GqBSt/lk8MLidEqgZ5RFN7Ytlx1/hZIw1jpiQmQms9lrLVXN1PqA+WSoIPjhLKOUoG6HMBNvH3XSdA00dxRJ92F4QX+hGKUt/I5fLf2JluObjRqodfTua5iU254TTQ9XNk0JvVmty2FvJ+8E2MPd2VQ5KnTSghkn0JodBBxLoEbt9EatZn4wCzWw82rtvxzPnRS50fgw6UDbt4H9JEIlVOMKzxLY9C4G3+Es0qe7MlWzEEZsGQdFU9jjSBxj1Tg5Wz8ZzsCDZ9nZtAY9crhknFOj7/NAW56n+i1TZTmbEK0WtMO8Vjcl2MXOyQxo8PhAzshNBcX4KHcnkRjZOFGnKBAo02K9WUg1ATKvhGKhXOC6BGQ1a+7vdaqcZ+YeV9ByDK+TbnN5Jv8f2JvNzwFwIjTScmZ+nVY+NmANOEu71JDy4YzndpxodHjtT6ewT1oYXQGBi4w8XiC6R6tGTCLu3ho9dm7egeF2J1Llrf1HhHGTudYh+dbg3OoN9VLpYSA2erD3m5+2LQ2zzYmYoMWL0RUk00dPdosrkuoRTEYG6SXILFlXoar83VOhn01Kdh6mMzRFUvBX3wGFU5t7G9PR6KzPCreLpFDvuBmNrMwgkVspOhn4palbsbshm4zhfL7W1tovrfhkXk81LYYHj7Ir+uv0sDwOY2kgH/5RUxaz2SH5zQwbyc21PftDPIyNGvGBUm6Srn4slaeC9egHn/oKCZ0PZwb/FIBafl8I9QJ7rATX03Aq1QSF5/iNvJDeEZTuK6WzvW4CSHgDM+TfYJzUHo/YfUS8SQ1/HeES99rpB+7qdmmT5WSbN4Nr4+CGWEpdAnIknB6jaf8zZBDHUPOMnwjReHxIe/cWDBua/+PTwPGp5/314gKYz+iQJWI6WDHoX87YiWZNOq43QCxX8KRHfYB8e3Bg1ftEIz8bg0VrH1ucsnuJ3L31eoHGmgr3J2qIzctwA74zzNR+BparwxgHxKD21UzIHekmA0kkC5A1Pt7+HLW570WFphMMmh1xBpKEiP0R/h76NYkbfA1FYpsAVvwUGdLNI0qC/UrTAXpw1B6I0oyqH5hypityc0ys+LG2RSa9n0kyx6+EbtCTep5nzaBO53jTJCzqnUeloFW9fM76yWg7Sgm9Upt/g+Fw1XDpZZF+54EnRfLj6ajBRcDN+o9gQ9oBXGv4fiUtRipapEwdjlQyhoWe6Wt49pYyA8ai+ASK3DAuA6czVpbltlbVt7ouv4wawxLvFd+MapvH4R28Uyb2BM3Ceq9afY9mPOyWUv26BpBJcq6LRnYlz5asaw2nsJ6v5y8Y6POjKfAb6q9qvyNxVC8AtIesMLLheL3RBUFTY0Iz7B1oRCtLEjOi4KEc+440c7jrNUxPM4Gw9hZbosuCR2yzIafjRfvmPfmFgGVFrPTVW1pP8yKSdrpnYkZbUeCI+QENbqJHe55hB0kacCsLChNRuGUPg84F7spGlLV/l7f554kjoDgaI+rO3Vb59mDCnr1zGVzdsPCfeyHz+s9LL1+KwnXCX0DDJfET90axiPCZ36IIhaBdCRJJwkmxN6koHSMf6Jbcby6IWOuwJDjK/502Z59Isx7r6R7VLiomi4lkOfk/j8Sg6dmH1bb9jAXduQSSsofxRRFX2rRbcGr7HNWbWmat4a4X4a+2dRwm/JlHBtgiDeuWNSIz8laNJCnuFvbFds+sB4WczL4s1h/VAdMiMFXXwVfoyMAh2CsYPnsUbKTy4/J3AfIleX+5kdkipAed4a9EWU+auyIdqpSiNItv/ewQvbcnknbG82TmsG5whsO4A0D37OasbRE23P2w9rMqD6Wg8w5O4bKabX5AHdMOWn1kVNThsCsTar5eC9PIOMgic17Vofq4p31GE78VxS4tQlgzCFp3x9HxiFZGmkmJqBn+CaWIQxHaujRr4eQ1GVhmMgrxck+Oe0cAx7Cd1BWPHGRps1LO3EetG3JJE6qrGtkJmp4Dvq9abmKY1m3qXDxEJfMLFe6et+y7tPrmtsm/zMkpLFF8qQCHTetwWzkHzZcfyQMLdzJMb1IWmbn7KXS4AsU18KDfa2V7PZhiUn2fHMZFMjGdJmBs9LoX1aZ4Wf3UuTv5nU76xE3MXxQmVzM3itWs/yhmeFwkHFdA93Vlr/70uxiZSBf+Uz/9o6xJZi+urdPw9TBfYRvfHEP0Ifcl/TDCA5ZF9M/X2LGimGXRhlMIMXK+ng2sa9OTpYgiEzP8etpGK2kB8GvTdm5ymdUXm21uZ8Q6VjGViOohVz9V8R2pBrcNKQN6Z5eJ5eyR9tPyseye0OAXWFrkSztx+IrghbzalHC5udjkJdO7tYPJqpK0oxdLWTttJxw5a1ZprF4s7spvimGEGlXiBZbs2gMWY8UHj+tAwFZstdhGWeCYt9QaVP7pn5+FoROJy5Zmki2WWWaJAhwDn/Q7N0guGzvjpioprlMGgAfhGicyMZvbcLfNt+65Deozu9ztQoZpCiJRHZSUesbt7hF1fAAAAFQCwLEFVVTPnDmAyuuZ3DbxwYtiggQAACAEAp/cjRofm8fa1N+vcONr/FV1z0b9TabyWsjmw8OKFlYhvAZ+l9iHw2VYK8SR9IBOiUDAz4WsF01wT5UKNW1M4HGDoNDIawTqjBERb+81vbTr5lmU0pMaCcJD3Y1iku1ioNQsKV/tnRClNnQ4SvV+hmCwjEJGTFAISn1I5yn7WbwElC9BzpGH3OaShl+t8twsXx/H4uenegZIZBoRyrd7DlRo3epIcSsFx3xS3XAeu6yIcPcf9xONm87ZElLNEBQ+TqZq5w9reH4HFD0GgYq7ZcJ8Ux2zaPPnPEiCsPkZN0aVfOumM6qnrGijlEBOSuqmSPNoRboM3LSfgnSECgiLCVvbRlAIGqEIaCGbnw1p/DcprelAHf7bwSHfehjYf0FVTq7vvXFDz/vO2+8EojLbmhnE3ZFi/0opLa54X16Dcpe25prkIv92VIWLuMsPbHCazroNujejv3U+1jvQ4kJn+jATx5LSo1k9rlJtIQsxRvu5000Ac+HtBZq7dM8IahItp4ltEcHWUVhpQUhoRpz2NDqW4CgosKMnTwRKArbgEXy0wlbVtI3E9vQqvAcklaP4kaiLazJaOqNYP/V+WTCuan3weArOu+GC30QfCaSGtUDVj0TxHkGWjGW4l8rYnyktW1AtGGcHiVXM/aAV6WAAPzYHdJF3RFO79aJnVaXEbW38etFy5A2I2Va5zyJwMduIFKK5yLCG30B9BtG8NpPzcTd4cq7UeFcSQX79Fw/ifpZvzj3b4nT/JmudVDfi
mLSAgO5Q5oSbCHVHkZEvFzWBtue0KinoGs/ykqLmXN+q4BZIGucYsU/CNBmrgWPoXHAJEmHr3GzVH6lvAmuve5Ye1xco89CPHouUAezHuliJ6+7yXJ3+AP0y3zH3AcdLRIvz7yGDXKCD/m+VZpo8eq0WM0BWHT8Zze15Kf6iMLS60vHRanutOiLumh+AWQDwDc0abtG6tYOPGw5lKcmou/HuDOXJUyQYeba2l/Y761kVzVLC8FluqZovu7WaM9Wzl/Ofr/H4aNOMWwALvWDsd6wn+Hzc+wwpiZiGL59anNOZMecgye6nMZMoIIx8b2h0tR5TJffX2tCDwlGAudTCCU8Tna/+IzWytS7zUdQEE5UBfwi7pAeIfu4P/KlYP4LLocEN5kM+xxas1xM/MIQxXbvhr4LBq8paHl/Ed2LzjxLnix1C+i0REkyX7CADvsw8uvxDk3VqAVOO2hVyFOmtmaqex/lDa0ga486OJWw+i9Ei4Cxb/c8sno2C20ABjiHnXbZVKJWV7bSvMqgq4VBcDyhH9s/OUVIz3sdZw7BFok2RzQ8GtGAXNmDrtqUT3zGo+4NeYucIMnZ4zamhdz+krS+aMuNEcth1xsnX0omN4COMtIkIQKuPrYDfi220KGFUP4sUJ3zxkzVENFiF+c4bh6aMZrh096sZx+Sf+ZNWm/8ZpuOEJOuhpcvaGsLmbN2FjuyJoa0ZcsUi7w0NpClJzVHnnaXrWOmaVGAeNRQ0YIQHiXmAmlX6ZqCwiWD1rNJHkDunhwRDerexExdy7qkORMKwHQOenyy+L6YUflT4rj8lw9exHA6MpPpFvLCv9rN31sDO26WzI1i4Iekz8WGyMgsoY4a8iSrNFbh59dku+NmVc/MrQ3LCgXQ2NZTZwmGwxfQfRxDnBVy8aMZga9OQQ372GWPJeYnGpOSeTMQOidT6khTEoraY32Cq9jqQpCuRdAOjPSBu3uufxORjZHBFbCetT7khHwfsqQlcXpsDfy06leNbPT2tXKlcGzmq1zElTZq0Vdlx9dOX3NwrJBMmYJ8hR6TJeTfMPABwg8OqU/RPDAsKfC6QziDt+iTkH1JFkobrG9x3C48ydzB3FZi/6ulPO558kpA3xkVFDx1V7yQyXu+8z96qvgrpig84wzCQbVIAr4P6dVyPBGrkc6Z/mElo2wTRRM4bYoGdkBFA0H4mMXSDUPc4QbgAafbFiI9aS/dV6cx4LQrpp/XwpyMlhT9ZFF0pj3/6mev44h2qq/P2fGS2B6ntZZXrdQ5a7q3AZ4bz+ggvOC3qJHsUl9MD158ABf29rSzUU2YcP3C1snKNrMw1aWCN+aT9tWzb0WztupAeHqEqGxiWEPxZNaQkU/BEqL8QkzhmV3xuqxQ9Jc72HDptQJthIPtrHnDzGoj7x3pu24W/PbHa8iDZz8dHf8U068M0QsopuOSY59TwDKUkwsBrHbgglos/2D57duWkq+xo36LK+uiKenvoEGqry7Vgz8xEMpQqvyY/9m1H3J/NLQCeE/si/o2bUPNaBYIO6acjqCk+JDObhL1RDJeeN1h602hQ0r5+FqEJDwwZxllYBM2bPPMZVD51yPm1GcbXHWA0JTKuJyLqk0XtLiUokHCqCezLm4Uwr44lS3nqNOMyyG0GdBItJms1X1ZPSR6pnIwbc4OF9fCWJQMN1BNRFpoOpFxvsPhAi6Y8MgHrKxBtR4xUEWVXJ1oJIDhDRtXsBgOUmi0kPWMZ7yCaql30OlAq/rSpkIm4wTQhMQO7kJrRzB8qmvWGzjjC85aMxbXMnOYu0xYlhoeqspnrqUXGJcgB+4o0w/olkbEKp0XNSEWcB8DRVhvsUaK2hCc+DpQeg4te0Hf3SPhGbl2aKeFBl3TnYmpMa+2H87QK4+9Vhs29Gsd7MYmSNWby36lOmc6Y7m1QWnWdn7srFzhLppumIWKWjRdcWjewW0zGaPjdX/cNik/oAAAgBANkBODfTinYNlvuuzT5FGsG1ZSu1YWgbrPw959qGRpuBgMUtFsf1PKe5UM76P9EbTNBMPCt4k+PFFwtpEtLZKeQ3Ieng+XPvXGhMISa5aV/7mB+/JGY0dd3F9Vux15sNqM8mrCniGjCps+lE6nkm6suQJB2SVnmfdM4UJ6FnR+4yDlmc6eZVyz1Qk8gh5ujz6/o/scwEp5VfUspj9u34TP7bSzp1CDjsI//3c3rgoaMwIiFH7SVenxI4c7a3e0Yxj+NRDuOruswTNwSmcSAvhiro0ZboALQRirwCLA/0JUckWYOYd1WM5H7+PQZAuyFHXe/hK61fP8pjFf3hcG7lLfLEk6gki+u91l0ZXrWb2AgMMHxNf3myUqK1SbIGm3q05JbXfCPPk7rra26gPKa48b9J81AIWrVbS5AK7Pe1Hp+mmZaGFBgBcZR/5sZxkb5BNeyipI71Vtq4EW1duBeVQ++Z+1wgIg/NDvW1JS1TrCvvwBqcZ02KWrD3XKtnfICoXhAUCeof+QbxM36GVwnF4OUnxhV/7+SOyl3xh+nGW/28Ir9400UPWIhu2B5CPuW8SUelpWJG3Mkn13yLwBC1bcHspi3lSTdS8nETarAiBZqq5l0ZftqaJ5pRWayPtWI0oq4XNoQrAEW3lqtUDOPUXO/d7hJuYTNOigs3o6bAjzsj4VhGp6Q92icjR3neeRtshN76cb2hX+Hang+WYgme1r6OGhsifr0uHwslVuMYHyUAWEaKTylz9MzY8lqz9+gtKXFy1AcII0KnCMIYggH2QFHC9hnlXyWNkshne/euNj4tI7cxzZNyHsJp6zxqmfLI+JvpM3pVBz/ANJ/cQMbRQsGEuqgiPOP2Pbje4KMdRLDAFI43F4U9Bkjd/3Qe/6oK7/ZrQ/nfxGpdX97z0Ek4kfxdHu3HLZeegVZgz5s1rgZfAqAMnXoc7UEZHtKYicwiWg5Suzwhsbb8H4z6maJt/6o8tVHbxqSqYBszIdciBGYebQXvJDCCjIclNdHY224ejeEIB6zNm4lcin7yLxW2cuxoYsNeWUT3oBt1x3cvAjQdx+839yIONUKbTmUyKrcrIRYIp2jqdQQx+UqeyhflSkbySNCNv1V2YSy4WkbukzjQ/w9CNhYm01kHAZDDd5nDm5tkrDZJxKxwwqHazukaLOk0FgjnYtTieMrATNFS9H61u0NdYdkgLfJ8ap9qhJZsZ3I+LJOJAuu4rP5YCUrgRaWjAzhDINhfucorSHOu/qXs5wwc7MnhxaeAkhGMqm2V/t5GDo1sl5LkH47ER4jG3h2l+KDlYcH2AGyQfV9CQSt/Tyq5z6beXhWCouJZp1HhFVyZEwKXsVYxReWjz5h3dvAlENY9QFI1yeSWLzuJYdvr+WPiYbWvr2zDhQK5s7YEcbVLDZTI51gjL6emvEr3mg5g2PHAkYl2lPx5xRxexcAL9uk8JUGUeJCulso2RSK/hFYM2dG7FW+RcROW2H9xtVHtDSVxbyX520BB4oh9aaMLK7PK4gA3fyKaQBEzxUaKbpdpsU8lu066DV7xQIYVOG4HRl1RCBJgjFm8D/XiSYLUsa
2iuF64zthagWkeRCLyr+MQ8MThL5SP3ryTYqxOsFiLIJGsI7zEdMuaoq2BI6N0KqTlb2iVa+BgOcFPE0QGe50ICkHXy3jda/5h1oH/TjvS7DsWoAcMQFZt+XP5KJOCz33uvVQrVsDDyCoCk5Iyj2jWzSxyO8lhYrdGUezY41sWaHQM4bbqARRrnZFymA5Bl3kBxfkKvtXATbIcBtR8OET5VxUeUaUfjvka2hoFSTf8JyqNUb8/iNie3jr1alo5/rXL8QaC2X3mJ3liCuyen1EoQvQqzbGuHg238+gVznlGjlGIMBw1MgNuvzOziTZd8Oh0NtIwMn4FMcMk6sx3EbYIHvfVaSkceAylMf5zpMXymuNvZzXccBtwkzMGT4i3XazdC9a9tdd5SzAMKrjkaviM9+Z8gA2SQgLUR75gozqeh9QsuOnOZxmkOB+YVfnRUej53cm5e7fbUm4a6pv2+r7/Xpi9GCXIeX6Sfdk5jmr9jTQ++7f5WeAhPPgGJEHkP1NhAldK5NpnBo5nYRpvxrNkAhk1sHlDoptdplbXaMoMjSgqk4MrWN3WBHmqtJkGC9RJmj+N2QZMqaXTd1X/LOzP0skHbZ4r/OI4cCOFdRWlTDKrCXdLLtSW6OMTyWj94feWw0i6nwXGzgFbhos4Jsr5+WrYxo1pVat8k+b4j5acvUdpL543IRV3fNJgTyGgn/maw7skokyK2jFXGkHfaE2E4cYlX6qohFNTs3YJbBBvw7NcPmBtAGnvjb6UpJPlB3Fl/bmSwilO7knFd+8EV8SfmXhyF2Izvj2he9ncxd0P9Q1MhRSGmnfsULj6bT4nvzIr9l61GUaaEN6A55g6s8px1xmnyXVSZcW1rP1df2P4RhlgbfQ7Q9beZbO8CRdOFRzAjLjn87K4QDZK3EyzfGz9ZqcDYjfesByqAptLY9s0yzdNdfFS7RqC5v9MklMJLgOYXHBYPLNJzrKjvbaKYEacA3vNwRvFOTxKJ8lha9Ir1o8hu4urWUE6Mu3GGttCmFenF7jVE6Qf4Fgpt8OKSk4cVXPP/2w+vpeAki2hCBNDhpVmOWVsc0uvibEQjoFB62TYkUeSR1e9P2bjCVsXei526CLAogC497trRRTYlQiviNjUcB+26JshoF1nmwuw',\n- 16384,\n- 'MD5:0c:21:bb:98:84:43:4f:e8:62:27:fe:dc:2a:51:48:36',\n- 'SHA256:3j/qJWuiGWbNRvtN+HZTXC6VuRmNsDHrLA1F+Ow8gbg',\n- 'valid_dsa_16384',\n- [\"loose\"]],\n ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAIAGVQSlWHuzZCaZOdUFTZHeLCZFmqK729sGT2Ymc36zhyV1MK8oPcUqsqCWX8HOYODOBv80tdjsSH7kbm1UGcv8FgzJuCmhVslozru/SGsuRJWwjLIyHYKXx/KT3jHngzL1tQwdBk5uqZx9pekQ9xnvbXkUzucf7LZ/8MvTdQJPSqPX/3KdwUw3eiQoVGMSeKunukSAs9jbtlex8SN2ubqsuBEMtY7YUD4zLSWzkQ26L+dEhmYr1+WGVYD7t16vQT/3WZsqa6MWHF6q0OJTojsHWc0TILYmeI8jQJ2uR64TjgmsEug7egbgoK1oBZwhChzdemI0reJ66VS01OwJxpVKKXlHPZlnjMwF4jvWTCE6vwBG/BjVHFyVNZ987XAJLmoP5TY53cnoFykyKbfd6Knwm7hBXrdBz+ZVYzVoDPewfkkYYiKh490GiWKLuKN35th5DMglNrXgtdeHZDm4VwAXPsWwMs/FXyIhPmq5M27HqLb/e4ELkrIf5XGJM+tVaQxUfvQU4/TWGwTqgd3V5k5gGWR8ekmpVWWspcnrzM4aks2vxSLuDUQOnyLJ9RYjVfMePZ0uctN298+Zf1QTLewAnbvDC//kiZmgy+Yt7Go8Eg56CY1lFrWHZ/LQNf/0j8VGlTUPTg9uYWFNj3VGkTXSGEco7XSOPFQCkvkzoVaAxWeiNm7ECIUkIBusAOEqhhzJfpOirlgXxbrpK40NXJGvAPMo82HLA48WLBG7RcpzIIk/BDdhOBsM90crljGNmCs3Y4KbQX6CaxTUAUtRt8ydDP9V7qNkAsWgDp3uIOT8HjMP9K8PBTarwnZziGBx+ZlgqdYkxeOgXMiLhNKZl2VlmAeS9ojfK7azpCd+b0MuwvBfkvI1BtbJph/1gtyLTXv4JSUbZurZVES9xGh9Wf6fX5MroZhQ9SZry6xzOpCK7SlJJTwSQKLzNby0hLGBs7S6ew/DCFfAZEa1SJrubX+y4ogW4AcdSo6wKW6XdlCXivT8bvSdQRAbU+eVWDAdbi8fvq5BQuxEU+qtoxX+eZHF3PFVJWoPlGtKanEHJa/LyAXtrRVFKh79HlK/0PgGurS4Eco2ZHOuFz48yTrxPQMrhetfwCU8yRed6Ocrw0yJ2P/QtSw4+/EPWT3eyTL/8EN/ZY/6mOAPWksScZjgwM/a+BpaZsM2IS0SUPRaFmyr2QaAgqM34B2muukx4J1nrGzNgdwGXwgHeTtHekRLTUTKpr0ZVMhNoz/Nu/1ypwr/oSN5mnn9JuFKzTnsPVhjgEpywPSYppJFltJfxo82Ya2bi/CdYfGD9+KPR3gdppjx6eUPgimvgYS5zr+HmINMZEba84+Zi2JNdTjiOSfHGdb/RvnY/4FX7sB2/rCzaRlIpM5kUlM8EvzvSAN2l6Gn2Kjau2e5hZKwVxIq8bUmwKkCw2bq3hlHWsnCClp9kIWtnV8xGKvd9dBEryEMDWM2DDjJenxGPifrOHgfwEbfHKWkQu1JfxEhmNpjtppkzooVkgs5Pcq9dlOjoDZziEGAiAeXRFDqvoP0hOViHYrV/I0SlIfZ+p9YxLNJo/6FNI3d+ifTzYB7GyCrqI+cR83qesi+XIaTBZXsVxGYFG1+fADy5DLhdeaHDx6638kPHTxUoZhHMYs443cJZTjsg8F7LH5diN69kh4IxEo0t6RpvaQx0gb/03N5jjyY7rNVy6QYAeS7b116IO68lzWwnOhWdDbgjMKC9Il0wtKEhlGirYum1gBC6cqR9SDOTwCtXsNwDllxU0VvLJu8Fwk/KAaxZwT6ZwIJQPD9LtXcpFsiZGX4mOW2n+AMachk6gKve0ZH0BNDQzcShSdIgWl2bOxd0R1XcDDCbcd0oQHhECrNe8Nx64ObhW38U2WOg8QYCGLVsys/afkKkado4Kw6zDOA0baROXNPup6gWesEgEfKsMqkcIYu9tEbB2JoCRwFgZorDP0VroJphscWYpVXNlMtavP/DgU6yiOVFZtg5HaBat1DREQzvrk8fLxd+jOAo6CwSXsQDC9ebXKFEXjlCD2igQUtFqV7Wz8HEyl6hA5shBWUSgdVKIsspRC9PeksJrlCPzx/5d9whBZzr8uaFaM7f20nhAgzIki7XSKlN0/a/nw8WUlQMbMx98n5LY0whtmj429k8zAI8jEIrVyCQjzEss2FKIuw836acH+XF/e501UG
lIAoFvj4/OBKfx/+L68ujo7PUDPcuFlu8mZ2I6tohHKriJYeZcRryeT/zXpQs28AN1QWDfFDNSQGFkrUoLuMUYSjMx0ftb6LDw/Ilaz3/zJzz4PtECutPgKtrtqYxYyDVHbyn6fBiECtHnSXd3b9dp3iFI4t8VBFOEIcX01Mdgjvum5Pb6KxJf58pcUBQUeI+Bg1aHR+ojh5ZeqEYFr2ojdSD/0WehwUPF8IGCPVCaTKksh2yPyR174LDD05UoWvm8hc1CLhuuASq6xPrAXhcZlEl7zTJXWKD356j9OvLItEFQqolsIhS2m/W8pWzCPtY9je3bWyB+vzN3BquSuLoriIcZrF7FL7f+ZVGQDmNhIKGalojIOlzyRIHXKEV89gQHU88lWWAEc0MNP80Ag0/avrp35myUbWoP4Elkm3UjUvZHWOiWCDABEoaGlnexVgtQctJ42ZnQIztGp+hvgmezJWtqKrtfiIW6G2N+3O2pLoDubejswrG9k7OhtK358XF1YOxzIGyFPTvEODhe0Zv9 ojarva@ojar-laptop.local',\n 16383,\n 'MD5:b6:ff:d9:90:61:a7:73:77:49:cc:b1:41:ca:c1:3b:a5',\n", "problem_statement": "", "hints_text": "", "created_at": "2018-02-21T20:06:04Z"}
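For context on records like the one ending above: they pair raw OpenSSH key material with the expected `MD5:` and `SHA256:` fingerprint strings. Those strings follow OpenSSH's display conventions and can be reproduced with the standard library alone. A minimal sketch (the blob below is a truncated placeholder, not one of the keys in the record):

```python
import base64
import hashlib

def openssh_fingerprints(blob_b64: str) -> tuple:
    """OpenSSH-style MD5 and SHA256 fingerprints from a base64 key blob."""
    raw = base64.b64decode(blob_b64)
    # MD5: colon-separated lowercase hex of the decoded key bytes.
    md5_hex = hashlib.md5(raw).hexdigest()
    md5_fp = "MD5:" + ":".join(md5_hex[i:i + 2] for i in range(0, len(md5_hex), 2))
    # SHA256: unpadded base64 of the SHA-256 digest.
    sha_fp = "SHA256:" + base64.b64encode(
        hashlib.sha256(raw).digest()).decode("ascii").rstrip("=")
    return md5_fp, sha_fp

# Truncated placeholder blob, not a real key from the record:
print(openssh_fingerprints("AAAAB3NzaC1yc2EAAAADAQABAAABAQ=="))
```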
PythonDataset/test/quality-time-task-instances.jsonl.all ADDED
The diff for this file is too large to render. See raw diff
PythonDataset/test/respa-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "City-of-Helsinki/respa", "pull_number": 587, "instance_id": "City-of-Helsinki__respa-587", "issue_numbers": "", "base_commit": "d509424d79dc29954b9a717f124c5224227af02f", "patch": "diff --git a/resources/importer/kirjastot.py b/resources/importer/kirjastot.py\n--- a/resources/importer/kirjastot.py\n+++ b/resources/importer/kirjastot.py\n@@ -3,7 +3,7 @@\n import requests\n from django.conf import settings\n from django.db import transaction\n-from raven import Client\n+from sentry_sdk import capture_message\n from resources.models import Unit\n from typing import Dict, List\n from .base import Importer, register_importer\n@@ -59,10 +59,9 @@ def process_varaamo_libraries():\n problems.append(\" \".join([\"Failed data fetch on library: \", str(varaamo_unit)]))\n \n try:\n- if problems and settings.RAVEN_DSN:\n- # Report problems to Raven/Sentry\n- client = Client(settings.RAVEN_DSN)\n- client.captureMessage(\"\\n\".join(problems))\n+ if problems:\n+ # without Sentry, this will gracefully file the message to /dev/null\n+ capture_message(\"\\n\".join(problems))\n except AttributeError:\n pass\n \ndiff --git a/resources/importer/kirjastot_v2.py b/resources/importer/kirjastot_v2.py\n--- a/resources/importer/kirjastot_v2.py\n+++ b/resources/importer/kirjastot_v2.py\n@@ -12,7 +12,7 @@\n from resources.models import Unit, UnitIdentifier\n from .base import Importer, register_importer\n \n-from raven import Client\n+from sentry_sdk import capture_message\n \n from django.conf import settings\n \n@@ -72,13 +72,9 @@ def process_varaamo_libraries():\n print(\"Failed data fetch on library: \", varaamo_unit)\n problems.append(\" \".join([\"Failed data fetch on library: \", str(varaamo_unit)]))\n \n- try:\n- if problems and settings.RAVEN_DSN:\n- # Report problems to Raven/Sentry\n- client = Client(settings.RAVEN_DSN)\n- client.captureMessage(\"\\n\".join(problems))\n- except AttributeError:\n- pass\n+ # report problems to Sentry (will fail silently if not available)\n+ if problems:\n+ capture_message(\"\\n\".join(problems))\n \n \n @transaction.atomic\ndiff --git a/respa/settings.py b/respa/settings.py\n--- a/respa/settings.py\n+++ b/respa/settings.py\n@@ -2,15 +2,42 @@\n Django settings for respa project.\n \"\"\"\n \n-# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n import os\n+import subprocess\n import environ\n-import raven\n+import sentry_sdk\n+from sentry_sdk.integrations.django import DjangoIntegration\n from django.utils.translation import ugettext_lazy as _\n from django.core.exceptions import ImproperlyConfigured\n \n-\n root = environ.Path(__file__) - 2 # two folders back\n+# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n+BASE_DIR = root()\n+\n+# Location of the fallback version file, used when no repository is available.\n+# This is hardcoded as reading it from configuration does not really make\n+# sense. 
It is supposed to be a fallback after all.\n+VERSION_FILE = os.path.join(BASE_DIR, '../service_state/deployed_version')\n+\n+def get_git_revision_hash():\n+ \"\"\"\n+ We need a way to retrieve git revision hash for sentry reports\n+ \"\"\"\n+ try:\n+ # We are not interested in gits complaints, stderr -> null\n+ git_hash = subprocess.check_output(['git', 'describe', '--tags', '--long', '--always'], stderr=subprocess.DEVNULL, encoding='utf8')\n+ # First is \"git not found\", second is most likely \"no repository\"\n+ except (FileNotFoundError, subprocess.CalledProcessError):\n+ try:\n+ # fall back to hardcoded file location\n+ with open(VERSION_FILE) as f:\n+ git_hash = f.readline()\n+ except FileNotFoundError:\n+ git_hash = \"revision_not_available\"\n+\n+ return git_hash.rstrip()\n+\n+\n env = environ.Env(\n DEBUG=(bool, False),\n SECRET_KEY=(str, ''),\n@@ -47,8 +74,6 @@\n # reservation confirmation emails use this\n RESPA_IMAGE_BASE_URL = env('RESPA_IMAGE_BASE_URL')\n \n-BASE_DIR = root()\n-\n DEBUG_TOOLBAR_CONFIG = {\n 'RESULTS_CACHE_SIZE': 100,\n }\n@@ -117,12 +142,12 @@\n ]\n \n if env('SENTRY_DSN'):\n- RAVEN_CONFIG = {\n- 'dsn': env('SENTRY_DSN'),\n- 'environment': env('SENTRY_ENVIRONMENT'),\n- 'release': raven.fetch_git_sha(BASE_DIR),\n- }\n- INSTALLED_APPS.append('raven.contrib.django.raven_compat')\n+ sentry_sdk.init(\n+ dsn=env('SENTRY_DSN'),\n+ environment=env('SENTRY_ENVIRONMENT'),\n+ release=get_git_revision_hash(),\n+ integrations=[DjangoIntegration()]\n+ )\n \n MIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\ndiff --git a/respa_exchange/management/base.py b/respa_exchange/management/base.py\n--- a/respa_exchange/management/base.py\n+++ b/respa_exchange/management/base.py\n@@ -39,14 +39,6 @@ def configure_logging(logger=\"respa_exchange\", level=logging.INFO, handler=None)\n datefmt=logging.Formatter.default_time_format\n ))\n logger.addHandler(handler)\n- if hasattr(settings, 'RAVEN_CONFIG') and 'dsn' in settings.RAVEN_CONFIG:\n- from raven.handlers.logging import SentryHandler\n- from raven.conf import setup_logging\n-\n- sentry_handler = SentryHandler(settings.RAVEN_CONFIG['dsn'])\n- sentry_handler.setLevel(logging.ERROR)\n- logger.addHandler(sentry_handler)\n- setup_logging(sentry_handler)\n \n \n def select_resources(resources, selected_resources):\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2019-06-12T09:23:58Z"}
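The patch in the record above migrates error reporting from the deprecated `raven` client to `sentry_sdk`. A minimal sketch of the same pattern, assuming `sentry-sdk` and Django are installed; the DSN and environment come from placeholder environment variables:

```python
import os
import subprocess

import sentry_sdk
from sentry_sdk import capture_message
from sentry_sdk.integrations.django import DjangoIntegration

def get_git_revision_hash(fallback_file: str = "deployed_version") -> str:
    """Best-effort release id: `git describe`, then a version file, then a stub."""
    try:
        return subprocess.check_output(
            ["git", "describe", "--tags", "--long", "--always"],
            stderr=subprocess.DEVNULL,  # suppress git's complaints
            encoding="utf8",
        ).rstrip()
    except (FileNotFoundError, subprocess.CalledProcessError):
        try:
            with open(fallback_file) as f:  # hypothetical fallback location
                return f.readline().rstrip()
        except FileNotFoundError:
            return "revision_not_available"

sentry_sdk.init(
    dsn=os.environ.get("SENTRY_DSN"),  # None leaves the SDK disabled
    environment=os.environ.get("SENTRY_ENVIRONMENT", "development"),
    release=get_git_revision_hash(),
    integrations=[DjangoIntegration()],
)

# With no DSN configured this is a silent no-op, which is why the patch can
# drop the old `if settings.RAVEN_DSN` guard around reporting.
capture_message("importer reported problems")
```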
PythonDataset/test/ripe-atlas-sagan-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "RIPE-NCC/ripe-atlas-sagan", "pull_number": 81, "instance_id": "RIPE-NCC__ripe-atlas-sagan-81", "issue_numbers": "", "base_commit": "5a0919eaf98c1165a4f95ffa9cf0266477395388", "patch": "diff --git a/ripe/atlas/sagan/ssl.py b/ripe/atlas/sagan/ssl.py\n--- a/ripe/atlas/sagan/ssl.py\n+++ b/ripe/atlas/sagan/ssl.py\n@@ -15,28 +15,30 @@\n \n import logging\n import pytz\n-import re\n+import codecs\n \n from datetime import datetime\n-from dateutil.relativedelta import relativedelta\n \n try:\n- import OpenSSL\n+ from cryptography import x509\n+ from cryptography.hazmat.backends import openssl\n+ from cryptography.hazmat.primitives import hashes\n except ImportError:\n logging.warning(\n- \"pyOpenSSL is not installed, without it you cannot parse SSL \"\n+ \"cryptography module is not installed, without it you cannot parse SSL \"\n \"certificate measurement results\"\n )\n \n from .base import Result, ResultParseError, ParsingDict\n \n \n-class Certificate(ParsingDict):\n+OID_COUNTRY = \"2.5.4.6\"\n+OID_ORG = \"2.5.4.10\"\n+OID_COMMON_NAME = \"2.5.4.3\"\n+EXT_SAN = \"subjectAltName\"\n \n- TIME_FORMAT = \"%Y%m%d%H%M%SZ\"\n- TIME_REGEX = re.compile(\n- \"(\\d\\d\\d\\d)(\\d\\d)(\\d\\d)(\\d\\d)(\\d\\d)(\\d\\d)(\\+|\\-)(\\d\\d)(\\d\\d)\"\n- )\n+\n+class Certificate(ParsingDict):\n \n def __init__(self, data, **kwargs):\n \n@@ -49,6 +51,7 @@ def __init__(self, data, **kwargs):\n self.issuer_cn = None\n self.issuer_o = None\n self.issuer_c = None\n+\n self.valid_from = None\n self.valid_until = None\n \n@@ -58,32 +61,59 @@ def __init__(self, data, **kwargs):\n \n self.has_expired = None\n \n- # Clean up the certificate data and use OpenSSL to parse it\n- x509 = OpenSSL.crypto.load_certificate(\n- OpenSSL.crypto.FILETYPE_PEM,\n- data.replace(\"\\\\/\", \"/\").replace(\"\\n\\n\", \"\\n\")\n- )\n- subject = dict(x509.get_subject().get_components())\n- issuer = dict(x509.get_issuer().get_components())\n+ self.extensions = {}\n+\n+ cert = x509.load_pem_x509_certificate(data.encode(\"ascii\"), openssl.backend)\n \n- if x509 and subject and issuer:\n+ if cert:\n+ self.checksum_md5 = self._colonify(cert.fingerprint(hashes.MD5()))\n+ self.checksum_sha1 = self._colonify(cert.fingerprint(hashes.SHA1()))\n+ self.checksum_sha256 = self._colonify(cert.fingerprint(hashes.SHA256()))\n \n- self.subject_cn = self._string_from_dict_or_none(subject, b\"CN\")\n- self.subject_o = self._string_from_dict_or_none(subject, b\"O\")\n- self.subject_c = self._string_from_dict_or_none(subject, b\"C\")\n- self.issuer_cn = self._string_from_dict_or_none(issuer, b\"CN\")\n- self.issuer_o = self._string_from_dict_or_none(issuer, b\"O\")\n- self.issuer_c = self._string_from_dict_or_none(issuer, b\"C\")\n+ self.valid_from = pytz.utc.localize(cert.not_valid_before)\n+ self.valid_until = pytz.utc.localize(cert.not_valid_after)\n \n- self.checksum_md5 = x509.digest(\"md5\").decode()\n- self.checksum_sha1 = x509.digest(\"sha1\").decode()\n- self.checksum_sha256 = x509.digest(\"sha256\").decode()\n+ self.has_expired = self._has_expired()\n \n- self.has_expired = bool(x509.has_expired())\n+ self._add_extensions(cert)\n \n- self.valid_from = None\n- self.valid_until = None\n- self._process_validation_times(x509)\n+ if cert and cert.subject:\n+ self.subject_cn, self.subject_o, self.subject_c = \\\n+ self._parse_x509_name(cert.subject)\n+\n+ if cert and cert.issuer:\n+ self.issuer_cn, self.issuer_o, self.issuer_c = \\\n+ self._parse_x509_name(cert.issuer)\n+\n+ def _add_extensions(self, cert):\n+ for ext in cert.extensions:\n+ if 
ext.oid._name == EXT_SAN:\n+ self.extensions[EXT_SAN] = []\n+ for san in ext.value:\n+ self.extensions[EXT_SAN].append(san.value)\n+\n+ @staticmethod\n+ def _colonify(bytes):\n+ hex = codecs.getencoder(\"hex_codec\")(bytes)[0].decode(\"ascii\").upper()\n+ return \":\".join(a+b for a,b in zip(hex[::2], hex[1::2]))\n+\n+ @staticmethod\n+ def _parse_x509_name(name):\n+ cn = None\n+ o = None\n+ c = None\n+ for attr in name:\n+ if attr.oid.dotted_string == OID_COUNTRY:\n+ c = attr.value\n+ elif attr.oid.dotted_string == OID_ORG:\n+ o = attr.value\n+ elif attr.oid.dotted_string == OID_COMMON_NAME:\n+ cn = attr.value\n+ return cn, o, c\n+\n+ def _has_expired(self):\n+ now = pytz.utc.localize(datetime.utcnow())\n+ return self.valid_from <= now <= self.valid_until\n \n @property\n def cn(self):\n@@ -113,72 +143,6 @@ def country(self):\n def checksum(self):\n return self.checksum_sha256\n \n- def _process_validation_times(self, x509):\n- \"\"\"\n- PyOpenSSL uses a kooky date format that *usually* parses out quite\n- easily but on the off chance that it's not in UTC, a lot of work needs\n- to be done.\n- \"\"\"\n-\n- valid_from = x509.get_notBefore()\n- valid_until = x509.get_notAfter()\n-\n- try:\n- self.valid_from = pytz.UTC.localize(datetime.strptime(\n- valid_from.decode(),\n- self.TIME_FORMAT\n- ))\n- except ValueError:\n- self.valid_from = self._process_nonstandard_time(valid_from)\n-\n- try:\n- self.valid_until = pytz.UTC.localize(datetime.strptime(\n- valid_until.decode(),\n- self.TIME_FORMAT\n- ))\n- except ValueError:\n- self.valid_until = self._process_nonstandard_time(valid_until)\n-\n- def _process_nonstandard_time(self, string):\n- \"\"\"\n- In addition to `YYYYMMDDhhmmssZ`, PyOpenSSL can also use timestamps\n- in `YYYYMMDDhhmmss+hhmm` or `YYYYMMDDhhmmss-hhmm`.\n- \"\"\"\n-\n- match = re.match(self.TIME_REGEX, string)\n-\n- if not match:\n- raise ResultParseError(\n- \"Unrecognised time format: {s}\".format(s=string)\n- )\n-\n- r = datetime(\n- int(match.group(1)),\n- int(match.group(2)),\n- int(match.group(3)),\n- int(match.group(4)),\n- int(match.group(5)),\n- int(match.group(6)),\n- 0,\n- pytz.UTC\n- )\n- delta = relativedelta(\n- hours=int(match.group(8)),\n- minutes=int(match.group(9))\n- )\n- if match.group(7) == \"-\":\n- return r - delta\n- return r + delta\n-\n- @staticmethod\n- def _string_from_dict_or_none(dictionary, key):\n- \"\"\"\n- Created to make nice with the Python3 problem.\n- \"\"\"\n- if key not in dictionary:\n- return None\n- return dictionary[key].decode(\"UTF-8\")\n-\n \n class Alert(ParsingDict):\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,9 +12,8 @@\n \n tests_require = [\"nose\"]\n \n-# pyOpenSSL support is flaky on some systems (I'm looking at you Apple)\n if \"SAGAN_WITHOUT_SSL\" not in os.environ:\n- install_requires.append(\"pyOpenSSL\")\n+ install_requires.append(\"cryptography\")\n \n # Allow setup.py to be run from any path\n os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n", "test_patch": "diff --git a/tests/ssl.py b/tests/ssl.py\n--- a/tests/ssl.py\n+++ b/tests/ssl.py\n@@ -386,3 +386,9 @@ def test_alert():\n result = Result.get('{\"af\":4,\"cert\":[\"-----BEGIN 
CERTIFICATE-----\\\\nMIIFBTCCAu2gAwIBAgIDDLHHMA0GCSqGSIb3DQEBBQUAMHkxEDAOBgNVBAoTB1Jv\\\\nb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEiMCAGA1UEAxMZ\\\\nQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJARYSc3VwcG9y\\\\ndEBjYWNlcnQub3JnMB4XDTEzMDEwNjE0MDA1NVoXDTEzMDcwNTE0MDA1NVowGDEW\\\\nMBQGA1UEAxQNKi5wcmV0aWNhbC5lZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC\\\\nAQoCggEBAMS+vX7gA8TvzFwxryFFRj1OyQjnW88GvfMuGhKJopalG1EB103oRsxi\\\\nMcXqwFZUicpqLKHW4lCHcRuhpKoZp8EOILnRAJRKFOjgIrcHQ02Xn4Lf/ewl601h\\\\n5qxqt1keU1P8j+u9m7zZN+vOoNlEKZ5SnZhysAAYqr/XIt1WY2cji/4GxjF+q1OH\\\\nIl5zddkIfnE52UbREKKlIakfFdj/c6GXqqsP2QTmm4x2HitCD964tZ06fA9BitQj\\\\nnnBXNhtm2MCuBIPBSq0/C7LREwmfnqxCFqE7iqEPNIQ2IT2D4Gh4c+nIZHqYKvCV\\\\nP3zh3aUaBj1o5Lo83IDdXCKAIiQRFMkCAwEAAaOB9jCB8zAMBgNVHRMBAf8EAjAA\\\\nMA4GA1UdDwEB/wQEAwIDqDA0BgNVHSUELTArBggrBgEFBQcDAgYIKwYBBQUHAwEG\\\\nCWCGSAGG+EIEAQYKKwYBBAGCNwoDAzAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUH\\\\nMAGGF2h0dHA6Ly9vY3NwLmNhY2VydC5vcmcvMDEGA1UdHwQqMCgwJqAkoCKGIGh0\\\\ndHA6Ly9jcmwuY2FjZXJ0Lm9yZy9yZXZva2UuY3JsMDUGA1UdEQQuMCyCDSoucHJl\\\\ndGljYWwuZWWgGwYIKwYBBQUHCAWgDwwNKi5wcmV0aWNhbC5lZTANBgkqhkiG9w0B\\\\nAQUFAAOCAgEAycddS/c47eo0WVrFxpvCIJdfvn7CYdTPpXNSg0kjapkSjYuAkcmq\\\\nsrScUUGMBe6tfkmkdPTuNKwRVYNJ1Wi9EYaMvJ3CVw6x9O5mgktmu0ogbIXsivwI\\\\nTSzGDMWcb9Of85e/ALWpK0cWIugtWO0d6v3qMWfxlYfAaYu49pttOJQOjbXAAhfR\\\\njE5VOcDaIlWChG48jLAyCLsMwHlyLw8D5Myb9MfTs1XxgLESO9ZTSqGEqJw+BwTJ\\\\nstHk/oCHo9FL/Xv5MmFcNaTpqbB60duYJ+DLLX1QiRRfLJ18G7wEiEAm6H9egupQ\\\\nL9MhQQLJ4o60xTrCnpqGTXTSR16jiTm70bDB0+SU3xTpNwCzdigH6ceKbPIr0cO6\\\\no0ump598e2JNCPsXIc+XcbLDDFgYrlnl3YnK3J+K3LC7SWPMsYdDfe+Im880tNuW\\\\nOlnOCDpP8408KqCp4xm0DMznmThUM34/Ia+o8Q3NWNBfuaOsJ9aA+FmgobJhih9e\\\\nUr9x3ByRQXcW5Cs/AMtCikKWVPsx+IA5eeyt+1i+dKBWksO40B3ADsq1O5DRYYRa\\\\n+dwqdX/jduqZjbyHuFH04q28j4zVDviUBQEa9UQoDM3c82dILDjbYtZ+T28sPMTa\\\\nbMZdcMur9E+ovrS58lIKGCvDEPSUDXHzr0tpb4A13TTnxW6pclqUyJk=\\\\n-----END CERTIFICATE-----\"],\"dst_addr\":\"80.79.115.54\",\"dst_name\":\"pretical.ee\",\"dst_port\":\"https\",\"from\":\"77.95.64.18\",\"fw\":4480,\"method\":\"SSL\",\"msm_id\":1006864,\"prb_id\":517,\"src_addr\":\"77.95.64.18\",\"timestamp\":1362454627,\"type\":\"sslcert\",\"ver\":\"3.0\"}')\n assert(result.alert is None)\n assert(result.is_error is False)\n+\n+def test_san_extension():\n+ result = Result.get('{\"af\":4,\"cert\":[\"-----BEGIN 
CERTIFICATE-----\\nMIIH4jCCBsqgAwIBAgIIFaqhpQEYRXIwDQYJKoZIhvcNAQELBQAwSTELMAkGA1UE\\nBhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMTHEdvb2dsZSBJbnRl\\ncm5ldCBBdXRob3JpdHkgRzIwHhcNMTcwMzE2MDkzNzQyWhcNMTcwNjA4MDg1NDAw\\nWjBmMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwN\\nTW91bnRhaW4gVmlldzETMBEGA1UECgwKR29vZ2xlIEluYzEVMBMGA1UEAwwMKi5n\\nb29nbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjgPs3rpA\\ntF2jQzXrVQ8x33EVHB3OIpj3GcwVf8U9qcPce0XuG97fHInb20U9Uw1b45ecNRtn\\nWLUw14/7+F4cvFJXHHsYaoUdBoeSJAcOy8ktgxvIEMk82KJwJlzWA7X7B459Fy1U\\nr8Dvu6dNFzhtu8eJs8bFOMJ/Wczjh8tylKXyWNMpotTbvAG3rGH+8fttmGXnztTB\\n3dwxxf6SEL6m4XGH7POxwH9+AKzIwV9PrkU4JM5U2YsGPHf6ao/w27gPVpO5sh3g\\nP9J/3jf8lXNwPZWSLCK5C2i7kz12ohaD7jlipVyw4nYLcEFPs27LwzjYa/YFU8VZ\\nreIcbazBmDsqBwIDAQABo4IErzCCBKswHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG\\nAQUFBwMCMIIDewYDVR0RBIIDcjCCA26CDCouZ29vZ2xlLmNvbYINKi5hbmRyb2lk\\nLmNvbYIWKi5hcHBlbmdpbmUuZ29vZ2xlLmNvbYISKi5jbG91ZC5nb29nbGUuY29t\\ngg4qLmdjcC5ndnQyLmNvbYIWKi5nb29nbGUtYW5hbHl0aWNzLmNvbYILKi5nb29n\\nbGUuY2GCCyouZ29vZ2xlLmNsgg4qLmdvb2dsZS5jby5pboIOKi5nb29nbGUuY28u\\nanCCDiouZ29vZ2xlLmNvLnVrgg8qLmdvb2dsZS5jb20uYXKCDyouZ29vZ2xlLmNv\\nbS5hdYIPKi5nb29nbGUuY29tLmJygg8qLmdvb2dsZS5jb20uY2+CDyouZ29vZ2xl\\nLmNvbS5teIIPKi5nb29nbGUuY29tLnRygg8qLmdvb2dsZS5jb20udm6CCyouZ29v\\nZ2xlLmRlggsqLmdvb2dsZS5lc4ILKi5nb29nbGUuZnKCCyouZ29vZ2xlLmh1ggsq\\nLmdvb2dsZS5pdIILKi5nb29nbGUubmyCCyouZ29vZ2xlLnBsggsqLmdvb2dsZS5w\\ndIISKi5nb29nbGVhZGFwaXMuY29tgg8qLmdvb2dsZWFwaXMuY26CFCouZ29vZ2xl\\nY29tbWVyY2UuY29tghEqLmdvb2dsZXZpZGVvLmNvbYIMKi5nc3RhdGljLmNugg0q\\nLmdzdGF0aWMuY29tggoqLmd2dDEuY29tggoqLmd2dDIuY29tghQqLm1ldHJpYy5n\\nc3RhdGljLmNvbYIMKi51cmNoaW4uY29tghAqLnVybC5nb29nbGUuY29tghYqLnlv\\ndXR1YmUtbm9jb29raWUuY29tgg0qLnlvdXR1YmUuY29tghYqLnlvdXR1YmVlZHVj\\nYXRpb24uY29tggsqLnl0aW1nLmNvbYIaYW5kcm9pZC5jbGllbnRzLmdvb2dsZS5j\\nb22CC2FuZHJvaWQuY29tghtkZXZlbG9wZXIuYW5kcm9pZC5nb29nbGUuY26CBGcu\\nY2+CBmdvby5nbIIUZ29vZ2xlLWFuYWx5dGljcy5jb22CCmdvb2dsZS5jb22CEmdv\\nb2dsZWNvbW1lcmNlLmNvbYIKdXJjaGluLmNvbYIKd3d3Lmdvby5nbIIIeW91dHUu\\nYmWCC3lvdXR1YmUuY29tghR5b3V0dWJlZWR1Y2F0aW9uLmNvbTBoBggrBgEFBQcB\\nAQRcMFowKwYIKwYBBQUHMAKGH2h0dHA6Ly9wa2kuZ29vZ2xlLmNvbS9HSUFHMi5j\\ncnQwKwYIKwYBBQUHMAGGH2h0dHA6Ly9jbGllbnRzMS5nb29nbGUuY29tL29jc3Aw\\nHQYDVR0OBBYEFHRy1woLF5IqQVubJZ5ZvXAjaJ0aMAwGA1UdEwEB/wQCMAAwHwYD\\nVR0jBBgwFoAUSt0GFhu89mi1dvWBtrtiGrpagS8wIQYDVR0gBBowGDAMBgorBgEE\\nAdZ5AgUBMAgGBmeBDAECAjAwBgNVHR8EKTAnMCWgI6Ahhh9odHRwOi8vcGtpLmdv\\nb2dsZS5jb20vR0lBRzIuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQAsoPR1jJz2adkK\\nTVXpGse/M3l+xKgmuZHpXzXkAiqE9wcsxXxCU3dEUmPBYYGRTqODNkOh9AMyGzIL\\nfrYh/zY9rhqJ2B26OunmxKFF9BmwRi2rp+Ksvg/+27F57Hyaq2phSaR8E7hRZcYR\\nYqCaNA5e1hialuB1g58mAvs38jxxV4bQhKzCKkBOxolhYbUEBEV4mQ14ODdSvAB0\\n8L1dMjk3+LEDB/hWdtpOOhtMbSPa1u7xJeM/Ip7+GV47lS3V6rUALDKz4ASNk8ih\\nX0ZmxPA1rabqNFutG8L/4HK2/ffO4bKEkHEdOQXC9B17n1x65fbLUbweDPDAzaow\\nrum/OChG\\n-----END CERTIFICATE-----\"],\"dst_addr\":\"193.0.6.158\",\"dst_name\":\"atlas.ripe.net\",\"dst_port\":\"443\",\"from\":\"86.82.178.27\",\"fw\":4760,\"lts\":133,\"method\":\"TLS\",\"msm_id\":14002,\"msm_name\":\"SSLCert\",\"prb_id\":10951,\"rt\":51.558465,\"src_addr\":\"192.168.180.22\",\"timestamp\":1490659208,\"ttc\":14.88238,\"type\":\"sslcert\",\"ver\":\"1.2\"}')\n+ ext = result.certificates[0].extensions\n+ assert(ext and len(ext['subjectAltName'])==54)\n+\n", "problem_statement": "", "hints_text": "", "created_at": "2017-04-03T11:57:18Z"}
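The patch in the record above swaps pyOpenSSL for the `cryptography` package when parsing `sslcert` measurement results. A sketch of the core calls involved, assuming a reasonably recent `cryptography` release and a placeholder `cert.pem` on disk:

```python
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.x509.oid import NameOID

def colonify(raw: bytes) -> str:
    """Format a digest as uppercase colon-separated hex pairs (AB:CD:...)."""
    hexstr = raw.hex().upper()
    return ":".join(hexstr[i:i + 2] for i in range(0, len(hexstr), 2))

with open("cert.pem", "rb") as f:  # placeholder path, any PEM certificate
    cert = x509.load_pem_x509_certificate(f.read())

print("SHA256 fp:", colonify(cert.fingerprint(hashes.SHA256())))
print("validity:", cert.not_valid_before, "->", cert.not_valid_after)

# Subject lookup by OID replaces pyOpenSSL's byte-keyed component dicts.
cn = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
print("CN:", cn[0].value if cn else None)

# subjectAltName entries, which the test in this record counts (54 for the
# Google certificate embedded in the test fixture).
try:
    san = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
    print("SAN entries:", len(san.value))
except x509.ExtensionNotFound:
    pass
```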
PythonDataset/test/scrapy-pagestorage-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "scrapy-plugins/scrapy-pagestorage", "pull_number": 1, "instance_id": "scrapy-plugins__scrapy-pagestorage-1", "issue_numbers": "", "base_commit": "9987253e1cd90bd565a9f5e3f057cf8050cd34a0", "patch": "diff --git a/scrapy_pagestorage.py b/scrapy_pagestorage.py\n--- a/scrapy_pagestorage.py\n+++ b/scrapy_pagestorage.py\n@@ -4,8 +4,8 @@\n import os\n from cgi import parse_qsl\n \n-from hubstorage import ValueTooLarge\n-from hubstorage.utils import urlpathjoin\n+from scrapinghub.hubstorage import ValueTooLarge\n+from scrapinghub.hubstorage.utils import urlpathjoin\n from scrapy.exceptions import NotConfigured, IgnoreRequest\n from scrapy.utils.request import request_fingerprint\n from scrapy.http import TextResponse\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n ],\n install_requires=[\n 'Scrapy>=1.0.3',\n- 'hubstorage>=0.21',\n+ 'scrapinghub>=1.9.0',\n 'scrapinghub-entrypoint-scrapy>=0.4',\n ],\n dependency_links=[\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2016-12-05T15:57:03Z"}
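The record above is a pure import-path migration: the standalone `hubstorage` package became `scrapinghub.hubstorage` as of `scrapinghub` 1.9.0. One way code might stay compatible with both layouts is a guarded import; this is a hypothetical shim for illustration, not part of the patch:

```python
# Guarded import: merged package first (scrapinghub >= 1.9.0), then the old
# standalone distribution. Illustrative pattern only, not part of the patch.
try:
    from scrapinghub.hubstorage import ValueTooLarge
    from scrapinghub.hubstorage.utils import urlpathjoin
except ImportError:
    from hubstorage import ValueTooLarge
    from hubstorage.utils import urlpathjoin
```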
PythonDataset/test/seed-task-instances.jsonl.all ADDED
The diff for this file is too large to render. See raw diff
PythonDataset/test/sleap-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "talmolab/sleap", "pull_number": 120, "instance_id": "talmolab__sleap-120", "issue_numbers": "", "base_commit": "03a1e1c772578e9f1e0118329f8cd21cec11eb73", "patch": "diff --git a/sleap/gui/active.py b/sleap/gui/active.py\n--- a/sleap/gui/active.py\n+++ b/sleap/gui/active.py\n@@ -660,7 +660,7 @@ def run_active_learning_pipeline(\n \n if not skip_learning:\n timestamp = datetime.now().strftime(\"%y%m%d_%H%M%S\")\n- inference_output_path = os.path.join(save_dir, f\"{timestamp}.inference.json\")\n+ inference_output_path = os.path.join(save_dir, f\"{timestamp}.inference.h5\")\n \n # Create Predictor from the results of training\n predictor = Predictor(sleap_models=training_jobs,\ndiff --git a/sleap/gui/app.py b/sleap/gui/app.py\n--- a/sleap/gui/app.py\n+++ b/sleap/gui/app.py\n@@ -514,6 +514,9 @@ def importData(self, filename=None, do_load=True):\n labels = filename\n filename = None\n has_loaded = True\n+ elif filename.endswith(\".h5\"):\n+ labels = Labels.load_hdf5(filename, video_callback=gui_video_callback)\n+ has_loaded = True\n elif filename.endswith((\".json\", \".json.zip\")):\n labels = Labels.load_json(filename, video_callback=gui_video_callback)\n has_loaded = True\n@@ -526,7 +529,7 @@ def importData(self, filename=None, do_load=True):\n has_loaded = True\n \n if do_load:\n- Instance.drop_all_nan_points(labels.all_instances)\n+\n self.labels = labels\n self.filename = filename\n \n@@ -1268,10 +1271,14 @@ def openProject(self, first_open=False):\n def saveProject(self):\n if self.filename is not None:\n filename = self.filename\n+\n if filename.endswith((\".json\", \".json.zip\")):\n compress = filename.endswith(\".zip\")\n Labels.save_json(labels = self.labels, filename = filename,\n compress = compress)\n+ elif filename.endswith(\".h5\"):\n+ Labels.save_hdf5(labels = self.labels, filename = filename)\n+\n # Mark savepoint in change stack\n self.changestack_savepoint()\n # Redraw. Not sure why, but sometimes we need to do this.\n@@ -1301,8 +1308,15 @@ def saveProjectAs(self):\n self.changestack_savepoint()\n # Redraw. Not sure why, but sometimes we need to do this.\n self.plotFrame()\n+ elif filename.endswith(\".h5\"):\n+ Labels.save_hdf5(labels = self.labels, filename = filename)\n+ self.filename = filename\n+ # Mark savepoint in change stack\n+ self.changestack_savepoint()\n+ # Redraw. Not sure why, but sometimes we need to do this.\n+ self.plotFrame()\n else:\n- QMessageBox(text=f\"File not saved. Only .json currently implemented.\").exec_()\n+ QMessageBox(text=f\"File not saved. 
Try saving as json.\").exec_()\n \n def closeEvent(self, event):\n if not self.changestack_has_changes():\ndiff --git a/sleap/instance.py b/sleap/instance.py\n--- a/sleap/instance.py\n+++ b/sleap/instance.py\n@@ -7,22 +7,24 @@\n import numpy as np\n import h5py as h5\n import pandas as pd\n+import cattr\n \n from typing import Dict, List, Optional, Union, Tuple\n \n-from attr import __init__\n+from numpy.lib.recfunctions import structured_to_unstructured\n \n from sleap.skeleton import Skeleton, Node\n from sleap.io.video import Video\n-from sleap.util import attr_to_dtype\n \n import attr\n \n+try:\n+ from typing import ForwardRef\n+except:\n+ from typing import _ForwardRef as ForwardRef\n \n-# This can probably be a namedtuple but has been made a full class just in case\n-# we need more complicated functionality later.\n-@attr.s(auto_attribs=True, slots=True)\n-class Point:\n+\n+class Point(np.record):\n \"\"\"\n A very simple class to define a labelled point and any metadata associated with it.\n \n@@ -30,41 +32,98 @@ class Point:\n x: The horizontal pixel location of the point within the image frame.\n y: The vertical pixel location of the point within the image frame.\n visible: Whether point is visible in the labelled image or not.\n+ complete: Has the point been verified by the a user labeler.\n \"\"\"\n \n- x: float = attr.ib(default=math.nan, converter=float)\n- y: float = attr.ib(default=math.nan, converter=float)\n- visible: bool = True\n- complete: bool = False\n+ # Define the dtype from the point class attributes plus some\n+ # additional fields we will use to relate point to instances and\n+ # nodes.\n+ dtype = np.dtype(\n+ [('x', 'f8'),\n+ ('y', 'f8'),\n+ ('visible', '?'),\n+ ('complete', '?')])\n+\n+ def __new__(cls, x: float = math.nan, y: float = math.nan,\n+ visible: bool = True, complete: bool = False):\n+\n+ # HACK: This is a crazy way to instantiate at new Point but I can't figure\n+ # out how recarray does it. So I just use it to make matrix of size 1 and\n+ # index in to get the np.record/Point\n+ # All of this is a giant hack so that Point(x=2,y=3) works like expected.\n+ val = PointArray(1)\n+ val[0] = (x, y, visible, complete)\n+ val = val[0]\n+\n+ # val.x = x\n+ # val.y = y\n+ # val.visible = visible\n+ # val.complete = complete\n+\n+ return val\n \n def __str__(self):\n return f\"({self.x}, {self.y})\"\n \n- @classmethod\n- def dtype(cls):\n+ def isnan(self):\n \"\"\"\n- Get the compound numpy dtype of a point. This is very important for\n- serialization.\n+ Are either of the coordinates a NaN value.\n \n Returns:\n- The compound numpy dtype of the point\n+ True if x or y is NaN, False otherwise.\n \"\"\"\n- return attr_to_dtype(cls)\n-\n- def isnan(self):\n return math.isnan(self.x) or math.isnan(self.y)\n \n \n-@attr.s(auto_attribs=True, slots=True)\n+# This turns PredictedPoint into an attrs class. Defines comparators for\n+# us and generaly makes it behave better. Crazy that this works!\n+Point = attr.s(these={name: attr.ib()\n+ for name in Point.dtype.names},\n+ init=False)(Point)\n+\n+\n class PredictedPoint(Point):\n \"\"\"\n A predicted point is an output of the inference procedure. 
It has all\n the properties of a labeled point with an accompanying score.\n \n Args:\n+ x: The horizontal pixel location of the point within the image frame.\n+ y: The vertical pixel location of the point within the image frame.\n+ visible: Whether point is visible in the labelled image or not.\n+ complete: Has the point been verified by the a user labeler.\n score: The point level prediction score.\n \"\"\"\n- score: float = attr.ib(default=0.0, converter=float)\n+\n+ # Define the dtype from the point class attributes plus some\n+ # additional fields we will use to relate point to instances and\n+ # nodes.\n+ dtype = np.dtype(\n+ [('x', 'f8'),\n+ ('y', 'f8'),\n+ ('visible', '?'),\n+ ('complete', '?'),\n+ ('score', 'f8')])\n+\n+ def __new__(cls, x: float = math.nan, y: float = math.nan,\n+ visible: bool = True, complete: bool = False,\n+ score: float = 0.0):\n+\n+ # HACK: This is a crazy way to instantiate at new Point but I can't figure\n+ # out how recarray does it. So I just use it to make matrix of size 1 and\n+ # index in to get the np.record/Point\n+ # All of this is a giant hack so that Point(x=2,y=3) works like expected.\n+ val = PredictedPointArray(1)\n+ val[0] = (x, y, visible, complete, score)\n+ val = val[0]\n+\n+ # val.x = x\n+ # val.y = y\n+ # val.visible = visible\n+ # val.complete = complete\n+ # val.score = score\n+\n+ return val\n \n @classmethod\n def from_point(cls, point: Point, score: float = 0.0):\n@@ -78,7 +137,126 @@ def from_point(cls, point: Point, score: float = 0.0):\n Returns:\n A scored point based on the point passed in.\n \"\"\"\n- return cls(**{**attr.asdict(point), 'score': score})\n+ return cls(**{**Point.asdict(point), 'score': score})\n+\n+\n+# This turns PredictedPoint into an attrs class. Defines comparators for\n+# us and generaly makes it behave better. 
Crazy that this works!\n+PredictedPoint = attr.s(these={name: attr.ib()\n+ for name in PredictedPoint.dtype.names},\n+ init=False)(PredictedPoint)\n+\n+\n+class PointArray(np.recarray):\n+ \"\"\"\n+ PointArray is a sub-class of numpy recarray which stores\n+ Point objects as records.\n+ \"\"\"\n+\n+ _record_type = Point\n+\n+ def __new__(subtype, shape, buf=None, offset=0, strides=None,\n+ formats=None, names=None, titles=None,\n+ byteorder=None, aligned=False, order='C'):\n+\n+ dtype = subtype._record_type.dtype\n+\n+ if dtype is not None:\n+ descr = np.dtype(dtype)\n+ else:\n+ descr = np.format_parser(formats, names, titles, aligned, byteorder)._descr\n+\n+ if buf is None:\n+ self = np.ndarray.__new__(subtype, shape, (subtype._record_type, descr), order=order)\n+ else:\n+ self = np.ndarray.__new__(subtype, shape, (subtype._record_type, descr),\n+ buffer=buf, offset=offset,\n+ strides=strides, order=order)\n+ return self\n+\n+ def __array_finalize__(self, obj):\n+ \"\"\"\n+ Overide __array_finalize__ on recarray because it converting the dtype\n+ of any np.void subclass to np.record, we don't want this.\n+ \"\"\"\n+ pass\n+\n+ @classmethod\n+ def make_default(cls, size: int):\n+ \"\"\"\n+ Construct a point array of specific size where each value in the array\n+ is assigned the default values for a Point.\n+\n+ Args:\n+ size: The number of points to allocate.\n+\n+ Returns:\n+ A point array with all elements set to Point()\n+ \"\"\"\n+ p = cls(size)\n+ p[:] = cls._record_type()\n+ return p\n+\n+ def __getitem__(self, indx):\n+ obj = super(np.recarray, self).__getitem__(indx)\n+\n+ # copy behavior of getattr, except that here\n+ # we might also be returning a single element\n+ if isinstance(obj, np.ndarray):\n+ if obj.dtype.fields:\n+ obj = obj.view(type(self))\n+ #if issubclass(obj.dtype.type, numpy.void):\n+ # return obj.view(dtype=(self.dtype.type, obj.dtype))\n+ return obj\n+ else:\n+ return obj.view(type=np.ndarray)\n+ else:\n+ # return a single element\n+ return obj\n+\n+class PredictedPointArray(PointArray):\n+ \"\"\"\n+ PredictedPointArray is analogous to PointArray except for predicted\n+ points.\n+ \"\"\"\n+ _record_type = PredictedPoint\n+\n+ @classmethod\n+ def from_array(cls, a: PointArray):\n+ \"\"\"\n+ Convert a PointArray to a PredictedPointArray, use the default\n+ attribute values for PredictedPoints.\n+\n+ Args:\n+ a: The array to convert.\n+\n+ Returns:\n+ A PredictedPointArray with the same points as a.\n+ \"\"\"\n+ v = cls.make_default(len(a))\n+\n+ for field in Point.dtype.names:\n+ v[field] = a[field]\n+\n+ return v\n+\n+ @classmethod\n+ def to_array(cls, a: 'PredictedPointArray'):\n+ \"\"\"\n+ Convert a PredictedPointArray to a normal PointArray.\n+\n+ Args:\n+ a: The array to convert.\n+\n+ Returns:\n+ The converted array.\n+ \"\"\"\n+ v = PointArray.make_default(len(a))\n+\n+ for field in Point.dtype.names:\n+ v[field] = a[field]\n+\n+ return v\n \n \n @attr.s(slots=True, cmp=False)\n@@ -113,23 +291,30 @@ def matches(self, other: 'Track'):\n # attributes _frame and _point_array_cache after init. 
These are private variables\n # that are created in post init so they are not serialized.\n \n-@attr.s(auto_attribs=True)\n+@attr.s(cmp=False, slots=True)\n class Instance:\n \"\"\"\n The class :class:`Instance` represents a labelled instance of skeleton\n \n Args:\n skeleton: The skeleton that this instance is associated with.\n- points: A dictionary where keys are skeleton node names and values are _points.\n+ points: A dictionary where keys are skeleton node names and values are Point objects. Alternatively,\n+ a point array whose length and order matches skeleton.nodes\n track: An optional multi-frame object track associated with this instance.\n This allows individual animals/objects to be tracked across frames.\n from_predicted: The predicted instance (if any) that this was copied from.\n+ frame: A back reference to the LabeledFrame that this Instance belongs to.\n+ This field is set when Instances are added to LabeledFrame objects.\n \"\"\"\n \n- skeleton: Skeleton\n+ skeleton: Skeleton = attr.ib()\n track: Track = attr.ib(default=None)\n from_predicted: Optional['PredictedInstance'] = attr.ib(default=None)\n- _points: Dict[Node, Union[Point, PredictedPoint]] = attr.ib(default=attr.Factory(dict))\n+ _points: PointArray = attr.ib(default=None)\n+ frame: Union['LabeledFrame', None] = attr.ib(default=None)\n+\n+ # The underlying Point array type that this instances point array should be.\n+ _point_array_type = PointArray\n \n @from_predicted.validator\n def _validate_from_predicted_(self, attribute, from_predicted):\n@@ -147,39 +332,57 @@ def _validate_all_points(self, attribute, points):\n Raises:\n ValueError: If a point is associated with a skeleton node name that doesn't exist.\n \"\"\"\n- is_string_dict = set(map(type, self._points)) == {str}\n- if is_string_dict:\n- for node_name in points.keys():\n- if not self.skeleton.has_node(node_name):\n- raise KeyError(f\"There is no node named {node_name} in {self.skeleton}\")\n+ if type(points) is dict:\n+ is_string_dict = set(map(type, points)) == {str}\n+ if is_string_dict:\n+ for node_name in points.keys():\n+ if not self.skeleton.has_node(node_name):\n+ raise KeyError(f\"There is no node named {node_name} in {self.skeleton}\")\n+ elif isinstance(points, PointArray):\n+ if len(points) != len(self.skeleton.nodes):\n+ raise ValueError(\"PointArray does not have the same number of rows as skeleton nodes.\")\n \n def __attrs_post_init__(self):\n \n- # If the points dict is non-empty, validate it.\n- if self._points:\n- # Check if the dict contains all strings\n- is_string_dict = set(map(type, self._points)) == {str}\n+ if not self.skeleton:\n+ raise ValueError(\"No skeleton set for Instance\")\n \n- # Check if the dict contains all Node objects\n- is_node_dict = set(map(type, self._points)) == {Node}\n+ # If the user did not pass a points list initialize a point array for future\n+ # points.\n+ if self._points is None:\n \n- # If the user fed in a dict whose keys are strings, these are node names,\n- # convert to node indices so we don't break references to skeleton nodes\n- # if the node name is relabeled.\n- if self._points and is_string_dict:\n- self._points = {self.skeleton.find_node(name): point for name,point in self._points.items()}\n+ # Initialize an empty point array that is the size of the skeleton.\n+ self._points = self._point_array_type.make_default(len(self.skeleton.nodes))\n \n- if not is_string_dict and not is_node_dict:\n- raise ValueError(\"points dictionary must be keyed by either strings \" +\n- \"(node names) or 
Nodes.\")\n+ else:\n+\n+ if type(self._points) is dict:\n+ parray = self._point_array_type.make_default(len(self.skeleton.nodes))\n+ Instance._points_dict_to_array(self._points, parray, self.skeleton)\n+ self._points = parray\n+\n+ @staticmethod\n+ def _points_dict_to_array(points, parray, skeleton):\n+\n+ # Check if the dict contains all strings\n+ is_string_dict = set(map(type, points)) == {str}\n+\n+ # Check if the dict contains all Node objects\n+ is_node_dict = set(map(type, points)) == {Node}\n \n- # Create here in post init so it is not serialized by cattrs, wish there was better way\n- # This field keeps track of any labeled frame that this Instance has been associated with.\n- self.frame: Union[LabeledFrame, None] = None\n+ # If the user fed in a dict whose keys are strings, these are node names,\n+ # convert to node indices so we don't break references to skeleton nodes\n+ # if the node name is relabeled.\n+ if points and is_string_dict:\n+ points = {skeleton.find_node(name): point for name, point in points.items()}\n \n- # A variable used to cache a constructed points_array call to save on time. This is only\n- # used when cached=True is passed to points_array\n- self._points_array_cache: Union[np.array, None] = None\n+ if not is_string_dict and not is_node_dict:\n+ raise ValueError(\"points dictionary must be keyed by either strings \" +\n+ \"(node names) or Nodes.\")\n+\n+ # Get rid of the points dict and replace with equivalent point array.\n+ for node, point in points.items():\n+ parray[skeleton.node_to_index(node)] = point\n \n def _node_to_index(self, node_name):\n \"\"\"\n@@ -195,7 +398,7 @@ def _node_to_index(self, node_name):\n \n def __getitem__(self, node):\n \"\"\"\n- Get the _points associated with particular skeleton node or list of skeleton nodes\n+ Get the Points associated with particular skeleton node or list of skeleton nodes\n \n Args:\n node: A single node or list of nodes within the skeleton associated with this instance.\n@@ -213,14 +416,10 @@ def __getitem__(self, node):\n \n return ret_list\n \n- if isinstance(node, str):\n- node = self.skeleton.find_node(node)\n- if node in self.skeleton.nodes:\n- if not node in self._points:\n- self._points[node] = Point()\n-\n+ try:\n+ node = self._node_to_index(node)\n return self._points[node]\n- else:\n+ except ValueError:\n raise KeyError(f\"The underlying skeleton ({self.skeleton}) has no node '{node}'\")\n \n def __contains__(self, node):\n@@ -233,7 +432,17 @@ def __contains__(self, node):\n Returns:\n bool: True if the point with the node name specified has a point in this instance.\n \"\"\"\n- return node in self._points\n+\n+ if type(node) is Node:\n+ node = node.name\n+\n+ if node not in self.skeleton:\n+ return False\n+\n+ node_idx = self._node_to_index(node)\n+\n+ # If the points are nan, then they haven't been allocated.\n+ return not self._points[node_idx].isnan()\n \n def __setitem__(self, node, value):\n \n@@ -250,18 +459,20 @@ def __setitem__(self, node, value):\n for n, v in zip(node, value):\n self.__setitem__(n, v)\n else:\n- if isinstance(node,str):\n- node = self.skeleton.find_node(node)\n-\n- if node in self.skeleton.nodes:\n- self._points[node] = value\n- else:\n+ try:\n+ node_idx = self._node_to_index(node)\n+ self._points[node_idx] = value\n+ except ValueError:\n raise KeyError(f\"The underlying skeleton ({self.skeleton}) has no node '{node}'\")\n \n def __delitem__(self, node):\n \"\"\" Delete node key and points associated with that node. 
\"\"\"\n- # TODO: handle this case somehow?\n- pass\n+ try:\n+ node_idx = self._node_to_index(node)\n+ self._points[node_idx].x = math.nan\n+ self._points[node_idx].y = math.nan\n+ except ValueError:\n+ raise KeyError(f\"The underlying skeleton ({self.skeleton}) has no node '{node}'\")\n \n def matches(self, other):\n \"\"\"\n@@ -273,6 +484,9 @@ def matches(self, other):\n Returns:\n True if match, False otherwise.\n \"\"\"\n+ if type(self) is not type(other):\n+ return False\n+\n if list(self.points()) != list(other.points()):\n return False\n \n@@ -297,10 +511,10 @@ def nodes(self):\n Get the list of nodes that have been labelled for this instance.\n \n Returns:\n- A list of nodes that have been labelled for this instance.\n+ A tuple of nodes that have been labelled for this instance.\n \n \"\"\"\n- return tuple(self._points.keys())\n+ return tuple(self.skeleton.nodes[i] for i, point in enumerate(self._points) if not point.isnan())\n \n @property\n def nodes_points(self):\n@@ -311,42 +525,51 @@ def nodes_points(self):\n Returns:\n The instance's (node, point) tuple pairs for all labelled point.\n \"\"\"\n- names_to_points = {node: point for node, point in self._points.items()}\n+ names_to_points = dict(zip(self.nodes, self.points()))\n return names_to_points.items()\n \n- def points(self) -> Tuple:\n+ def points(self) -> Tuple[Point]:\n \"\"\"\n Return the list of labelled points, in order they were labelled.\n \n Returns:\n The list of labelled points, in order they were labelled.\n \"\"\"\n- return tuple(self._points.values())\n+ return tuple(point for point in self._points if not point.isnan())\n \n- def points_array(self, cached=False, invisible_as_nan=False) -> np.ndarray:\n+ def points_array(self, copy: bool = True,\n+ invisible_as_nan: bool = False,\n+ full: bool = False) -> np.ndarray:\n \"\"\"\n Return the instance's points in array form.\n \n Args:\n- cached: If True, use a cached version of the points data if available,\n- create cache if it doesn't exist. If False, recompute and cache each\n- call.\n+ copy: If True, the return a copy of the points array as an\n+ Nx2 ndarray where first column is x and second column is y.\n+ If False, return a view of the underlying recarray.\n+ invisible_as_nan: Should invisible points be marked as NaN.\n+ full: If True, return the raw underlying recarray with all attributes\n+ of the point, if not, return just the x and y coordinate. 
Assumes\n+ copy is False and invisible_as_nan is False.\n Returns:\n A Nx2 array containing x and y coordinates of each point\n as the rows of the array and N is the number of nodes in the skeleton.\n The order of the rows corresponds to the ordering of the skeleton nodes.\n Any skeleton node not defined will have NaNs present.\n \"\"\"\n- if not cached or (cached and self._points_array_cache is None):\n- pts = np.ndarray((len(self.skeleton.nodes), 2))\n- for i, n in enumerate(self.skeleton.nodes):\n- p = self._points.get(n, Point())\n- pts[i, 0] = p.x if p.visible or not invisible_as_nan else np.nan\n- pts[i, 1] = p.y if p.visible or not invisible_as_nan else np.nan\n \n- self._points_array_cache = pts\n+ if full:\n+ return self._points\n+\n+ if not copy and not invisible_as_nan:\n+ return self._points[['x', 'y']]\n+ else:\n+ parray = structured_to_unstructured(self._points[['x', 'y']])\n+\n+ if invisible_as_nan:\n+ parray[self._points.visible is False, :] = math.nan\n \n- return self._points_array_cache\n+ return parray\n \n @property\n def centroid(self) -> np.ndarray:\n@@ -355,250 +578,6 @@ def centroid(self) -> np.ndarray:\n centroid = np.nanmedian(points, axis=0)\n return centroid\n \n- @classmethod\n- def to_pandas_df(cls, instances: Union['Instance', List['Instance']], skip_nan:bool = True) -> pd.DataFrame:\n- \"\"\"\n- Given an instance or list of instances, generate a pandas DataFrame that contains\n- all of the data in normalized form.\n- Args:\n- instances: A single instance or list of instances.\n- skip_nan: Whether to drop points that have NaN values for x or y.\n-\n- Returns:\n- A pandas DataFrame that contains all of the isntance's points level data\n- in and normalized form. The columns of the DataFrame are:\n-\n- * id - A unique number for each row of the table.\n- * instanceId - a unique id for each unique instance.\n- * skeleton - the name of the skeleton that this point is a part of.\n- * node - A string specifying the name of the skeleton node that this point value corresponds.\n- * videoId - A string specifying the video that this instance is in.\n- * frameIdx - The frame number of the video that this instance occurs on.\n- * visible - Whether the point in this row for this instance is visible.\n- * x - The horizontal pixel position of this node for this instance.\n- * y - The vertical pixel position of this node for this instance.\n- \"\"\"\n-\n- # If this is a single instance, make it a list\n- if type(instances) is Instance:\n- instances = [instances]\n-\n- # Lets construct a list of dicts which will be records for the pandas data frame\n- records = []\n-\n- # Extract all the data from each instance and its points\n- id = 0\n- for instance_id, instance in enumerate(instances):\n-\n- # Get all the attributes from the instance except the points dict or from_predicted\n- irecord = {'id': id, 'instance_id': instance_id,\n- **attr.asdict(instance, filter=lambda attr, value: attr.name not in (\"_points\", \"from_predicted\"))}\n-\n- # Convert the skeleton to it's name\n- irecord['skeleton'] = irecord['skeleton'].name\n-\n- # FIXME: Do the same for the video\n-\n- for (node, point) in instance.nodes_points:\n-\n- # Skip any NaN points if the user has asked for it.\n- if skip_nan and (math.isnan(point.x) or math.isnan(point.y)):\n- continue\n-\n- precord = {'node': node.name, **attr.asdict(point)} # FIXME: save other node attributes?\n-\n- records.append({**irecord, **precord})\n-\n- id = id + 1\n-\n- # Construct a pandas data frame from this list of instances\n- if 
len(records) == 1:\n- df = pd.DataFrame.from_records(records, index=[0])\n- else:\n- df = pd.DataFrame.from_records(records)\n-\n- return df\n-\n- @classmethod\n- def save_hdf5(cls, file: Union[str, h5.File],\n- instances: Union['Instance', List['Instance']],\n- skip_nan: bool = True):\n- \"\"\"\n- Write the instance point level data to an HDF5 file and group. This\n- function writes the data to an HDF5 group not a dataset. Each\n- column of the data is a dataset. The datasets within the group\n- will be all the same length (the total number of points across all\n- instances). They are as follows:\n-\n- * id - A unique number for each row of the table.\n- * instanceId - a unique id for each unique instance.\n- * skeleton - the name of the skeleton that this point is a part of.\n- * node - A string specifying the name of the skeleton node that this point value corresponds.\n- * videoId - A string specifying the video that this instance is in.\n- * frameIdx - The frame number of the video that this instance occurs on.\n- * visible - Whether the point in this row for this instance is visible.\n- * x - The horizontal pixel position of this node for this instance.\n- * y - The vertical pixel position of this node for this instance.\n-\n- Args:\n- file: The HDF5 file to save the instance data to.\n- instances: A single instance or list of instances.\n- skip_nan: Whether to drop points that have NaN values for x or y.\n-\n- Returns:\n- None\n- \"\"\"\n-\n- # Make it into a list of length one if needed.\n- if type(instances) is Instance:\n- instances = [instances]\n-\n- if type(file) is str:\n- with h5.File(file) as _file:\n- Instance._save_hdf5(file=_file, instances=instances, skip_nan=skip_nan)\n- else:\n- Instance._save_hdf5(file=file, instances=instances, skip_nan=skip_nan)\n-\n- @classmethod\n- def _save_hdf5(cls, file: h5.File, instances: List['Instance'], skip_nan: bool = True):\n-\n- # Get all the unique skeleton objects in this list of instances\n- skeletons = {i.skeleton for i in instances}\n-\n- # First, lets save the skeletons to the file\n- Skeleton.save_all_hdf5(file=file, skeletons=list(skeletons))\n-\n- # Second, lets get the instance data as a pandas data frame.\n- df = cls.to_pandas_df(instances=instances, skip_nan=skip_nan)\n-\n- # If the group doesn't exists, create it, but do so with track order.\n- # If it does exists, leave it be.\n- if 'points' not in file:\n- group = file.create_group('points', track_order=True)\n- else:\n- group = file['points']\n-\n- # Write each column as a data frame.\n- for col in df:\n- vals = df[col].values\n- if col in group:\n- del group[col]\n-\n- # If the column are objects (should be strings), convert to dtype=S, strings as per\n- # h5py recommendations.\n- if vals.dtype == np.dtype('O'):\n- dtype = h5.special_dtype(vlen=str)\n- group.create_dataset(name=col, shape=vals.shape,\n- data=vals,\n- dtype=dtype,\n- compression=\"gzip\")\n- else:\n- group.create_dataset(name=col, shape=vals.shape,\n- data=vals, compression=\"gzip\")\n-\n- @classmethod\n- def load_hdf5(cls, file: Union[h5.File, str]) -> List['Instance']:\n- \"\"\"\n- Load instance data from an HDF5 dataset.\n-\n- Args:\n- file: The name of the HDF5 file or the open h5.File object.\n-\n- Returns:\n- A list of Instance objects.\n- \"\"\"\n-\n- if type(file) is str:\n- with h5.File(file) as _file:\n- return Instance._load_hdf5(_file)\n- else:\n- return Instance._load_hdf5(file)\n-\n- @classmethod\n- def _load_hdf5(self, file: h5.File):\n-\n- # First, get all the skeletons in the HDF5 
file\n- skeletons = Skeleton.load_all_hdf5(file=file, return_dict=True)\n-\n- if 'points' not in file:\n- raise ValueError(\"No instance data found in dataset.\")\n-\n- group = file['points']\n-\n- # Next get a dict that contains all the datasets for the instance\n- # data.\n- records = {}\n- for key, dset in group.items():\n- records[key] = dset[...]\n-\n- # Convert to a data frame.\n- df = pd.DataFrame.from_dict(records)\n-\n- # Lets first create all the points, start by grabbing the Point columns, grab only\n- # columns that exist. This is just in case we are reading an older form of the dataset\n- # format and the fields don't line up.\n- point_cols = [f.name for f in attr.fields(Point)]\n- point_cols = list(filter(lambda x: x in group, point_cols))\n-\n- # Extract the points columns and convert dicts of keys and values.\n- points = df[[*point_cols]].to_dict('records')\n-\n- # Convert to points dicts to points objects\n- points = [Point(**args) for args in points]\n-\n- # Instance columns\n- instance_cols = [f.name for f in attr.fields(Instance)]\n- instance_cols = list(filter(lambda x: x in group, instance_cols))\n-\n- instance_records = df[[*instance_cols]].to_dict('records')\n-\n- # Convert skeletons references to skeleton objects\n- for r in instance_records:\n- r['skeleton'] = skeletons[r['skeleton']]\n-\n- instances: List[Instance] = []\n- curr_id = -1 # Start with an invalid instance id so condition is tripped\n- for idx, r in enumerate(instance_records):\n- if curr_id == -1 or curr_id != df['instance_id'].values[idx]:\n- curr_id = df['instance_id'].values[idx]\n- curr_instance = Instance(**r)\n- instances.append(curr_instance)\n-\n- # Add the point the instance\n- curr_instance[df['node'].values[idx]] = points[idx]\n-\n- return instances\n-\n- def drop_nan_points(self):\n- \"\"\"\n- Drop any points for the instance that are not completely specified.\n-\n- Returns:\n- None\n- \"\"\"\n- is_nan = []\n- for n, p in self._points.items():\n- if p.isnan():\n- is_nan.append(n)\n-\n- # Remove them\n- for n in is_nan:\n- self._points.pop(n, None)\n-\n- @classmethod\n- def drop_all_nan_points(cls, instances: List['Instance']):\n- \"\"\"\n- Call drop_nan_points on a list of Instances.\n-\n- Args:\n- instances: The list of instances to call drop_nan_points() on.\n-\n- Returns:\n- None\n- \"\"\"\n- for i in instances:\n- i.drop_nan_points()\n-\n @property\n def frame_idx(self) -> Union[None, int]:\n \"\"\"\n@@ -614,7 +593,7 @@ def frame_idx(self) -> Union[None, int]:\n return self.frame.frame_idx\n \n \n-@attr.s(auto_attribs=True)\n+@attr.s(cmp=False, slots=True)\n class PredictedInstance(Instance):\n \"\"\"\n A predicted instance is an output of the inference procedure. 
It is\n@@ -625,8 +604,12 @@ class PredictedInstance(Instance):\n \"\"\"\n score: float = attr.ib(default=0.0, converter=float)\n \n+ # The underlying Point array type that this instances point array should be.\n+ _point_array_type = PredictedPointArray\n+\n def __attrs_post_init__(self):\n super(PredictedInstance, self).__attrs_post_init__()\n+\n if self.from_predicted is not None:\n raise ValueError(\"PredictedInstance should not have from_predicted.\")\n \n@@ -645,14 +628,92 @@ def from_instance(cls, instance: Instance, score):\n Returns:\n A PredictedInstance for the given Instance.\n \"\"\"\n- kw_args = attr.asdict(instance, recurse=False)\n- kw_args['points'] = {key: PredictedPoint.from_point(val)\n- for key, val in kw_args['_points'].items()}\n- del kw_args['_points']\n+ kw_args = attr.asdict(instance, recurse=False, filter=lambda attr, value: attr.name != \"_points\")\n+ kw_args['points'] = PredictedPointArray.from_array(instance._points)\n kw_args['score'] = score\n return cls(**kw_args)\n \n \n+def make_instance_cattr():\n+ \"\"\"\n+ Create a cattr converter for handling Lists of Instances/PredictedInstances\n+\n+ Returns:\n+ A cattr converter with hooks registered for structuring and unstructuring\n+ Instances.\n+ \"\"\"\n+\n+ converter = cattr.Converter()\n+\n+ #### UNSTRUCTURE HOOKS\n+\n+ # JSON dump cant handle NumPy bools so convert them. These are present\n+ # in Point/PredictedPoint objects now since they are actually custom numpy dtypes.\n+ converter.register_unstructure_hook(np.bool_, bool)\n+\n+ converter.register_unstructure_hook(PointArray, lambda x: None)\n+ converter.register_unstructure_hook(PredictedPointArray, lambda x: None)\n+ def unstructure_instance(x: Instance):\n+\n+ # Unstructure everything but the points array and frame attribute\n+ d = {field.name: converter.unstructure(x.__getattribute__(field.name))\n+ for field in attr.fields(x.__class__)\n+ if field.name not in ['_points', 'frame']}\n+\n+ # Replace the point array with a dict\n+ d['_points'] = converter.unstructure({k: v for k, v in x.nodes_points})\n+\n+ return d\n+\n+ converter.register_unstructure_hook(Instance, unstructure_instance)\n+ converter.register_unstructure_hook(PredictedInstance, unstructure_instance)\n+\n+ ## STRUCTURE HOOKS\n+\n+ def structure_points(x, type):\n+ if 'score' in x.keys():\n+ return cattr.structure(x, PredictedPoint)\n+ else:\n+ return cattr.structure(x, Point)\n+\n+ converter.register_structure_hook(Union[Point, PredictedPoint], structure_points)\n+\n+ def structure_instances_list(x, type):\n+ inst_list = []\n+ for inst_data in x:\n+ if 'score' in inst_data.keys():\n+ inst = converter.structure(inst_data, PredictedInstance)\n+ else:\n+ inst = converter.structure(inst_data, Instance)\n+ inst_list.append(inst)\n+\n+ return inst_list\n+\n+ converter.register_structure_hook(Union[List[Instance], List[PredictedInstance]],\n+ structure_instances_list)\n+\n+ converter.register_structure_hook(ForwardRef('PredictedInstance'),\n+ lambda x, type: converter.structure(x, PredictedInstance))\n+\n+ # We can register structure hooks for point arrays that do nothing\n+ # because Instance can have a dict of points passed to it in place of\n+ # a PointArray\n+ def structure_point_array(x, t):\n+ if x:\n+ point1 = x[list(x.keys())[0]]\n+ if 'score' in point1.keys():\n+ return converter.structure(x, Dict[Node, PredictedPoint])\n+ else:\n+ return converter.structure(x, Dict[Node, Point])\n+ else:\n+ return {}\n+\n+ converter.register_structure_hook(PointArray, structure_point_array)\n+ 
converter.register_structure_hook(PredictedPointArray, structure_point_array)\n+\n+ return converter\n+\n+\n @attr.s(auto_attribs=True)\n class LabeledFrame:\n video: Video = attr.ib()\n@@ -786,4 +847,5 @@ def merge_frames(labeled_frames, video):\n # remove labeled frames with no instances\n labeled_frames = list(filter(lambda lf: len(lf.instances),\n labeled_frames))\n- return labeled_frames\n\\n+ return labeled_frames\n+\ndiff --git a/sleap/io/dataset.py b/sleap/io/dataset.py\n--- a/sleap/io/dataset.py\n+++ b/sleap/io/dataset.py\n@@ -17,6 +17,7 @@\n import attr\n import cattr\n import json\n+import rapidjson\n import shutil\n import tempfile\n import numpy as np\n@@ -35,10 +36,38 @@\n \n from sleap.skeleton import Skeleton, Node\n from sleap.instance import Instance, Point, LabeledFrame, \\\n- Track, PredictedPoint, PredictedInstance\n+ Track, PredictedPoint, PredictedInstance, \\\n+ make_instance_cattr, PointArray, PredictedPointArray\n from sleap.rangelist import RangeList\n from sleap.io.video import Video\n-from sleap.util import save_dict_to_hdf5\n+from sleap.util import uniquify\n+\n+\n+def json_loads(json_str: str):\n+ try:\n+ return rapidjson.loads(json_str)\n+ except:\n+ return json.loads(json_str)\n+\n+def json_dumps(d: Dict, filename: str = None):\n+ \"\"\"\n+ A simple wrapper around the JSON encoder we are using.\n+\n+ Args:\n+ d: The dict to write.\n+ f: The filename to write to.\n+\n+ Returns:\n+ None\n+ \"\"\"\n+ import codecs\n+ encoder = rapidjson\n+\n+ if filename:\n+ with open(filename, 'w') as f:\n+ encoder.dump(d, f, ensure_ascii=False)\n+ else:\n+ return encoder.dumps(d)\n \n \"\"\"\n The version number to put in the Labels JSON format.\n@@ -76,37 +105,50 @@ def __attrs_post_init__(self):\n \n # Add any videos that are present in the labels but\n # missing from the video list\n- self.videos = list(set(self.videos).union({label.video for label in self.labels}))\n+ if len(self.videos) == 0:\n+ self.videos = list({label.video for label in self.labels})\n \n # Ditto for skeletons\n- self.skeletons = list(set(self.skeletons).union({instance.skeleton\n- for label in self.labels\n- for instance in label.instances}))\n+ if len(self.skeletons) == 0:\n+ self.skeletons = list({instance.skeleton\n+ for label in self.labels\n+ for instance in label.instances})\n \n # Ditto for nodes\n- self.nodes = list(set(self.nodes).union({node for skeleton in self.skeletons for node in skeleton.nodes}))\n-\n- # Keep the tracks we already have\n- tracks = set(self.tracks)\n+ if len(self.nodes) == 0:\n+ self.nodes = list({node\n+ for skeleton in self.skeletons\n+ for node in skeleton.nodes})\n+\n+ # Ditto for tracks, a pattern is emerging here\n+ if len(self.tracks) == 0:\n+\n+ # Add tracks from any Instances or PredictedInstances\n+ tracks = {instance.track\n+ for frame in self.labels\n+ for instance in frame.instances\n+ if instance.track}\n+\n+ # Add tracks from any PredictedInstance referenced by instance\n+ # This fixes things when there's a referenced PredictionInstance\n+ # which is no longer in the frame.\n+ tracks = tracks.union({instance.from_predicted.track\n+ for frame in self.labels\n+ for instance in frame.instances\n+ if instance.from_predicted\n+ and instance.from_predicted.track})\n+\n+ self.tracks = list(tracks)\n \n- # Add tracks from any Instances or PredictedInstances\n- tracks = tracks.union({instance.track\n- for frame in self.labels\n- for instance in frame.instances\n- if instance.track})\n+ # Lets sort the tracks by spawned on and then name\n+ 
self.tracks.sort(key=lambda t:(t.spawned_on, t.name))\n \n- # Add tracks from any PredictedInstance referenced by instance\n- # This fixes things when there's a referenced PredictionInstance\n- # which is no longer in the frame.\n- tracks = tracks.union({instance.from_predicted.track\n- for frame in self.labels\n- for instance in frame.instances\n- if instance.from_predicted\n- and instance.from_predicted.track})\n+ self._update_lookup_cache()\n \n- # Lets sort the tracks by spawned on and then name\n- self.tracks = sorted(list(tracks), key=lambda t:(t.spawned_on, t.name))\n+ # Create a variable to store a temporary storage directory. When we unzip\n+ self.__temp_dir = None\n \n+ def _update_lookup_cache(self):\n # Data structures for caching\n self._lf_by_video = dict()\n self._frame_idx_map = dict()\n@@ -116,9 +158,6 @@ def __attrs_post_init__(self):\n self._frame_idx_map[video] = {lf.frame_idx: lf for lf in self._lf_by_video[video]}\n self._track_occupancy[video] = self._make_track_occupany(video)\n \n- # Create a variable to store a temporary storage directory. When we unzip\n- self.__temp_dir = None\n-\n # Below are convenience methods for working with Labels as list.\n # Maybe we should just inherit from list? Maybe this class shouldn't\n # exists since it is just a list really with some class methods. I\n@@ -599,13 +638,16 @@ def merge_matching_frames(self, video=None):\n else:\n self.labeled_frames = LabeledFrame.merge_frames(self.labeled_frames, video=video)\n \n- def to_dict(self):\n+ def to_dict(self, skip_labels: bool = False):\n \"\"\"\n Serialize all labels in the underling list of LabeledFrames to a\n dict structure. This function returns a nested dict structure\n composed entirely of primitive python types. It is used to create\n JSON and HDF5 serialized datasets.\n \n+ Args:\n+ skip_labels: If True, skip labels serialization and just do the metadata.\n+\n Returns:\n A dict containing the followings top level keys:\n * version - The version of the dict/json serialization format.\n@@ -617,6 +659,7 @@ def to_dict(self):\n * suggestions - The suggested frames.\n * negative_anchors - The negative training sample anchors.\n \"\"\"\n+\n # FIXME: Update list of nodes\n # We shouldn't have to do this here, but for some reason we're missing nodes\n # which are in the skeleton but don't have points (in the first instance?).\n@@ -626,12 +669,13 @@ def to_dict(self):\n # of video and skeleton objects present in the labels. 
We will serialize these\n # as references to the above constructed lists to limit redundant data in the\n # json\n- label_cattr = cattr.Converter()\n- label_cattr.register_unstructure_hook(Skeleton, lambda x: self.skeletons.index(x))\n- label_cattr.register_unstructure_hook(Video, lambda x: self.videos.index(x))\n- label_cattr.register_unstructure_hook(Node, lambda x: self.nodes.index(x))\n- label_cattr.register_unstructure_hook(Track, lambda x: self.tracks.index(x))\n+ label_cattr = make_instance_cattr()\n+ label_cattr.register_unstructure_hook(Skeleton, lambda x: str(self.skeletons.index(x)))\n+ label_cattr.register_unstructure_hook(Video, lambda x: str(self.videos.index(x)))\n+ label_cattr.register_unstructure_hook(Node, lambda x: str(self.nodes.index(x)))\n+ label_cattr.register_unstructure_hook(Track, lambda x: str(self.tracks.index(x)))\n \n+ # Make a converter for the top level skeletons list.\n idx_to_node = {i: self.nodes[i] for i in range(len(self.nodes))}\n \n skeleton_cattr = Skeleton.make_cattr(idx_to_node)\n@@ -642,12 +686,14 @@ def to_dict(self):\n 'skeletons': skeleton_cattr.unstructure(self.skeletons),\n 'nodes': cattr.unstructure(self.nodes),\n 'videos': Video.cattr().unstructure(self.videos),\n- 'labels': label_cattr.unstructure(self.labeled_frames),\n 'tracks': cattr.unstructure(self.tracks),\n 'suggestions': label_cattr.unstructure(self.suggestions),\n 'negative_anchors': label_cattr.unstructure(self.negative_anchors)\n }\n \n+ if not skip_labels:\n+ dicts['labels'] = label_cattr.unstructure(self.labeled_frames)\n+\n return dicts\n \n def to_json(self):\n@@ -660,7 +706,7 @@ def to_json(self):\n \"\"\"\n \n # Unstructure the data into dicts and dump to JSON.\n- return json.dumps(self.to_dict())\n+ return json_dumps(self.to_dict())\n \n @staticmethod\n def save_json(labels: 'Labels', filename: str,\n@@ -719,11 +765,8 @@ def save_json(labels: 'Labels', filename: str,\n d = labels.to_dict()\n d['videos'] = Video.cattr().unstructure(new_videos)\n \n- # We can't call Labels.to_json, so we need to do this here. 
Not as clean as I\n- # would like.\n- json_str = json.dumps(d)\n else:\n- json_str = labels.to_json()\n+ d = labels.to_dict()\n \n if compress or save_frame_data:\n \n@@ -732,23 +775,22 @@ def save_json(labels: 'Labels', filename: str,\n filename = re.sub(\"(\\.json)?(\\.zip)?$\", \".json\", filename)\n \n # Write the json to the tmp directory, we will zip it up with the frame data.\n- with open(os.path.join(tmp_dir, os.path.basename(filename)), 'w') as file:\n- file.write(json_str)\n+ full_out_filename = os.path.join(tmp_dir, os.path.basename(filename))\n+ json_dumps(d, full_out_filename)\n \n # Create the archive\n shutil.make_archive(base_name=filename, root_dir=tmp_dir, format='zip')\n \n # If the user doesn't want to compress, then just write the json to the filename\n else:\n- with open(filename, 'w') as file:\n- file.write(json_str)\n+ json_dumps(d, filename)\n \n @classmethod\n def from_json(cls, data: Union[str, dict], match_to: Optional['Labels'] = None) -> 'Labels':\n \n # Parse the json string if needed.\n- if data is str:\n- dicts = json.loads(data)\n+ if type(data) is str:\n+ dicts = json_loads(data)\n else:\n dicts = data\n \n@@ -798,41 +840,25 @@ def from_json(cls, data: Union[str, dict], match_to: Optional['Labels'] = None)\n else:\n negative_anchors = dict()\n \n- label_cattr = cattr.Converter()\n- label_cattr.register_structure_hook(Skeleton, lambda x,type: skeletons[x])\n- label_cattr.register_structure_hook(Video, lambda x,type: videos[x])\n- label_cattr.register_structure_hook(Node, lambda x,type: x if isinstance(x,Node) else nodes[int(x)])\n- label_cattr.register_structure_hook(Track, lambda x, type: None if x is None else tracks[x])\n-\n- def structure_points(x, type):\n- if 'score' in x.keys():\n- return cattr.structure(x, PredictedPoint)\n- else:\n- return cattr.structure(x, Point)\n-\n- label_cattr.register_structure_hook(Union[Point, PredictedPoint], structure_points)\n-\n- def structure_instances_list(x, type):\n- inst_list = []\n- for inst_data in x:\n- if 'score' in inst_data.keys():\n- inst = label_cattr.structure(inst_data, PredictedInstance)\n- else:\n- inst = label_cattr.structure(inst_data, Instance)\n- inst_list.append(inst)\n- return inst_list\n+ # If there is actual labels data, get it.\n+ if 'labels' in dicts:\n+ label_cattr = make_instance_cattr()\n+ label_cattr.register_structure_hook(Skeleton, lambda x,type: skeletons[int(x)])\n+ label_cattr.register_structure_hook(Video, lambda x,type: videos[int(x)])\n+ label_cattr.register_structure_hook(Node, lambda x,type: x if isinstance(x,Node) else nodes[int(x)])\n+ label_cattr.register_structure_hook(Track, lambda x, type: None if x is None else tracks[int(x)])\n \n- label_cattr.register_structure_hook(Union[List[Instance], List[PredictedInstance]],\n- structure_instances_list)\n- label_cattr.register_structure_hook(ForwardRef('PredictedInstance'), lambda x,type: label_cattr.structure(x, PredictedInstance))\n- labels = label_cattr.structure(dicts['labels'], List[LabeledFrame])\n+ labels = label_cattr.structure(dicts['labels'], List[LabeledFrame])\n+ else:\n+ labels = []\n \n return cls(labeled_frames=labels,\n videos=videos,\n skeletons=skeletons,\n nodes=nodes,\n suggestions=suggestions,\n- negative_anchors=negative_anchors)\n+ negative_anchors=negative_anchors,\n+ tracks=tracks)\n \n @classmethod\n def load_json(cls, filename: str,\n@@ -885,7 +911,7 @@ def load_json(cls, filename: str,\n # We do this to tell apart old JSON data from leap_dev vs the\n # newer format for sLEAP.\n json_str = file.read()\n- 
dicts = json.loads(json_str)\n+ dicts = json_loads(json_str)\n \n # If we have a version number, then it is new sLEAP format\n if \"version\" in dicts:\n@@ -926,12 +952,18 @@ def load_json(cls, filename: str,\n else:\n return load_labels_json_old(data_path=filename, parsed_json=dicts)\n \n- def save_hdf5(self, filename: str, save_frame_data: bool = True):\n+ @staticmethod\n+ def save_hdf5(labels: 'Labels', filename: str,\n+ append: bool = False,\n+ save_frame_data: bool = False):\n \"\"\"\n Serialize the labels dataset to an HDF5 file.\n \n Args:\n+ labels: The Labels dataset to save\n filename: The file to serialize the dataset to.\n+ append: Whether to append these labeled frames to the file or\n+ not.\n save_frame_data: Whether to save the image frame data for any\n labeled frame as well. This is useful for uploading the HDF5 for\n model training when video files are to large to move. This will only\n@@ -941,45 +973,242 @@ def save_hdf5(self, filename: str, save_frame_data: bool = True):\n None\n \"\"\"\n \n- # Unstructure this labels dataset to a bunch of dicts, same as we do for\n- # JSON serialization.\n- d = self.to_dict()\n+ # FIXME: Need to implement this.\n+ if save_frame_data:\n+ raise NotImplementedError('Saving frame data is not implemented yet with HDF5 Labels datasets.')\n \n # Delete the file if it exists, we want to start from scratch since\n # h5py truncates the file which seems to not actually delete data\n- # from the file.\n- if os.path.exists(filename):\n+ # from the file. Don't if we are appending of course.\n+ if os.path.exists(filename) and not append:\n os.unlink(filename)\n \n- with h5.File(filename, 'w') as f:\n-\n- # Save the skeletons\n- #Skeleton.save_all_hdf5(filename=f, skeletons=self.skeletons)\n+ # Serialize all the meta-data to JSON.\n+ d = labels.to_dict(skip_labels=True)\n+\n+ with h5.File(filename, 'a') as f:\n+\n+ # Add all the JSON metadata\n+ meta_group = f.require_group('metadata')\n+\n+ # If we are appending and there already exists JSON metadata\n+ if append and 'json' in meta_group.attrs:\n+\n+ # Otherwise, we need to read the JSON and append to the lists\n+ old_labels = Labels.from_json(meta_group.attrs['json'].tostring().decode())\n+\n+ # A function to join to list but only include new non-dupe entries\n+ # from the right hand list.\n+ def append_unique(old, new):\n+ unique = []\n+ for x in new:\n+ try:\n+ matches = [y.matches(x) for y in old]\n+ except AttributeError:\n+ matches = [x == y for y in old]\n+\n+ # If there were no matches, this is a unique object.\n+ if sum(matches) == 0:\n+ unique.append(x)\n+ else:\n+ # If we have an object that matches, replace the instance with\n+ # the one from the new list. 
This will will make sure objects\n+ # on the Instances are the same as those in the Labels lists.\n+ for i, match in enumerate(matches):\n+ if match:\n+ old[i] = x\n+\n+ return old + unique\n+\n+ # Append the lists\n+ labels.tracks = append_unique(old_labels.tracks, labels.tracks)\n+ labels.skeletons = append_unique(old_labels.skeletons, labels.skeletons)\n+ labels.videos = append_unique(old_labels.videos, labels.videos)\n+ labels.nodes = append_unique(old_labels.nodes, labels.nodes)\n+\n+ # FIXME: Do something for suggestions and negative_anchors\n+\n+ # Get the dict for JSON and save it over the old data\n+ d = labels.to_dict(skip_labels=True)\n+\n+ # Output the dict to JSON\n+ meta_group.attrs['json'] = np.string_(json_dumps(d))\n+\n+ # FIXME: We can probably construct these from attrs fields\n+ # We will store Instances and PredcitedInstances in the same\n+ # table. instance_type=0 or Instance and instance_type=1 for\n+ # PredictedInstance, score will be ignored for Instances.\n+ instance_dtype = np.dtype([('instance_id', 'i8'),\n+ ('instance_type', 'u1'),\n+ ('frame_id', 'u8'),\n+ ('skeleton', 'u4'),\n+ ('track', 'i4'),\n+ ('from_predicted', 'i8'),\n+ ('score', 'f4'),\n+ ('point_id_start', 'u8'),\n+ ('point_id_end', 'u8')])\n+ frame_dtype = np.dtype([('frame_id', 'u8'),\n+ ('video', 'u4'),\n+ ('frame_idx', 'u8'),\n+ ('instance_id_start', 'u8'),\n+ ('instance_id_end', 'u8')])\n+\n+ num_instances = len(labels.all_instances)\n+ max_skeleton_size = max([len(s.nodes) for s in labels.skeletons])\n+\n+ # Initialize data arrays for serialization\n+ points = np.zeros(num_instances * max_skeleton_size, dtype=Point.dtype)\n+ pred_points = np.zeros(num_instances * max_skeleton_size, dtype=PredictedPoint.dtype)\n+ instances = np.zeros(num_instances, dtype=instance_dtype)\n+ frames = np.zeros(len(labels), dtype=frame_dtype)\n+\n+ # Pre compute some structures to make serialization faster\n+ skeleton_to_idx = {skeleton: labels.skeletons.index(skeleton) for skeleton in labels.skeletons}\n+ track_to_idx = {track: labels.tracks.index(track) for track in labels.tracks}\n+ track_to_idx[None] = -1\n+ video_to_idx = {video: labels.videos.index(video) for video in labels.videos}\n+ instance_type_to_idx = {Instance: 0, PredictedInstance: 1}\n+\n+ # If we are appending, we need look inside to see what frame, instance, and point\n+ # ids we need to start from. 
This gives us offsets to use.\n+ if append and 'points' in f:\n+ point_id_offset = f['points'].shape[0]\n+ pred_point_id_offset = f['pred_points'].shape[0]\n+ instance_id_offset = f['instances'][-1]['instance_id'] + 1\n+ frame_id_offset = int(f['frames'][-1]['frame_id']) + 1\n+ else:\n+ point_id_offset = 0\n+ pred_point_id_offset = 0\n+ instance_id_offset = 0\n+ frame_id_offset = 0\n+\n+ point_id = 0\n+ pred_point_id = 0\n+ instance_id = 0\n+ frame_id = 0\n+ all_from_predicted = []\n+ from_predicted_id = 0\n+ for frame_id, label in enumerate(labels):\n+ frames[frame_id] = (frame_id+frame_id_offset, video_to_idx[label.video], label.frame_idx,\n+ instance_id+instance_id_offset, instance_id+instance_id_offset+len(label.instances))\n+ for instance in label.instances:\n+ parray = instance.points_array(copy=False, full=True)\n+ instance_type = type(instance)\n+\n+ # Check whether we are working with a PredictedInstance or an Instance.\n+ if instance_type is PredictedInstance:\n+ score = instance.score\n+ pid = pred_point_id + pred_point_id_offset\n+ else:\n+ score = np.nan\n+ pid = point_id + point_id_offset\n+\n+ # Keep track of any from_predicted instance links, we will insert the\n+ # correct instance_id in the dataset after we are done.\n+ if instance.from_predicted:\n+ all_from_predicted.append(instance.from_predicted)\n+ from_predicted_id = from_predicted_id + 1\n+\n+ # Copy all the data\n+ instances[instance_id] = (instance_id+instance_id_offset,\n+ instance_type_to_idx[instance_type],\n+ frame_id,\n+ skeleton_to_idx[instance.skeleton],\n+ track_to_idx[instance.track],\n+ -1,\n+ score,\n+ pid, pid + len(parray))\n+\n+ # If these are predicted points, copy them to the predicted point array\n+ # otherwise, use the normal point array\n+ if type(parray) is PredictedPointArray:\n+ pred_points[pred_point_id:pred_point_id + len(parray)] = parray\n+ pred_point_id = pred_point_id + len(parray)\n+ else:\n+ points[point_id:point_id + len(parray)] = parray\n+ point_id = point_id + len(parray)\n+\n+ instance_id = instance_id + 1\n+\n+ # We pre-allocated our points array with max possible size considering the max\n+ # skeleton size, drop any unused points.\n+ points = points[0:point_id]\n+ pred_points = pred_points[0:pred_point_id]\n+\n+ # Create datasets if we need to\n+ if append and 'points' in f:\n+ f['points'].resize((f[\"points\"].shape[0] + points.shape[0]), axis = 0)\n+ f['points'][-points.shape[0]:] = points\n+ f['pred_points'].resize((f[\"pred_points\"].shape[0] + pred_points.shape[0]), axis=0)\n+ f['pred_points'][-pred_points.shape[0]:] = pred_points\n+ f['instances'].resize((f[\"instances\"].shape[0] + instances.shape[0]), axis=0)\n+ f['instances'][-instances.shape[0]:] = instances\n+ f['frames'].resize((f[\"frames\"].shape[0] + frames.shape[0]), axis=0)\n+ f['frames'][-frames.shape[0]:] = frames\n+ else:\n+ f.create_dataset(\"points\", data=points, maxshape=(None,), dtype=Point.dtype)\n+ f.create_dataset(\"pred_points\", data=pred_points, maxshape=(None,), dtype=PredictedPoint.dtype)\n+ f.create_dataset(\"instances\", data=instances, maxshape=(None,), dtype=instance_dtype)\n+ f.create_dataset(\"frames\", data=frames, maxshape=(None,), dtype=frame_dtype)\n \n- # Save the frame data for the videos. 
For each video, we will\n- # save a dataset that contains only the frame data that has been\n- # labelled.\n- if save_frame_data:\n+ @classmethod\n+ def load_hdf5(cls, filename: str, video_callback=None):\n+\n+ with h5.File(filename, 'r') as f:\n+\n+ # Extract the Labels JSON metadata and create Labels object with just\n+ # this metadata.\n+ dicts = json_loads(f.require_group('metadata').attrs['json'].tostring().decode())\n+\n+ # Use the callback if given to handle missing videos\n+ if callable(video_callback):\n+ video_callback(dicts[\"videos\"])\n+\n+ labels = cls.from_json(dicts)\n+\n+ frames_dset = f['frames'][:]\n+ instances_dset = f['instances'][:]\n+ points_dset = f['points'][:]\n+ pred_points_dset = f['pred_points'][:]\n+\n+ # Rather than instantiate a bunch of Point\\PredictedPoint objects, we will\n+ # use inplace numpy recarrays. This will save a lot of time and memory\n+ # when reading things in.\n+ points = PointArray(buf=points_dset, shape=len(points_dset))\n+ pred_points = PredictedPointArray(buf=pred_points_dset, shape=len(pred_points_dset))\n+\n+ # Extend the tracks list with a None track. We will signify this with a -1 in the\n+ # data which will map to last element of tracks\n+ tracks = labels.tracks.copy()\n+ tracks.extend([None])\n+\n+ # Create the instances\n+ instances = []\n+ for i in instances_dset:\n+ track = tracks[i['track']]\n+ skeleton = labels.skeletons[i['skeleton']]\n+\n+ if i['instance_type'] == 0: # Instance\n+ instance = Instance(skeleton=skeleton, track=track,\n+ points=points[i['point_id_start']:i['point_id_end']])\n+ else: # PredictedInstance\n+ instance = PredictedInstance(skeleton=skeleton, track=track,\n+ points=pred_points[i['point_id_start']:i['point_id_end']],\n+ score=i['score'])\n+ instances.append(instance)\n+\n+ # Create the labeled frames\n+ frames = [LabeledFrame(video=labels.videos[frame['video']],\n+ frame_idx=frame['frame_idx'],\n+ instances=instances[frame['instance_id_start']:frame['instance_id_end']])\n+ for i, frame in enumerate(frames_dset)]\n+\n+ labels.labeled_frames = frames\n+\n+ # Do the stuff that should happen after we have labeled frames\n+ labels._update_lookup_cache()\n \n- #\n- # # All videos data will be put in the videos group\n- # if 'frames' not in f:\n- # frames_group = f.create_group('frames', track_order=True)\n- # else:\n- # frames_group = f.require_group('frames')\n- self.save_frame_data_imgstore()\n-\n- #\n- # dset = f.create_dataset(f\"/frames/{v_idx}\",\n- # data=v.get_frames(frame_idxs),\n- # compression=\"gzip\")\n- #\n- # # Write the dataset to JSON string, then store it in a string\n- # # attribute\n- # dset.attrs[f\"video_json\"] = np.string_(json.dumps(d['videos'][v_idx]))\n-\n- # Save the instance level data\n- Instance.save_hdf5(file=f, instances=self.all_instances)\n+ return labels\n \n def save_frame_data_imgstore(self, output_dir: str = './', format: str = 'png'):\n \"\"\"\n@@ -1068,7 +1297,6 @@ def load_mat(cls, filename):\n x = points_[node_idx][0][i]\n y = points_[node_idx][1][i]\n new_inst[node] = Point(x, y)\n- new_inst.drop_nan_points()\n if len(new_inst.points()):\n new_frame = LabeledFrame(video=vid, frame_idx=i)\n new_frame.instances = new_inst,\n@@ -1266,7 +1494,7 @@ def load_labels_json_old(data_path: str, parsed_json: dict = None,\n A newly constructed Labels object.\n \"\"\"\n if parsed_json is None:\n- data = json.loads(open(data_path).read())\n+ data = json_loads(open(data_path).read())\n else:\n data = parsed_json\n \ndiff --git a/sleap/io/video.py b/sleap/io/video.py\n--- 
a/sleap/io/video.py\n+++ b/sleap/io/video.py\n@@ -76,6 +76,21 @@ def check(self, attribute, value):\n self.__width_idx = 2\n self.__height_idx = 1\n \n+ def matches(self, other):\n+ \"\"\"\n+ Check if attributes match.\n+\n+ Args:\n+ other: The instance to compare with.\n+\n+ Returns:\n+ True if attributes match, False otherwise\n+ \"\"\"\n+ return self.filename == other.filename and \\\n+ self.dataset == other.dataset and \\\n+ self.convert_range == other.convert_range and \\\n+ self.input_format == other.input_format\n+\n # The properties and methods below complete our contract with the\n # higher level Video interface.\n \n@@ -159,6 +174,21 @@ def __attrs_post_init__(self):\n if self._detect_grayscale is True:\n self.grayscale = bool(np.alltrue(self.__test_frame[..., 0] == self.__test_frame[..., -1]))\n \n+ def matches(self, other):\n+ \"\"\"\n+ Check if attributes match.\n+\n+ Args:\n+ other: The instance to compare with.\n+\n+ Returns:\n+ True if attributes match, False otherwise\n+ \"\"\"\n+ return self.filename == other.filename and \\\n+ self.grayscale == other.grayscale and \\\n+ self.bgr == other.bgr\n+\n+\n @property\n def fps(self):\n return self.__reader.get(cv2.CAP_PROP_FPS)\n@@ -245,6 +275,18 @@ def __attrs_post_init__(self):\n # The properties and methods below complete our contract with the\n # higher level Video interface.\n \n+ def matches(self, other):\n+ \"\"\"\n+ Check if attributes match.\n+\n+ Args:\n+ other: The instance to comapare with.\n+\n+ Returns:\n+ True if attributes match, False otherwise\n+ \"\"\"\n+ return np.all(self.__data == other.__data)\n+\n @property\n def frames(self):\n return self.__data.shape[self.__frame_idx]\n@@ -306,6 +348,18 @@ def __attrs_post_init__(self):\n # The properties and methods below complete our contract with the\n # higher level Video interface.\n \n+ def matches(self, other):\n+ \"\"\"\n+ Check if attributes match.\n+\n+ Args:\n+ other: The instance to comapare with.\n+\n+ Returns:\n+ True if attributes match, False otherwise\n+ \"\"\"\n+ return self.filename == other.filename and self.index_by_original == other.index_by_original\n+\n @property\n def frames(self):\n return self.__store.frame_count\ndiff --git a/sleap/nn/inference.py b/sleap/nn/inference.py\n--- a/sleap/nn/inference.py\n+++ b/sleap/nn/inference.py\n@@ -51,10 +51,8 @@ class Predictor:\n Pipeline:\n \n * Pre-processing to load, crop and scale images\n-\n * Inference to predict confidence maps and part affinity fields,\n and use these to generate PredictedInstances in LabeledFrames\n-\n * Post-processing to collate data from all frames, track instances\n across frames, and save the results\n \n@@ -139,6 +137,10 @@ def predict(self,\n # anything in OpenCV that is actually multi-threaded but maybe\n # we will down the line.\n cv2.setNumThreads(usable_cpu_count())\n+ \n+ # Delete the output file if it exists already\n+ if os.path.exists(self.output_path):\n+ os.unlink(self.output_path)\n \n logger.info(f\"Predict is async: {is_async}\")\n \n@@ -174,6 +176,10 @@ def predict(self,\n # Initialize tracking\n tracker = FlowShiftTracker(window=self.flow_window, verbosity=0)\n \n+ # Delete the output file if it exists already\n+ if os.path.exists(output_path):\n+ os.unlink(output_path)\n+\n # Process chunk-by-chunk!\n t0_start = time()\n predicted_frames: List[LabeledFrame] = []\n@@ -329,7 +335,10 @@ def predict(self,\n # We should save in chunks then combine at the end.\n labels = Labels(labeled_frames=predicted_frames)\n if self.output_path is not None:\n- 
Labels.save_json(labels, filename=self.output_path, compress=True)\n+ if output_path.endswith('json'):\n+ Labels.save_json(labels, filename=output_path, compress=True)\n+ else:\n+ Labels.save_hdf5(labels, filename=output_path)\n \n logger.info(\" Saved to: %s [%.1fs]\" % (self.output_path, time() - t0))\n \n@@ -718,6 +727,9 @@ def frame_list(frame_str: str):\n 'a range separated by hyphen (e.g. 1-3). (default is entire video)')\n parser.add_argument('-o', '--output', type=str, default=None,\n help='The output filename to use for the predicted data.')\n+ parser.add_argument('--out_format', choices=['hdf5', 'json'], help='The format to use for'\n+ ' the output file. Either hdf5 or json. hdf5 is the default.',\n+ default='hdf5')\n parser.add_argument('--save-confmaps-pafs', dest='save_confmaps_pafs', action='store_const',\n const=True, default=False,\n help='Whether to save the confidence maps or pafs')\n@@ -729,7 +741,11 @@ def frame_list(frame_str: str):\n \n args = parser.parse_args()\n \n- output_suffix = \".predictions.json\"\n+ if args.out_format == 'json':\n+ output_suffix = \".predictions.json\"\n+ else:\n+ output_suffix = \".predictions.h5\"\n+\n if args.frames is not None:\n output_suffix = f\".frames{min(args.frames)}_{max(args.frames)}\" + output_suffix\n \ndiff --git a/sleap/nn/tracking.py b/sleap/nn/tracking.py\n--- a/sleap/nn/tracking.py\n+++ b/sleap/nn/tracking.py\n@@ -222,7 +222,7 @@ def process(self,\n self.last_frame_index = t\n t = frame.frame_idx\n \n- instances_pts = [i.points_array(cached=True) for i in frame.instances]\n+ instances_pts = [i.points_array() for i in frame.instances]\n \n # If we do not have any active tracks, we will spawn one for each\n # matched instance and continue to the next frame.\n@@ -240,7 +240,7 @@ def process(self,\n \n # Get all points in reference frame\n instances_ref = self.tracks.get_frame_instances(self.last_frame_index, max_shift=self.window - 1)\n- pts_ref = [instance.points_array(cached=True) for instance in instances_ref]\n+ pts_ref = [instance.points_array() for instance in instances_ref]\n \n tmp = min([instance.frame_idx for instance in instances_ref] +\n [instance.source.frame_idx for instance in instances_ref\n@@ -305,7 +305,7 @@ def process(self,\n cost_matrix = np.full((len(unassigned_pts), len(shifted_tracks)), np.nan)\n for i, track in enumerate(shifted_tracks):\n # Get shifted points for current track\n- track_pts = np.stack([instance.points_array(cached=True)\n+ track_pts = np.stack([instance.points_array()\n for instance in shifted_instances\n if instance.track == track], axis=0) # track_instances x nodes x 2\n \ndiff --git a/sleap/nn/util.py b/sleap/nn/util.py\n--- a/sleap/nn/util.py\n+++ b/sleap/nn/util.py\n@@ -1,10 +1,12 @@\n from typing import Generator, Sequence, Tuple\n \n+\n def batch_count(data, batch_size):\n \"\"\"Return number of batch_size batches into which data can be divided.\"\"\"\n from math import ceil\n return ceil(len(data) / batch_size)\n \n+\n def batch(data: Sequence, batch_size: int) -> Generator[Tuple[int, int, Sequence], None, None]:\n \"\"\"Iterate over sequence data in batches.\n \n@@ -23,6 +25,7 @@ def batch(data: Sequence, batch_size: int) -> Generator[Tuple[int, int, Sequence\n end = min(start + batch_size, total_row_count)\n yield i, start, data[start:end]\n \n+\n def save_visual_outputs(output_path: str, data: dict):\n import h5py\n import numpy as np\n@@ -48,4 +51,5 @@ def save_visual_outputs(output_path: str, data: dict):\n f.create_dataset(key, data=val, maxshape=maxshape,\n 
compression=\"gzip\", compression_opts=9)\n \n- # logger.info(\" Saved visual outputs [%.1fs]\" % (time() - t0))\n\\n+ # logger.info(\" Saved visual outputs [%.1fs]\" % (time() - t0))\n+\ndiff --git a/sleap/skeleton.py b/sleap/skeleton.py\n--- a/sleap/skeleton.py\n+++ b/sleap/skeleton.py\n@@ -57,6 +57,18 @@ def from_names(name_list: str):\n def as_node(cls, node):\n return node if isinstance(node, cls) else cls(node)\n \n+ def matches(self, other):\n+ \"\"\"\n+ Check whether all attributes match between two nodes.\n+\n+ Args:\n+ other: The node to compare to this one.\n+\n+ Returns:\n+ True if all attributes match, False otherwise.\n+ \"\"\"\n+ return other.name == self.name and other.weight == self.weight\n+\n \n class Skeleton:\n \"\"\"The main object for representing animal skeletons in LEAP.\n@@ -303,17 +315,21 @@ def symmetries_full(self):\n # Find all symmetric edges\n return [(src, dst, key, attr) for src, dst, key, attr in self._graph.edges(keys=True, data=True) if attr[\"type\"] == EdgeType.SYMMETRY]\n \n- def node_to_index(self, node_name: str):\n+ def node_to_index(self, node: Union[str, Node]):\n \"\"\"\n- Return the index of the node with name node_name.\n+ Return the index of the node, accepts either a node or string name of a Node.\n \n Args:\n- node_name: The name of the node.\n+ node: The name of the node or the Node object.\n \n Returns:\n The index of the node in the graph.\n \"\"\"\n- return list(self.graph.nodes).index(self.find_node(node_name))\n+ node_list = list(self._graph.nodes)\n+ try:\n+ return node_list.index(node)\n+ except ValueError:\n+ return node_list.index(self.find_node(node))\n \n def add_node(self, name: str):\n \"\"\"Add a node representing an animal part to the skeleton.\ndiff --git a/sleap/util.py b/sleap/util.py\n--- a/sleap/util.py\n+++ b/sleap/util.py\n@@ -96,4 +96,23 @@ def frame_list(frame_str: str):\n max_frame = int(min_max[1])\n return list(range(min_frame, max_frame+1))\n \n- return [int(x) for x in frame_str.split(\",\")] if len(frame_str) else None\n\\n+ return [int(x) for x in frame_str.split(\",\")] if len(frame_str) else None\n+\n+\n+def uniquify(seq):\n+ \"\"\"\n+ Given a list, return unique elements but preserve order.\n+\n+ Note: This will not work on Python 3.5 or lower since dicts don't\n+ preserve order.\n+\n+ Args:\n+ seq: The list to remove duplicates from.\n+\n+ Returns:\n+ The unique elements from the input list extracted in original order.\n+ \"\"\"\n+\n+ # Raymond Hettinger\n+ # https://twitter.com/raymondh/status/944125570534621185\n+ return list(dict.fromkeys(seq))\n\\n", "test_patch": "diff --git a/tests/io/test_dataset.py b/tests/io/test_dataset.py\n--- a/tests/io/test_dataset.py\n+++ b/tests/io/test_dataset.py\n@@ -3,7 +3,7 @@\n import numpy as np\n \n from sleap.skeleton import Skeleton\n-from sleap.instance import Instance, Point, LabeledFrame\n+from sleap.instance import Instance, Point, LabeledFrame, PredictedInstance\n from sleap.io.video import Video, MediaVideo\n from sleap.io.dataset import Labels, load_labels_json_old\n \n@@ -24,22 +24,52 @@ def _check_labels_match(expected_labels, other_labels, format = 'png'):\n \n # Check the top level objects\n for x, y in zip(expected_labels.skeletons, other_labels.skeletons):\n- assert x.matches(y)\n+\n+ # Inline the skeleton matches check to see if we can get a better\n+ # idea of why this test fails non-deterministically. The callstack\n+ # doesn't go deeper than the method call in pytest for some reason.\n+ # assert x.matches(y). 
The code below is weird because it is converted\n+ # from Skeleton.__eq__.\n+ self = x\n+ other = y\n+\n+ # First check names, duh!\n+ if other.name != self.name:\n+ assert False\n+\n+ def dict_match(dict1, dict2):\n+ return dict1 == dict2\n+\n+ # Check if the graphs are iso-morphic\n+ import networkx as nx\n+ is_isomorphic = nx.is_isomorphic(self._graph, other._graph, node_match=dict_match)\n+\n+ if not is_isomorphic:\n+ assert False\n+\n+ # Now check that the nodes have the same labels and order. They can have\n+ # different weights I guess?!\n+ for node1, node2 in zip(self._graph.nodes, other._graph.nodes):\n+ if node1.name != node2.name:\n+ assert False\n \n for x, y in zip(expected_labels.tracks, other_labels.tracks):\n assert x.name == y.name and x.spawned_on == y.spawned_on\n \n # Check that we have the same thing\n for expected_label, label in zip(expected_labels.labels, other_labels.labels):\n+\n assert expected_label.frame_idx == label.frame_idx\n \n frame_idx = label.frame_idx\n \n+ frame_data = label.video.get_frame(frame_idx)[0:15, 0:15, :]\n+ expected_frame_data = expected_label.video.get_frame(frame_idx)[0:15, 0:15, :]\n+\n # Compare the first frames of the videos, do it on a small sub-region to\n # make the test reasonable in time.\n if format is 'png':\n- assert np.allclose(expected_label.video.get_frame(frame_idx)[0:15, 0:15, :],\n- label.video.get_frame(frame_idx)[0:15, 0:15, :])\n+ assert np.allclose(frame_data, expected_frame_data)\n \n # Compare the instances\n assert all(i1.matches(i2) for (i1, i2) in zip(expected_label.instances, label.instances))\n@@ -49,7 +79,6 @@ def _check_labels_match(expected_labels, other_labels, format = 'png'):\n break\n \n \n-\n def test_labels_json(tmpdir, multi_skel_vid_labels):\n json_file_path = os.path.join(tmpdir, 'dataset.json')\n \n@@ -236,10 +265,12 @@ def test_instance_access():\n assert len(list(labels.instances(video=dummy_video))) == 20\n assert len(list(labels.instances(video=dummy_video2))) == 30\n \n+\n def test_load_labels_mat(mat_labels):\n assert len(mat_labels.nodes) == 6\n assert len(mat_labels) == 43\n \n+\n @pytest.mark.parametrize(\"format\", ['png', 'mjpeg/avi'])\n def test_save_labels_with_frame_data(multi_skel_vid_labels, tmpdir, format):\n \"\"\"\n@@ -263,6 +294,54 @@ def test_save_labels_with_frame_data(multi_skel_vid_labels, tmpdir, format):\n loaded_labels = Labels.load_json(f\"{filename}.zip\")\n \n \n-def test_save_labels_hdf5(multi_skel_vid_labels, tmpdir):\n- # FIXME: This is not really implemented yet and needs a real test\n- multi_skel_vid_labels.save_hdf5(filename=os.path.join(tmpdir, 'test.h5'), save_frame_data=False)\n+def test_labels_hdf5(multi_skel_vid_labels, tmpdir):\n+ labels = multi_skel_vid_labels\n+ filename = os.path.join(tmpdir, 'test.h5')\n+\n+ Labels.save_hdf5(filename=filename, labels=labels)\n+\n+ loaded_labels = Labels.load_hdf5(filename=filename)\n+\n+ _check_labels_match(labels, loaded_labels)\n+\n+\n+def test_labels_predicted_hdf5(multi_skel_vid_labels, tmpdir):\n+ labels = multi_skel_vid_labels\n+ filename = os.path.join(tmpdir, 'test.h5')\n+\n+ # Lets promote some of these Instances to predicted instances\n+ for label in labels:\n+ for i, instance in enumerate(label.instances):\n+ if i % 2 == 0:\n+ label.instances[i] = PredictedInstance.from_instance(instance, 0.3)\n+\n+ # Lets also add some from_predicted values\n+ for label in labels:\n+ label.instances[1].from_predicted = label.instances[0]\n+\n+ Labels.save_hdf5(filename=filename, labels=labels)\n+\n+ loaded_labels = 
Labels.load_hdf5(filename=filename)\n+\n+ _check_labels_match(labels, loaded_labels)\n+\n+def test_labels_append_hdf5(multi_skel_vid_labels, tmpdir):\n+ labels = multi_skel_vid_labels\n+ filename = os.path.join(tmpdir, 'test.h5')\n+\n+ # Save each frame of the Labels dataset one by one in append\n+ # mode\n+ for label in labels:\n+\n+ # Just do the first 20 to speed things up\n+ if label.frame_idx > 20:\n+ break\n+\n+ Labels.save_hdf5(filename=filename, labels=Labels([label]), append=True)\n+\n+ # Now load the dataset and make sure we get the same thing we started\n+ # with.\n+ loaded_labels = Labels.load_hdf5(filename=filename)\n+\n+ _check_labels_match(labels, loaded_labels)\n+\ndiff --git a/tests/test_instance.py b/tests/test_instance.py\n--- a/tests/test_instance.py\n+++ b/tests/test_instance.py\n@@ -76,55 +76,6 @@ def test_instance_point_iter(skeleton):\n assert points[node.name] == point\n \n \n-def test_instance_to_pandas_df(skeleton, instances):\n- \"\"\"\n- Test generating pandas DataFrames from lists of instances.\n- \"\"\"\n-\n- # How many columns are supposed to be in point DataFrame\n- NUM_COLS = 9\n-\n- NUM_INSTANCES = len(instances)\n-\n- df = Instance.to_pandas_df(instances)\n-\n- # Check to make sure we got the expected shape\n- assert df.shape == (3*NUM_INSTANCES, NUM_COLS)\n-\n- # Check skip_nan is working\n- assert Instance.to_pandas_df(instances, skip_nan=False).shape == (4*NUM_INSTANCES, NUM_COLS)\n-\n-# Skip HDF5 saving of instances now because tracks are not saved properly\n-@pytest.mark.skip\n-def test_hdf5(instances, tmpdir):\n- out_dir = tmpdir\n- path = os.path.join(out_dir, 'dataset.h5')\n-\n- if os.path.isfile(path):\n- os.remove(path)\n-\n- Instance.save_hdf5(file=path, instances=instances)\n-\n- assert os.path.isfile(path)\n-\n- # Make a deep copy, because we are gonna drop nan points in place.\n- # and I don't want to change the fixture.\n- instances_copy = copy.deepcopy(instances)\n-\n- # Drop the NaN points\n- Instance.drop_all_nan_points(instances_copy)\n-\n- # Make sure we can overwrite\n- Instance.save_hdf5(file=path, instances=instances_copy[0:100], skip_nan=False)\n-\n- # Load the data back\n- instances2 = Instance.load_hdf5(file=path)\n-\n- # Check that we get back the same instances\n- for i in range(len(instances2)):\n- assert instances_copy[i].matches(instances2[i])\n-\n-\n def test_skeleton_node_name_change():\n \"\"\"\n Test that and instance is not broken after a node on the\n@@ -194,18 +145,6 @@ def test_points_array(skeleton):\n assert np.allclose(pts[skeleton.node_to_index('head'), :], [0, 4])\n assert np.allclose(pts[skeleton.node_to_index('thorax'), :], [1, 2])\n \n- # Now use the cached version and make sure changes are not\n- # reflected\n- pts = instance1.points_array(cached=True)\n- assert np.allclose(pts[skeleton.node_to_index('thorax'), :], [1, 2])\n- instance1['thorax'] = Point(1, 6)\n- pts = instance1.points_array(cached=True)\n- assert np.allclose(pts[skeleton.node_to_index('thorax'), :], [1, 2])\n-\n- # Now drop the cache and make sure changes are reflected.\n- pts = instance1.points_array()\n- assert np.allclose(pts[skeleton.node_to_index('thorax'), :], [1, 6])\n-\n def test_instance_labeled_frame_ref(skeleton, centered_pair_vid):\n \"\"\"\n Test whether links between labeled frames and instances are kept\ndiff --git a/tests/test_point_array.py b/tests/test_point_array.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/test_point_array.py\n@@ -0,0 +1,87 @@\n+import numpy as np\n+import pytest\n+\n+from sleap.instance import 
Point, PredictedPoint, PointArray, PredictedPointArray\n+\n+@pytest.mark.parametrize(\"p1\", [Point(0.0, 0.0), PredictedPoint(0.0, 0.0, 0.0),\n+ PointArray(3)[0], PredictedPointArray(3)[0]])\n+def test_point(p1):\n+ \"\"\"\n+ Test the Point and PredictedPoint API. This is mainly a safety\n+ check to make sure numpy record array stuff doesn't change\n+ \"\"\"\n+\n+ # Make sure we are getting Points or PredictedPoints only.\n+ # This makes sure that PointArray(3)[0] returns a point for\n+ # example\n+ assert type(p1) in [PredictedPoint, Point]\n+\n+ # Check getters and setters\n+ p1.x = 3.0\n+ assert p1.x == 3.0\n+\n+ if type(p1) is PredictedPoint:\n+ p1.score = 30.0\n+ assert p1.score == 30.0\n+\n+\n+def test_constructor():\n+ p = Point(x=1.0, y=2.0, visible=False, complete=True)\n+ assert p.x == 1.0\n+ assert p.y == 2.0\n+ assert p.visible == False\n+ assert p.complete == True\n+\n+ p = PredictedPoint(x=1.0, y=2.0, visible=False, complete=True, score=0.3)\n+ assert p.x == 1.0\n+ assert p.y == 2.0\n+ assert p.visible == False\n+ assert p.complete == True\n+ assert p.score == 0.3\n+\n+\n+@pytest.mark.parametrize('parray_cls', [PointArray, PredictedPointArray])\n+def test_point_array(parray_cls):\n+\n+ p = parray_cls(5)\n+\n+ # Make sure length works\n+ assert len(p) == 5\n+ assert len(p['x']) == 5\n+ assert len(p[['x', 'y']]) == 5\n+\n+ # Check that single point getitem returns a Point class\n+ if parray_cls is PredictedPointArray:\n+ assert type(p[0]) is PredictedPoint\n+ else:\n+ assert type(p[0]) is Point\n+\n+ # Check that slices preserve type as well\n+ assert type(p[0:4]) is type(p)\n+\n+ # Check field access\n+ assert type(p.x) is np.ndarray\n+\n+ # Check make_default\n+ d1 = parray_cls.make_default(3)\n+ d2 = parray_cls.make_default(3)\n+\n+ # I have to convert from structured to unstructured to get this comparison\n+ # to work.\n+ from numpy.lib.recfunctions import structured_to_unstructured\n+ np.testing.assert_array_equal(structured_to_unstructured(d1), structured_to_unstructured(d2))\n+\n+\n+def test_from_and_to_array():\n+ p = PointArray(3)\n+\n+ # Do a round trip conversion\n+ r = PredictedPointArray.to_array(PredictedPointArray.from_array(p))\n+\n+ from numpy.lib.recfunctions import structured_to_unstructured\n+ np.testing.assert_array_equal(structured_to_unstructured(p), structured_to_unstructured(r))\n+\n+ # Make sure conversion uses default score\n+ r = PredictedPointArray.from_array(p)\n+ assert r.score[0] == PredictedPointArray.make_default(1)[0].score\n+\n", "problem_statement": "", "hints_text": "", "created_at": "2019-08-29T19:35:44Z"}
PythonDataset/test/spruned-task-instances.jsonl.all ADDED
 
PythonDataset/test/teuthology-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "ceph/teuthology", "pull_number": 355, "instance_id": "ceph__teuthology-355", "issue_numbers": "", "base_commit": "8aa6bf0ca856a93ef6fad096473ef766d07c221a", "patch": "diff --git a/scripts/results.py b/scripts/results.py\n--- a/scripts/results.py\n+++ b/scripts/results.py\n@@ -1,40 +1,22 @@\n-import argparse\n+\"\"\"\n+usage: teuthology-results [-h] [-v] [--dry-run] [--email EMAIL] [--timeout TIMEOUT] --archive-dir DIR --name NAME\n \n+Email teuthology suite results\n+\n+optional arguments:\n+ -h, --help show this help message and exit\n+ -v, --verbose be more verbose\n+ --dry-run Instead of sending the email, just print it\n+ --email EMAIL address to email test failures to\n+ --timeout TIMEOUT how many seconds to wait for all tests to finish (default\n+ no wait)\n+ --archive-dir DIR path under which results for the suite are stored\n+ --name NAME name of the suite\n+\"\"\"\n+import docopt\n import teuthology.results\n \n \n def main():\n- teuthology.results.main(parse_args())\n-\n-\n-def parse_args():\n- parser = argparse.ArgumentParser(\n- description='Email teuthology suite results')\n- parser.add_argument(\n- '--email',\n- help='address to email test failures to',\n- )\n- parser.add_argument(\n- '--timeout',\n- help='how many seconds to wait for all tests to finish (default no ' +\n- 'wait)',\n- type=int,\n- default=0,\n- )\n- parser.add_argument(\n- '--archive-dir',\n- metavar='DIR',\n- help='path under which results for the suite are stored',\n- required=True,\n- )\n- parser.add_argument(\n- '--name',\n- help='name of the suite',\n- required=True,\n- )\n- parser.add_argument(\n- '-v', '--verbose',\n- action='store_true', default=False,\n- help='be more verbose',\n- )\n- return parser.parse_args()\n+ args = docopt.docopt(__doc__)\n+ teuthology.results.main(args)\ndiff --git a/teuthology/results.py b/teuthology/results.py\n--- a/teuthology/results.py\n+++ b/teuthology/results.py\n@@ -8,6 +8,7 @@\n from textwrap import fill\n \n import teuthology\n+from teuthology.config import config\n from teuthology import misc\n from teuthology import ls\n from .job_status import get_status\n@@ -19,56 +20,60 @@\n def main(args):\n \n log = logging.getLogger(__name__)\n- if args.verbose:\n+ if args['--verbose']:\n teuthology.log.setLevel(logging.DEBUG)\n \n- misc.read_config(args)\n-\n- log_path = os.path.join(args.archive_dir, 'results.log')\n- teuthology.setup_log_file(log_path)\n+ if not args['--dry-run']:\n+ log_path = os.path.join(args['--archive-dir'], 'results.log')\n+ teuthology.setup_log_file(log_path)\n \n try:\n- results(args)\n+ results(args['--archive-dir'], args['--name'], args['--email'],\n+ args['--timeout'], args['--dry-run'])\n except Exception:\n log.exception('error generating results')\n raise\n \n \n-def results(args):\n- archive_base = os.path.split(args.archive_dir)[0]\n+def results(archive_dir, name, email, timeout, dry_run):\n+ archive_base = os.path.split(archive_dir)[0]\n serializer = ResultsSerializer(archive_base)\n starttime = time.time()\n \n- log.info('Waiting up to %d seconds for tests to finish...', args.timeout)\n- while serializer.running_jobs_for_run(args.name) and args.timeout > 0:\n- if time.time() - starttime > args.timeout:\n+ if timeout:\n+ log.info('Waiting up to %d seconds for tests to finish...', timeout)\n+ while serializer.running_jobs_for_run(name) and timeout > 0:\n+ if time.time() - starttime > timeout:\n log.warn('test(s) did not finish before timeout of %d seconds',\n- args.timeout)\n+ timeout)\n break\n time.sleep(10)\n log.info('Tests 
finished! gathering results...')\n \n- (subject, body) = build_email_body(args.name, args.archive_dir,\n- args.timeout)\n+ (subject, body) = build_email_body(name, archive_dir)\n \n try:\n- if args.email:\n+ if email and dry_run:\n+ print \"From: %s\" % (config.results_sending_email or 'teuthology')\n+ print \"To: %s\" % email\n+ print \"Subject: %s\" % subject\n+ print body\n+ elif email:\n email_results(\n subject=subject,\n- from_=args.teuthology_config.get('results_sending_email',\n- 'teuthology'),\n- to=args.email,\n+ from_=(config.results_sending_email or 'teuthology'),\n+ to=email,\n body=body,\n )\n finally:\n- generate_coverage(args)\n+ generate_coverage(archive_dir, name)\n \n \n-def generate_coverage(args):\n+def generate_coverage(archive_dir, name):\n coverage_config_keys = ('coverage_output_dir', 'coverage_html_dir',\n 'coverage_tools_dir')\n for key in coverage_config_keys:\n- if key not in args.teuthology_config:\n+ if key not in config.to_dict():\n log.warn(\n \"'%s' not in teuthology config; skipping coverage report\",\n key)\n@@ -79,14 +84,12 @@ def generate_coverage(args):\n os.path.join(os.path.dirname(sys.argv[0]), 'teuthology-coverage'),\n '-v',\n '-o',\n- os.path.join(args.teuthology_config[\n- 'coverage_output_dir'], args.name),\n+ os.path.join(config.coverage_output_dir, name),\n '--html-output',\n- os.path.join(args.teuthology_config[\n- 'coverage_html_dir'], args.name),\n+ os.path.join(config.coverage_html_dir, name),\n '--cov-tools-dir',\n- args.teuthology_config['coverage_tools_dir'],\n- args.archive_dir,\n+ config.coverage_tools_dir,\n+ archive_dir,\n ],\n )\n \n@@ -105,7 +108,7 @@ def email_results(subject, from_, to, body):\n smtp.quit()\n \n \n-def build_email_body(name, archive_dir, timeout):\n+def build_email_body(name, archive_dir):\n failed = {}\n hung = {}\n passed = {}\n", "test_patch": "diff --git a/teuthology/test/test_results.py b/teuthology/test/test_results.py\n--- a/teuthology/test/test_results.py\n+++ b/teuthology/test/test_results.py\n@@ -86,8 +86,6 @@ def test_build_email_body(self):\n self.archive.populate_archive(run_name, self.reference['jobs'])\n (subject, body) = results.build_email_body(\n run_name,\n- run_dir,\n- 36000)\n+ run_dir)\n assert subject == self.reference['subject']\n- print body\n assert body == self.reference['body']\n", "problem_statement": "", "hints_text": "", "created_at": "2014-11-11T23:48:49Z"}
PythonDataset/test/translate-python-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "terryyin/translate-python", "pull_number": 32, "instance_id": "terryyin__translate-python-32", "issue_numbers": "", "base_commit": "34d149fc8b182a02ca1336d1b3895dda711bcac5", "patch": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,6 +22,26 @@\n break\n \n \n+# Save last Version\n+def save_version():\n+ version_path = os.path.join(here, \"translate/version.py\")\n+\n+ with open(version_path) as version_file_read:\n+ content_file = version_file_read.read()\n+\n+ VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n+ mo = re.search(VSRE, content_file, re.M)\n+ current_version = mo.group(1)\n+\n+ content_file = content_file.replace(current_version, \"{}\".format(version))\n+\n+ with open(version_path, 'w') as version_file_write:\n+ version_file_write.write(content_file)\n+\n+\n+save_version()\n+\n+\n class VersionCommand(Command):\n description = 'Show library version'\n user_options = []\n@@ -37,12 +57,13 @@ def run(self):\n \n \n # Get the long description\n-with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n- long_description = f.read()\n+with codecs.open(os.path.join(here, 'README.rst')) as f:\n+ long_description = '\\n{}'.format(f.read())\n \n-# Get version\n-with codecs.open(os.path.join(here, 'CHANGES.rst'), encoding='utf-8') as f:\n+# Get change log\n+with codecs.open(os.path.join(here, 'CHANGES.rst')) as f:\n changelog = f.read()\n+ long_description += '\\n\\n{}'.format(changelog)\n \n # Requirements\n with codecs.open(os.path.join(here, 'requirements.txt')) as f:\n@@ -51,32 +72,37 @@ def run(self):\n with codecs.open(os.path.join(here, 'requirements-dev.txt')) as f:\n tests_requirements = [line.replace('\\n', '') for line in f.readlines() if not line == '-r requirements.txt\\n']\n \n+\n setup(\n- name='translate',\n- version=version,\n- description=description,\n- long_description=long_description,\n- url='https://github.com/terryyin/google-translate-python',\n+ author='Terry Yin',\n+ author_email='terry.yinze@gmail.com',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n+ 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n- 'Operating System :: POSIX',\n- 'Operating System :: Microsoft :: Windows',\n- 'Operating System :: MacOS :: MacOS X',\n- 'Topic :: Education',\n- 'Programming Language :: Python',\n+ 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n- 'Programming Language :: Python :: 3.5'\n+ 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python',\n+ 'Topic :: Education',\n ],\n- author='Terry Yin',\n- author_email='terry.yinze@gmail.com',\n+ cmdclass={'version': VersionCommand},\n+ description=description,\n+ entry_points='''\n+ [console_scripts]\n+ translate-cli=translate.__main__:cli\n+ ''',\n+ install_requires=install_requirements,\n+ keywords='translate translation command line',\n+ license='MIT',\n+ long_description=long_description,\n+ name='translate',\n packages=find_packages(exclude=['docs', 'tests', 'tests.*', 'requirements']),\n setup_requires=['pytest-runner'],\n- install_requires=install_requirements,\n tests_require=tests_requirements,\n- scripts=['translate-cli'],\n- cmdclass={'version': VersionCommand},\n+ url='https://github.com/terryyin/google-translate-python',\n+ version=version,\n )\ndiff --git a/translate/__init__.py b/translate/__init__.py\n--- a/translate/__init__.py\n+++ 
b/translate/__init__.py\n@@ -2,5 +2,6 @@\n # encoding: utf-8\n \n from .translate import Translator # noqa\n+from .version import __version__ # noqa\n \n-__all__ = ['Translator', ]\n+__all__ = ['Translator', '__version__']\ndiff --git a/translate/main.py b/translate/main.py\n--- a/translate/main.py\n+++ b/translate/main.py\n@@ -1,48 +1,85 @@\n #!/usr/bin/env python\n # encoding: utf-8\n-\n-import argparse\n-import sys\n+import os\n+import click\n+try:\n+ from configparser import ConfigParser\n+except ImportError:\n+ from ConfigParser import ConfigParser\n import locale\n+import sys\n \n from .translate import Translator\n+from .version import __version__\n+\n+\n+TRANSLATION_FROM_DEFAULT = 'autodetect'\n+CONFIG_FILE_PATH = '~/.python-translate.cfg'\n+VERSION_FILE = 'VERSION.txt'\n+\n+here = os.path.dirname(os.path.abspath(__file__))\n+\n+\n+def get_config_info(lang_type):\n+ config_file_path = os.path.expanduser(CONFIG_FILE_PATH)\n+ lang_types = ('from_lang', 'to_lang')\n+ if not os.path.exists(config_file_path) or lang_type not in lang_types:\n+ return ''\n+\n+ config_parser = ConfigParser()\n+ config_parser.read(config_file_path)\n+ default_section = 'DEFAULT'\n+ return config_parser.get(default_section, lang_type)\n+\n+\n+def print_version(ctx, param, value):\n+ if not value or ctx.resilient_parsing:\n+ return\n+\n+ click.echo('translate, version {}'.format(__version__))\n+ ctx.exit()\n+\n+\n+@click.command()\n+@click.option(\n+ '--version', is_flag=True, callback=print_version,\n+ expose_value=False,\n+ is_eager=True,\n+ help='Show the version and exit.'\n+)\n+@click.option(\n+ 'from_lang', '--from', '-f',\n+ default=get_config_info('from_lang') or TRANSLATION_FROM_DEFAULT,\n+ help=\"Language of the text being translated. The default value is 'autodetect'\"\n+)\n+@click.option(\n+ 'to_lang', '--to', '-t',\n+ default=get_config_info('to_lang'),\n+ prompt='Translate to',\n+ help='Language you want translate.'\n+)\n+@click.argument('text', nargs=-1, type=click.STRING, required=True)\n+def main(from_lang, to_lang, text):\n+ \"\"\"\n+ Python command line tool to make on line translations\n+\n+ Example: \\n\n+ \\b\n+ \\t $ translate-cli -t zh the book is on the table\n+ \\t \u7897\u662f\u5728\u684c\u5b50\u4e0a\u3002\n+\n+ Available languages: \\n\n+ \\b\n+ \\t https://en.wikipedia.org/wiki/ISO_639-1\n+ \\t Examples: (e.g. en, ja, ko, pt, zh, zh-TW, ...)\n+ \"\"\"\n+ text = ' '.join(text)\n+ translator = Translator(from_lang=from_lang, to_lang=to_lang)\n \n-TRANSLATION_FROM_DEFAULT = 'en'\n-TRANSLATION_TO_DEFAULT = 'zh'\n-HELPER_LANGUAGES = '(e.g. en, ja, ko, pt, zh, zh-TW, ...)'\n-TRANSLATION_CLIENT = 'translate-cli'\n-MAIN_FILE = '__main__'\n-\n-\n-def main(defvals=None):\n- if not defvals:\n- defvals = {'f': TRANSLATION_FROM_DEFAULT, 't': TRANSLATION_TO_DEFAULT}\n-\n- parser = argparse.ArgumentParser(description=__doc__)\n- parser.add_argument(\n- '-t', '--to', dest='to_lang', type=str, default=defvals['t'],\n- help='To language {}. Default is {}.'.format(HELPER_LANGUAGES, defvals['t'])\n- )\n- parser.add_argument(\n- '-f', '--from', dest='from_lang', type=str, default=defvals['f'],\n- help='From language {}. 
Default is {}.'.format(HELPER_LANGUAGES, defvals['f'])\n- )\n- parser.add_argument(\n- 'texts', metavar='text', nargs='+',\n- help='a string to translate(use \"\" when it\\'s a sentence)'\n- )\n-\n- if TRANSLATION_CLIENT in sys.argv[0] or MAIN_FILE in sys.argv[0]:\n- sys.argv.pop(0)\n-\n- parsed_args = parser.parse_args(sys.argv)\n-\n- translator = Translator(from_lang=parsed_args.from_lang, to_lang=parsed_args.to_lang)\n- text = ' '.join(parsed_args.texts)\n translation = translator.translate(text)\n if sys.version_info.major == 2:\n translation = translation.encode(locale.getpreferredencoding())\n \n- sys.stdout.write(translation)\n- sys.stdout.write(\"\\n\")\n+ click.echo(translation)\n+\n return translation\ndiff --git a/translate/version.py b/translate/version.py\nnew file mode 100644\n--- /dev/null\n+++ b/translate/version.py\n@@ -0,0 +1 @@\n+__version__ = \"3.3.0\"\n", "test_patch": "diff --git a/tests/conftest.py b/tests/conftest.py\n--- a/tests/conftest.py\n+++ b/tests/conftest.py\n@@ -1,6 +1,12 @@\n #!/usr/bin/env python\n # encoding: utf-8\n import pytest\n+from click.testing import CliRunner\n+\n+\n+@pytest.fixture\n+def cli_runner():\n+ return CliRunner()\n \n \n @pytest.fixture\ndiff --git a/tests/fixtures/cassettes/test_main_language_to_translate_required.yaml b/tests/fixtures/cassettes/test_main_language_to_translate_required.yaml\nnew file mode 100644\n--- /dev/null\n+++ b/tests/fixtures/cassettes/test_main_language_to_translate_required.yaml\n@@ -0,0 +1,46 @@\n+interactions:\n+- request:\n+ body: null\n+ headers:\n+ Accept: ['*/*']\n+ Accept-Encoding: ['gzip, deflate']\n+ Connection: [keep-alive]\n+ User-Agent: ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.19 (KHTML,\n+ like Gecko) Chrome/18.0.1025.168 Safari/535.19']\n+ method: GET\n+ uri: http://api.mymemory.translated.net/get?q=hello+world&langpair=autodetect%7Czh\n+ response:\n+ body: {string: \"\\r\\n{\\\"responseData\\\":{\\\"translatedText\\\":\\\"\\\\u4f60\\\\u597d\\\\uff0c\\\\\\\n+ u4e16\\\\u754c\\\",\\\"match\\\":1,\\\"detectedLanguage\\\":\\\"English\\\"},\\\"quotaFinished\\\"\\\n+ :false,\\\"responseDetails\\\":\\\"\\\",\\\"responseStatus\\\":200,\\\"responderId\\\":\\\"\\\n+ 238\\\",\\\"matches\\\":[{\\\"id\\\":\\\"437872254\\\",\\\"segment\\\":\\\"hello world\\\",\\\"translation\\\"\\\n+ :\\\"\\\\u4f60\\\\u597d\\\\uff0c\\\\u4e16\\\\u754c\\\",\\\"quality\\\":\\\"80\\\",\\\"reference\\\"\\\n+ :null,\\\"usage-count\\\":22,\\\"subject\\\":\\\"All\\\",\\\"created-by\\\":\\\"MateCat\\\",\\\"\\\n+ last-updated-by\\\":\\\"MateCat\\\",\\\"create-date\\\":\\\"2016-09-30 08:10:55\\\",\\\"last-update-date\\\"\\\n+ :\\\"2016-09-30 08:10:55\\\",\\\"match\\\":1},{\\\"id\\\":\\\"433405804\\\",\\\"segment\\\":\\\"\\\n+ hello world.\\\",\\\"translation\\\":\\\"\\\\u4e16\\\\u754c\\\\uff0c\\\\u4f60\\\\u865f\\\\uff01\\\"\\\n+ ,\\\"quality\\\":\\\"74\\\",\\\"reference\\\":null,\\\"usage-count\\\":1,\\\"subject\\\":\\\"All\\\"\\\n+ ,\\\"created-by\\\":\\\"MateCat\\\",\\\"last-updated-by\\\":\\\"MateCat\\\",\\\"create-date\\\"\\\n+ :\\\"2015-07-09 09:14:23\\\",\\\"last-update-date\\\":\\\"2015-07-09 09:14:23\\\",\\\"match\\\"\\\n+ :0.99},{\\\"id\\\":\\\"464325265\\\",\\\"segment\\\":\\\"Hello World\\\",\\\"translation\\\":\\\"\\\n+ Hello World\\\",\\\"quality\\\":\\\"74\\\",\\\"reference\\\":null,\\\"usage-count\\\":20,\\\"\\\n+ subject\\\":\\\"All\\\",\\\"created-by\\\":\\\"MateCat\\\",\\\"last-updated-by\\\":\\\"MateCat\\\"\\\n+ ,\\\"create-date\\\":\\\"2017-10-29 
06:37:33\\\",\\\"last-update-date\\\":\\\"2017-10-29\\\n+ \\ 06:37:33\\\",\\\"match\\\":0.98}]}\"}\n+ headers:\n+ Access-Control-Allow-Origin: ['*']\n+ Cache-Control: ['no-cache, no-store, max-age=0, must-revalidate']\n+ Connection: [close]\n+ Content-Length: ['1077']\n+ Content-Type: [application/json; charset=utf-8]\n+ Date: ['Sat, 25 Nov 2017 11:10:54 GMT']\n+ Expires: ['Fri, 01 Jan 1990 00:00:00 GMT']\n+ Pragma: [no-cache]\n+ Server: [Apache/2.4.10 (Debian)]\n+ X-Backend-Content-Length: ['10']\n+ X-Embedded-Status: ['200']\n+ X-Frame-Options: [SAMEORIGIN]\n+ X-Powered-By: [PHP/5.6.30-0+deb8u1]\n+ X-XSS-Protection: ['0']\n+ status: {code: 200, message: OK}\n+version: 1\ndiff --git a/tests/fixtures/cassettes/test_main_to_language.yaml b/tests/fixtures/cassettes/test_main_to_language.yaml\n--- a/tests/fixtures/cassettes/test_main_to_language.yaml\n+++ b/tests/fixtures/cassettes/test_main_to_language.yaml\n@@ -8,37 +8,37 @@ interactions:\n User-Agent: ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.19 (KHTML,\n like Gecko) Chrome/18.0.1025.168 Safari/535.19']\n method: GET\n- uri: http://api.mymemory.translated.net/get?q=love&langpair=en%7Czh-TW\n+ uri: http://api.mymemory.translated.net/get?q=love&langpair=autodetect%7Czh-TW\n response:\n- body: {string: !!python/unicode \"\\r\\n{\\\"responseData\\\":{\\\"translatedText\\\":\\\"\\\\\\\n- u7231\\\",\\\"match\\\":1},\\\"quotaFinished\\\":false,\\\"responseDetails\\\":\\\"\\\",\\\"responseStatus\\\"\\\n- :200,\\\"responderId\\\":\\\"235\\\",\\\"matches\\\":[{\\\"id\\\":\\\"436719800\\\",\\\"segment\\\"\\\n- :\\\"love\\\",\\\"translation\\\":\\\"\\\\u7231\\\",\\\"quality\\\":\\\"74\\\",\\\"reference\\\":null,\\\"\\\n- usage-count\\\":25,\\\"subject\\\":\\\"All\\\",\\\"created-by\\\":\\\"MateCat\\\",\\\"last-updated-by\\\"\\\n- :\\\"MateCat\\\",\\\"create-date\\\":\\\"2016-06-14 15:28:03\\\",\\\"last-update-date\\\"\\\n- :\\\"2016-06-14 15:28:03\\\",\\\"match\\\":1},{\\\"id\\\":\\\"431321273\\\",\\\"segment\\\":\\\"\\\n- love\\\",\\\"translation\\\":\\\"w ho liau\\\",\\\"quality\\\":\\\"74\\\",\\\"reference\\\":null,\\\"\\\n- usage-count\\\":1,\\\"subject\\\":\\\"General\\\",\\\"created-by\\\":\\\"MateCat\\\",\\\"last-updated-by\\\"\\\n- :\\\"\\\",\\\"create-date\\\":\\\"2014-07-26 17:17:27\\\",\\\"last-update-date\\\":\\\"2014-08-12\\\n- \\ 06:47:10\\\",\\\"match\\\":0.99},{\\\"id\\\":\\\"461350265\\\",\\\"segment\\\":\\\"Love\\\",\\\"\\\n- translation\\\":\\\"\\\\u7231\\\\u610f\\\",\\\"quality\\\":\\\"74\\\",\\\"reference\\\":null,\\\"\\\n- usage-count\\\":1,\\\"subject\\\":\\\"All\\\",\\\"created-by\\\":\\\"MateCat\\\",\\\"last-updated-by\\\"\\\n- :\\\"MateCat\\\",\\\"create-date\\\":\\\"2017-06-22 11:34:29\\\",\\\"last-update-date\\\"\\\n- :\\\"2017-06-22 11:34:29\\\",\\\"match\\\":0.98}]}\"}\n+ body: {string: \"\\r\\n{\\\"responseData\\\":{\\\"translatedText\\\":\\\"\\\\u7231\\\",\\\"match\\\"\\\n+ :1,\\\"detectedLanguage\\\":\\\"English\\\"},\\\"quotaFinished\\\":false,\\\"responseDetails\\\"\\\n+ :\\\"\\\",\\\"responseStatus\\\":200,\\\"responderId\\\":\\\"235\\\",\\\"matches\\\":[{\\\"id\\\"\\\n+ :\\\"436719800\\\",\\\"segment\\\":\\\"love\\\",\\\"translation\\\":\\\"\\\\u7231\\\",\\\"quality\\\"\\\n+ :\\\"74\\\",\\\"reference\\\":null,\\\"usage-count\\\":25,\\\"subject\\\":\\\"All\\\",\\\"created-by\\\"\\\n+ :\\\"MateCat\\\",\\\"last-updated-by\\\":\\\"MateCat\\\",\\\"create-date\\\":\\\"2016-06-14\\\n+ \\ 15:28:03\\\",\\\"last-update-date\\\":\\\"2016-06-14 15:28:03\\\",\\\"match\\\":1},{\\\"\\\n+ 
id\\\":\\\"431321273\\\",\\\"segment\\\":\\\"love\\\",\\\"translation\\\":\\\"w ho liau\\\",\\\"quality\\\"\\\n+ :\\\"74\\\",\\\"reference\\\":null,\\\"usage-count\\\":1,\\\"subject\\\":\\\"General\\\",\\\"created-by\\\"\\\n+ :\\\"MateCat\\\",\\\"last-updated-by\\\":\\\"\\\",\\\"create-date\\\":\\\"2014-07-26 17:17:27\\\"\\\n+ ,\\\"last-update-date\\\":\\\"2014-08-12 06:47:10\\\",\\\"match\\\":0.99},{\\\"id\\\":\\\"464574969\\\"\\\n+ ,\\\"segment\\\":\\\"Love\\\",\\\"translation\\\":\\\"\\\\u559c\\\\u6b22\\\",\\\"quality\\\":\\\"74\\\"\\\n+ ,\\\"reference\\\":null,\\\"usage-count\\\":9,\\\"subject\\\":\\\"All\\\",\\\"created-by\\\":\\\"\\\n+ MateCat\\\",\\\"last-updated-by\\\":\\\"MateCat\\\",\\\"create-date\\\":\\\"2017-11-25 11:44:15\\\"\\\n+ ,\\\"last-update-date\\\":\\\"2017-11-25 11:44:15\\\",\\\"match\\\":0.98}]}\"}\n headers:\n- access-control-allow-origin: ['*']\n- cache-control: ['no-cache, no-store, max-age=0, must-revalidate']\n- connection: [close]\n- content-length: ['948']\n- content-type: [application/json; charset=utf-8]\n- date: ['Mon, 13 Nov 2017 13:32:31 GMT']\n- expires: ['Fri, 01 Jan 1990 00:00:00 GMT']\n- pragma: [no-cache]\n- server: [Apache/2.4.10 (Debian)]\n- x-backend-content-length: ['10']\n- x-embedded-status: ['200']\n- x-frame-options: [SAMEORIGIN]\n- x-powered-by: [PHP/5.6.30-0+deb8u1]\n- x-xss-protection: ['0']\n+ Access-Control-Allow-Origin: ['*']\n+ Cache-Control: ['no-cache, no-store, max-age=0, must-revalidate']\n+ Connection: [close]\n+ Content-Length: ['977']\n+ Content-Type: [application/json; charset=utf-8]\n+ Date: ['Sat, 25 Nov 2017 11:11:51 GMT']\n+ Expires: ['Fri, 01 Jan 1990 00:00:00 GMT']\n+ Pragma: [no-cache]\n+ Server: [Apache/2.4.10 (Debian)]\n+ X-Backend-Content-Length: ['10']\n+ X-Embedded-Status: ['200']\n+ X-Frame-Options: [SAMEORIGIN]\n+ X-Powered-By: [PHP/5.6.30-0+deb8u1]\n+ X-XSS-Protection: ['0']\n status: {code: 200, message: OK}\n version: 1\ndiff --git a/tests/test_cli.py b/tests/test_cli.py\ndeleted file mode 100644\n--- a/tests/test_cli.py\n+++ /dev/null\n@@ -1,19 +0,0 @@\n-#!/usr/bin/env python\n-# encoding: utf-8\n-from __future__ import unicode_literals\n-import subprocess\n-try:\n- from unittest import mock\n-except Exception:\n- import mock\n-\n-\n-def test_command_line_complete():\n- # This test is impossible mock direct because is a subprocess\n- expected = u'\u7897\u662f\u5728\u684c\u5b50\u4e0a\u3002'\n- subprocess.check_output = mock.create_autospec(subprocess.check_output, return_value=expected)\n- result = subprocess.check_output(\n- [\"./translate-cli\", '--from', 'en', '--to', 'zh-TW',\n- \"The\", \"book\", \"is\", \"on\", \"the\", \"table.\"]\n- )\n- assert expected == result\ndiff --git a/tests/test_main.py b/tests/test_main.py\n--- a/tests/test_main.py\n+++ b/tests/test_main.py\n@@ -1,6 +1,6 @@\n #!/usr/bin/env python\n # encoding: utf-8\n-import sys\n+from __future__ import unicode_literals\n \n from translate.main import main\n \n@@ -8,21 +8,18 @@\n \n \n @vcr.use_cassette\n-def test_main_take_zh_as_default_language():\n- sys.argv = ['hello', 'world']\n- result = main()\n- assert '\u4f60\u597d\uff0c\u4e16\u754c' == result\n+def test_main_language_to_translate_required(cli_runner):\n+ result = cli_runner.invoke(main, ['hello', 'world'], input='zh')\n+ assert 'Translate to []: zh\\n\u4f60\u597d\uff0c\u4e16\u754c\\n' == result.output\n \n \n @vcr.use_cassette\n-def test_main_to_language():\n- sys.argv = ['-t', 'zh-TW', 'love']\n- result = main()\n- assert '\u7231' == result\n+def 
test_main_to_language(cli_runner):\n+ result = cli_runner.invoke(main, ['-t', 'zh-TW', 'love'])\n+ assert '\u7231\\n' == result.output\n \n \n @vcr.use_cassette\n-def test_main_from_language():\n- sys.argv = ['--from', 'ja', '\u7f8e']\n- result = main()\n- assert '\u7f8e' == result\n+def test_main_from_language(cli_runner):\n+ result = cli_runner.invoke(main, ['--from', 'ja', '--to', 'zh', '\u7f8e'])\n+ assert '\u7f8e\\n' == result.output\ndiff --git a/tests/vcr_conf.py b/tests/vcr_conf.py\n--- a/tests/vcr_conf.py\n+++ b/tests/vcr_conf.py\n@@ -1,9 +1,4 @@\n #!/usr/bin/env python\n-# ----------------------------------------------------------------------------\n-# \"THE BEER-WARE LICENSE\" (Revision 42):\n-# <terry.yinzhe@gmail.com> wrote this file. As long as you retain this notice you\n-# can do whatever you want with this stuff. If we meet some day, and you think\n-# this stuff is worth it, you can buy me a beer in return to Terry Yin.\n import os\n \n from vcr import VCR\n", "problem_statement": "", "hints_text": "", "created_at": "2017-11-25T00:05:54Z"}
PythonDataset/test/wampy-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "noisyboiler/wampy", "pull_number": 61, "instance_id": "noisyboiler__wampy-61", "issue_numbers": "", "base_commit": "92f103784a49312584532931c7feffbca14f557c", "patch": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,14 +37,9 @@\n install_requires=[\n \"six==1.10.0\",\n \"simplejson==3.11.1\",\n+ \"gevent>=1.1\"\n ],\n extras_require={\n- ':python_version == \"2.7\"': [\n- \"eventlet<0.21.0\",\n- ],\n- ':python_version >= \"3\"': [\n- \"eventlet>=0.21.0\",\n- ],\n 'dev': [\n \"crossbar==0.15.0\",\n \"autobahn==0.17.2\",\ndiff --git a/wampy/__init__.py b/wampy/__init__.py\n--- a/wampy/__init__.py\n+++ b/wampy/__init__.py\n@@ -5,7 +5,7 @@\n # Set default logging handler to avoid \"No handler found\" warnings.\n import logging\n \n-import eventlet\n+import gevent.monkey\n \n \n try: # Python 2.7+\n@@ -21,5 +21,5 @@ def emit(self, record):\n root = logging.getLogger(__name__)\n root.addHandler(NullHandler())\n \n-root.warning('eventlet about to monkey patched your environment')\n-eventlet.monkey_patch()\n+root.warning('gevent about to monkey patched your environment')\n+gevent.monkey.patch_all()\ndiff --git a/wampy/session.py b/wampy/session.py\n--- a/wampy/session.py\n+++ b/wampy/session.py\n@@ -5,7 +5,8 @@\n import logging\n from functools import partial\n \n-import eventlet\n+import gevent\n+import gevent.queue\n \n from wampy.errors import ConnectionError, WampProtocolError\n from wampy.messages import MESSAGE_TYPE_MAP\n@@ -65,7 +66,7 @@ def __init__(self, client, router, connection, message_handler):\n # spawn a green thread to listen for incoming messages over\n # a connection and put them on a queue to be processed\n self._managed_thread = None\n- self._message_queue = eventlet.Queue()\n+ self._message_queue = gevent.queue.Queue()\n self._listen(self.connection, self._message_queue)\n \n @property\n@@ -114,8 +115,8 @@ def send_message(self, message_obj):\n \n def recv_message(self, timeout=5):\n try:\n- message = self._wait_for_message(timeout)\n- except eventlet.Timeout:\n+ message = self._message_queue.get(timeout=timeout)\n+ except gevent.queue.Empty:\n raise WampProtocolError(\n \"no message returned (timed-out in {})\".format(timeout)\n )\n@@ -163,33 +164,17 @@ def connection_handler():\n frame = connection.receive()\n if frame:\n message = frame.payload\n- handler = partial(\n- self.message_handler.handle_message,\n- message,\n- self.client\n- )\n- eventlet.spawn(handler)\n+ gevent.spawn(self.message_handler.handle_message,\n+ message, self.client)\n except (\n SystemExit, KeyboardInterrupt, ConnectionError,\n WampProtocolError,\n ):\n break\n \n- gthread = eventlet.spawn(connection_handler)\n+ gthread = gevent.spawn(connection_handler)\n self._managed_thread = gthread\n \n- def _wait_for_message(self, timeout):\n- q = self._message_queue\n-\n- with eventlet.Timeout(timeout):\n- while q.qsize() == 0:\n- # if the expected message is not there, switch context to\n- # allow other threads to continue working to fetch it for us\n- eventlet.sleep()\n-\n- message = q.get()\n- return message\n-\n def _subscribe_to_topic(self, handler, topic):\n message = Subscribe(topic=topic)\n request_id = message.request_id\ndiff --git a/wampy/transports/websocket/connection.py b/wampy/transports/websocket/connection.py\n--- a/wampy/transports/websocket/connection.py\n+++ b/wampy/transports/websocket/connection.py\n@@ -9,7 +9,7 @@\n from base64 import encodestring\n from socket import error as socket_error\n \n-import eventlet\n+import gevent\n \n from 
wampy.constants import WEBSOCKET_SUBPROTOCOLS, WEBSOCKET_VERSION\n from wampy.errors import (\n@@ -69,7 +69,7 @@ def receive(self, bufsize=1):\n \n try:\n bytes = self.socket.recv(bufsize)\n- except eventlet.greenlet.GreenletExit as exc:\n+ except gevent.greenlet.GreenletExit as exc:\n raise ConnectionError('Connection closed: \"{}\"'.format(exc))\n except socket.timeout as e:\n message = str(e)\n@@ -150,9 +150,9 @@ def _upgrade(self):\n self.socket.send(handshake.encode())\n \n try:\n- with eventlet.Timeout(5):\n+ with gevent.Timeout(5):\n self.status, self.headers = self._read_handshake_response()\n- except eventlet.Timeout:\n+ except gevent.Timeout:\n raise WampyError(\n 'No response after handshake \"{}\"'.format(handshake)\n )\n@@ -208,7 +208,7 @@ def read_line():\n while True:\n # we need this to guarantee we can context switch back to the\n # Timeout.\n- eventlet.sleep()\n+ gevent.sleep(0.01)\n \n received_bytes = read_line()\n if received_bytes == b'\\r\\n':\n", "test_patch": "diff --git a/test/helpers.py b/test/helpers.py\n--- a/test/helpers.py\n+++ b/test/helpers.py\n@@ -2,13 +2,13 @@\n # License, v. 2.0. If a copy of the MPL was not distributed with this\n # file, You can obtain one at http://mozilla.org/MPL/2.0/.\n \n-import eventlet\n+import gevent\n \n \n def assert_stops_raising(\n fn, exception_type=Exception, timeout=5, interval=0.1):\n \n- with eventlet.Timeout(timeout):\n+ with gevent.Timeout(timeout):\n while True:\n try:\n fn()\n@@ -16,4 +16,4 @@ def assert_stops_raising(\n pass\n else:\n return\n- eventlet.sleep(interval)\n+ gevent.sleep(interval)\ndiff --git a/wampy/testing/helpers.py b/wampy/testing/helpers.py\n--- a/wampy/testing/helpers.py\n+++ b/wampy/testing/helpers.py\n@@ -2,7 +2,7 @@\n # License, v. 2.0. If a copy of the MPL was not distributed with this\n # file, You can obtain one at http://mozilla.org/MPL/2.0/.\n \n-import eventlet\n+import gevent\n \n from wampy.message_handler import MessageHandler\n \n@@ -10,36 +10,36 @@\n \n \n def wait_for_subscriptions(client, number_of_subscriptions):\n- with eventlet.Timeout(TIMEOUT):\n+ with gevent.Timeout(TIMEOUT):\n while (\n len(client.session.subscription_map.keys())\n < number_of_subscriptions\n ):\n- eventlet.sleep()\n+ gevent.sleep(0.01)\n \n \n def wait_for_registrations(client, number_of_registrations):\n- with eventlet.Timeout(TIMEOUT):\n+ with gevent.Timeout(TIMEOUT):\n while (\n len(client.session.registration_map.keys())\n < number_of_registrations\n ):\n- eventlet.sleep()\n+ gevent.sleep(0.01)\n \n \n def wait_for_session(client):\n- with eventlet.Timeout(TIMEOUT):\n+ with gevent.Timeout(TIMEOUT):\n while client.session.id is None:\n- eventlet.sleep()\n+ gevent.sleep(0.01)\n \n \n def wait_for_messages(client, number_of_messages):\n messages_received = (\n client.session.message_handler.messages_received)\n \n- with eventlet.Timeout(TIMEOUT):\n+ with gevent.Timeout(TIMEOUT):\n while len(messages_received) < number_of_messages:\n- eventlet.sleep()\n+ gevent.sleep(0.01)\n \n return messages_received\n \n", "problem_statement": "", "hints_text": "", "created_at": "2018-03-07T06:36:26Z"}
PythonDataset/train/DingoLingo-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "Raptor123471/DingoLingo", "pull_number": 29, "instance_id": "Raptor123471__DingoLingo-29", "issue_numbers": "", "base_commit": "7f240327378e1aab157aed6ee9185674c15926ad", "patch": "diff --git a/config/config.py b/config/config.py\n--- a/config/config.py\n+++ b/config/config.py\n@@ -8,6 +8,8 @@\n \n SUPPORTED_EXTENSIONS = ('.webm', '.mp4', '.mp3', '.avi', '.wav', '.m4v', '.ogg', '.mov')\n \n+MAX_SONG_PRELOAD = 5 #maximum of 25\n+\n COOKIE_PATH = \"/config/cookies/cookies.txt\"\n \n STARTUP_MESSAGE = \"Starting Bot...\"\ndiff --git a/musicbot/audiocontroller.py b/musicbot/audiocontroller.py\n--- a/musicbot/audiocontroller.py\n+++ b/musicbot/audiocontroller.py\n@@ -1,5 +1,8 @@\n import discord\n-import youtube_dlc\n+import youtube_dl\n+\n+import asyncio\n+import concurrent.futures\n \n from musicbot import linkutils\n from musicbot import utils\n@@ -67,12 +70,12 @@ def next_song(self, error):\n async def play_song(self, song):\n \"\"\"Plays a song object\"\"\"\n \n- if song.origin == linkutils.Origins.Playlist:\n+ if song.info.title == None:\n if song.host == linkutils.Sites.Spotify:\n- conversion = await self.search_youtube(linkutils.convert_spotify(song.info.webpage_url))\n+ conversion = self.search_youtube(await linkutils.convert_spotify(song.info.webpage_url))\n song.info.webpage_url = conversion\n \n- downloader = youtube_dlc.YoutubeDL(\n+ downloader = youtube_dl.YoutubeDL(\n {'format': 'bestaudio', 'title': True, \"cookiefile\": config.COOKIE_PATH})\n r = downloader.extract_info(\n song.info.webpage_url, download=False)\n@@ -94,6 +97,11 @@ async def play_song(self, song):\n self.guild.voice_client.source)\n self.voice_client.source.volume = float(self.volume) / 100.0\n \n+ self.playlist.playque.popleft()\n+\n+ for song in list(self.playlist.playque)[:config.MAX_SONG_PRELOAD]:\n+ asyncio.ensure_future(self.preload(song))\n+\n async def process_song(self, track):\n \"\"\"Adds the track to the playlist instance and plays it, if it is the first song\"\"\"\n \n@@ -102,11 +110,9 @@ async def process_song(self, track):\n \n if is_playlist != linkutils.Playlist_Types.Unknown:\n \n- queue_scan = len(self.playlist.playque)\n-\n await self.process_playlist(is_playlist, track)\n \n- if queue_scan == 0:\n+ if self.current_song == None:\n await self.play_song(self.playlist.playque[0])\n print(\"Playing {}\".format(track))\n \n@@ -118,22 +124,22 @@ async def process_song(self, track):\n if linkutils.get_url(track) is not None:\n return None\n \n- track = await self.search_youtube(track)\n+ track = self.search_youtube(track)\n \n if host == linkutils.Sites.Spotify:\n- title = linkutils.convert_spotify(track)\n- track = await self.search_youtube(title)\n+ title = await linkutils.convert_spotify(track)\n+ track = self.search_youtube(title)\n \n if host == linkutils.Sites.YouTube:\n track = track.split(\"&list=\")[0]\n \n try:\n- downloader = youtube_dlc.YoutubeDL(\n+ downloader = youtube_dl.YoutubeDL(\n {'format': 'bestaudio', 'title': True, \"cookiefile\": config.COOKIE_PATH})\n r = downloader.extract_info(\n track, download=False)\n except:\n- downloader = youtube_dlc.YoutubeDL(\n+ downloader = youtube_dl.YoutubeDL(\n {'title': True, \"cookiefile\": config.COOKIE_PATH})\n r = downloader.extract_info(\n track, download=False)\n@@ -148,7 +154,7 @@ async def process_song(self, track):\n 'title'), duration=r.get('duration'), webpage_url=r.get('webpage_url'), thumbnail=thumbnail)\n \n self.playlist.add(song)\n- if len(self.playlist.playque) == 1:\n+ if self.current_song == None:\n 
print(\"Playing {}\".format(track))\n await self.play_song(song)\n \n@@ -171,7 +177,7 @@ async def process_playlist(self, playlist_type, url):\n \"cookiefile\": config.COOKIE_PATH\n }\n \n- with youtube_dlc.YoutubeDL(options) as ydl:\n+ with youtube_dl.YoutubeDL(options) as ydl:\n r = ydl.extract_info(url, download=False)\n \n for entry in r['entries']:\n@@ -185,7 +191,7 @@ async def process_playlist(self, playlist_type, url):\n self.playlist.add(song)\n \n if playlist_type == linkutils.Playlist_Types.Spotify_Playlist:\n- links = linkutils.get_spotify_playlist(url)\n+ links = await linkutils.get_spotify_playlist(url)\n for link in links:\n song = Song(linkutils.Origins.Playlist,\n linkutils.Sites.Spotify, webpage_url=link)\n@@ -196,7 +202,7 @@ async def process_playlist(self, playlist_type, url):\n 'format': 'bestaudio/best',\n 'extract_flat': True\n }\n- with youtube_dlc.YoutubeDL(options) as ydl:\n+ with youtube_dl.YoutubeDL(options) as ydl:\n r = ydl.extract_info(url, download=False)\n \n for entry in r['entries']:\n@@ -208,7 +214,38 @@ async def process_playlist(self, playlist_type, url):\n \n self.playlist.add(song)\n \n- async def search_youtube(self, title):\n+ for song in list(self.playlist.playque)[:config.MAX_SONG_PRELOAD]:\n+ asyncio.ensure_future(self.preload(song))\n+\n+ async def preload(self, song):\n+\n+ if song.info.title != None:\n+ return\n+\n+ def down(song):\n+\n+ if song.host == linkutils.Sites.Spotify:\n+ song.info.webpage_url = self.search_youtube(song.info.title)\n+\n+ downloader = youtube_dl.YoutubeDL(\n+ {'format': 'bestaudio', 'title': True, \"cookiefile\": config.COOKIE_PATH})\n+ r = downloader.extract_info(\n+ song.info.webpage_url, download=False)\n+ song.base_url = r.get('url')\n+ song.info.uploader = r.get('uploader')\n+ song.info.title = r.get('title')\n+ song.info.duration = r.get('duration')\n+ song.info.webpage_url = r.get('webpage_url')\n+ song.info.thumbnail = r.get('thumbnails')[0]['url']\n+\n+ if song.host == linkutils.Sites.Spotify:\n+ song.info.title = await linkutils.convert_spotify(song.info.webpage_url)\n+\n+ loop = asyncio.get_event_loop()\n+ executor = concurrent.futures.ThreadPoolExecutor(max_workers=config.MAX_SONG_PRELOAD)\n+ await asyncio.wait(fs={loop.run_in_executor(executor, down, song)}, return_when=asyncio.ALL_COMPLETED)\n+\n+ def search_youtube(self, title):\n \"\"\"Searches youtube for the video title and returns the first results video link\"\"\"\n \n # if title is already a link\n@@ -222,7 +259,7 @@ async def search_youtube(self, title):\n \"cookiefile\": config.COOKIE_PATH\n }\n \n- with youtube_dlc.YoutubeDL(options) as ydl:\n+ with youtube_dl.YoutubeDL(options) as ydl:\n r = ydl.extract_info(title, download=False)\n \n videocode = r['entries'][0]['id']\ndiff --git a/musicbot/commands/general.py b/musicbot/commands/general.py\n--- a/musicbot/commands/general.py\n+++ b/musicbot/commands/general.py\n@@ -115,7 +115,7 @@ async def _change_channel(self, ctx):\n async def _ping(self, ctx):\n await ctx.send(\"Pong\")\n \n- @commands.command(name='setting', description=config.HELP_SHUFFLE_LONG, help=config.HELP_SETTINGS_SHORT, aliases=['settings', 'set', 'st'])\n+ @commands.command(name='setting', description=config.HELP_SHUFFLE_LONG, help=config.HELP_SETTINGS_SHORT, aliases=['settings', 'set'])\n @has_permissions(administrator=True)\n async def _settings(self, ctx, *args):\n \ndiff --git a/musicbot/commands/music.py b/musicbot/commands/music.py\n--- a/musicbot/commands/music.py\n+++ b/musicbot/commands/music.py\n@@ -1,13 +1,14 @@\n 
import discord\n from discord.ext import commands\n \n+import asyncio\n+\n from musicbot import utils\n from musicbot import linkutils\n from config import config\n \n from musicbot.commands.general import General\n \n-import requests\n import datetime\n \n \n@@ -21,7 +22,7 @@ class Music(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n \n- @commands.command(name='play', description=config.HELP_YT_LONG, help=config.HELP_YT_SHORT, aliases=['p', 'yt', 'P', 'pl'])\n+ @commands.command(name='play', description=config.HELP_YT_LONG, help=config.HELP_YT_SHORT, aliases=['p', 'yt', 'pl'])\n async def _play_song(self, ctx, *, track: str):\n \n if(await utils.is_connected(ctx) == None):\n@@ -47,7 +48,7 @@ async def _play_song(self, ctx, *, track: str):\n \n if song.origin == linkutils.Origins.Default:\n \n- if len(audiocontroller.playlist.playque) == 1:\n+ if audiocontroller.current_song != None and len(audiocontroller.playlist.playque) == 0:\n await ctx.send(embed=song.info.format_output(config.SONGINFO_NOW_PLAYING))\n else:\n await ctx.send(embed=song.info.format_output(config.SONGINFO_QUEUE_ADDED))\n@@ -55,7 +56,7 @@ async def _play_song(self, ctx, *, track: str):\n elif song.origin == linkutils.Origins.Playlist:\n await ctx.send(config.SONGINFO_PLAYLIST_QUEUED)\n \n- @commands.command(name='loop', description=config.HELP_LOOP_LONG, help=config.HELP_LOOP_SHORT, aliases=['l', 'L'])\n+ @commands.command(name='loop', description=config.HELP_LOOP_LONG, help=config.HELP_LOOP_SHORT, aliases=['l'])\n async def _loop(self, ctx):\n \n current_guild = utils.get_guild(self.bot, ctx.message)\n@@ -93,6 +94,9 @@ async def _shuffle(self, ctx):\n audiocontroller.playlist.shuffle()\n await ctx.send(\"Shuffled queue :twisted_rightwards_arrows:\")\n \n+ for song in list(audiocontroller.playlist.playque)[:config.MAX_SONG_PRELOAD]:\n+ asyncio.ensure_future(audiocontroller.preload(song))\n+\n @commands.command(name='pause', description=config.HELP_PAUSE_LONG, help=config.HELP_PAUSE_SHORT)\n async def _pause(self, ctx):\n current_guild = utils.get_guild(self.bot, ctx.message)\n@@ -108,7 +112,7 @@ async def _pause(self, ctx):\n current_guild.voice_client.pause()\n await ctx.send(\"Playback Paused :pause_button:\")\n \n- @commands.command(name='queue', description=config.HELP_QUEUE_LONG, help=config.HELP_QUEUE_SHORT, aliases=['playlist', 'q', 'Q'])\n+ @commands.command(name='queue', description=config.HELP_QUEUE_LONG, help=config.HELP_QUEUE_SHORT, aliases=['playlist', 'q'])\n async def _queue(self, ctx):\n current_guild = utils.get_guild(self.bot, ctx.message)\n \n@@ -124,13 +128,17 @@ async def _queue(self, ctx):\n \n playlist = utils.guild_to_audiocontroller[current_guild].playlist\n \n+ #Embeds are limited to 25 fields\n+ if config.MAX_SONG_PRELOAD > 25:\n+ config.MAX_SONG_PRELOAD = 25\n+\n embed = discord.Embed(title=\":scroll: Queue [{}]\".format(\n len(playlist.playque)), color=config.EMBED_COLOR, inline=False)\n \n counter = 1\n- for song in list(playlist.playque)[:10]:\n+ for song in list(playlist.playque)[:config.MAX_SONG_PRELOAD]:\n if song.info.title is None:\n- embed.add_field(name=\"{}.\".format(str(counter)), value=\"[(PL) | {}]({})\".format(\n+ embed.add_field(name=\"{}.\".format(str(counter)), value=\"[{}]({})\".format(\n song.info.webpage_url, song.info.webpage_url), inline=False)\n else:\n embed.add_field(name=\"{}.\".format(str(counter)), value=\"[{}]({})\".format(\n@@ -139,7 +147,7 @@ async def _queue(self, ctx):\n \n await ctx.send(embed=embed)\n \n- @commands.command(name='stop', 
description=config.HELP_STOP_LONG, help=config. HELP_STOP_SHORT)\n+ @commands.command(name='stop', description=config.HELP_STOP_LONG, help=config. HELP_STOP_SHORT, aliases=['st'])\n async def _stop(self, ctx):\n current_guild = utils.get_guild(self.bot, ctx.message)\n \n@@ -154,7 +162,7 @@ async def _stop(self, ctx):\n await utils.guild_to_audiocontroller[current_guild].stop_player()\n await ctx.send(\"Stopped all sessions :octagonal_sign:\")\n \n- @commands.command(name='skip', description=config.HELP_SKIP_LONG, help=config.HELP_SKIP_SHORT, aliases=['s', 'S'])\n+ @commands.command(name='skip', description=config.HELP_SKIP_LONG, help=config.HELP_SKIP_SHORT, aliases=['s'])\n async def _skip(self, ctx):\n current_guild = utils.get_guild(self.bot, ctx.message)\n \ndiff --git a/musicbot/linkutils.py b/musicbot/linkutils.py\n--- a/musicbot/linkutils.py\n+++ b/musicbot/linkutils.py\n@@ -1,4 +1,4 @@\n-import requests\n+import aiohttp\n import re\n from bs4 import BeautifulSoup\n from enum import Enum\n@@ -15,6 +15,12 @@\n except:\n api = False\n \n+url_regex = re.compile(\n+ \"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\")\n+\n+session = aiohttp.ClientSession(\n+ headers={'User-Agent': 'python-requests/2.20.0'})\n+\n \n def clean_sclink(track):\n if track.startswith(\"https://m.\"):\n@@ -24,25 +30,25 @@ def clean_sclink(track):\n return track\n \n \n-def convert_spotify(url):\n- regex = re.compile(\n- \"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\")\n+async def convert_spotify(url):\n \n- if re.search(regex, url):\n- result = regex.search(url)\n+ if re.search(url_regex, url):\n+ result = url_regex.search(url)\n url = result.group(0)\n \n- page = requests.get(url)\n- soup = BeautifulSoup(page.content, 'html.parser')\n+ async with session.get(url) as response:\n \n- title = soup.find('title')\n- title = title.string\n- title = title.replace(', a song by', '').replace(' on Spotify', '')\n+ page = await response.text()\n+ soup = BeautifulSoup(page, 'html.parser')\n+\n+ title = soup.find('title')\n+ title = title.string\n+ title = title.replace('Spotify \u2013 ', '')\n \n- return title\n+ return title\n \n \n-def get_spotify_playlist(url):\n+async def get_spotify_playlist(url):\n \"\"\"Return Spotify_Playlist class\"\"\"\n \n code = url.split('/')[4].split('?')[0]\n@@ -92,11 +98,10 @@ def get_spotify_playlist(url):\n if config.SPOTIFY_ID != \"\" or config.SPOTIFY_SECRET != \"\":\n print(\"ERROR: Check spotify CLIENT_ID and SECRET\")\n \n- headers = {\n- \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\"}\n+ async with session.get(url) as response:\n+ page = await response.text()\n \n- page = requests.get(url, headers=headers)\n- soup = BeautifulSoup(page.content, 'html.parser')\n+ soup = BeautifulSoup(page, 'html.parser')\n \n results = soup.find_all(property=\"music:song\", attrs={\"content\": True})\n \n@@ -157,7 +162,7 @@ def identify_url(url):\n if \"https://open.spotify.com/track\" in url:\n return Sites.Spotify\n \n- if \"https://open.spotify.com/playlist\"in url or \"https://open.spotify.com/album\" in url:\n+ if \"https://open.spotify.com/playlist\" in url or \"https://open.spotify.com/album\" in url:\n return Sites.Spotify_Playlist\n \n if \"bandcamp.com/track/\" in url:\n@@ -183,7 +188,7 @@ def identify_playlist(url):\n if \"playlist?list=\" in url:\n return Playlist_Types.YouTube_Playlist\n \n- if 
\"https://open.spotify.com/playlist\"in url or \"https://open.spotify.com/album\" in url:\n+ if \"https://open.spotify.com/playlist\" in url or \"https://open.spotify.com/album\" in url:\n return Playlist_Types.Spotify_Playlist\n \n if \"bandcamp.com/album/\" in url:\ndiff --git a/musicbot/playlist.py b/musicbot/playlist.py\n--- a/musicbot/playlist.py\n+++ b/musicbot/playlist.py\n@@ -33,7 +33,7 @@ def next(self):\n if len(self.playque) == 0:\n return None\n \n- song_played = self.playque.popleft()\n+ song_played = self.playque[0]\n \n if self.loop == True:\n if song_played != \"Dummy\":\ndiff --git a/musicbot/songinfo.py b/musicbot/songinfo.py\n--- a/musicbot/songinfo.py\n+++ b/musicbot/songinfo.py\n@@ -1,6 +1,7 @@\n import discord\n from discord.ext import commands\n from config import config\n+import datetime\n \n \n class Song():\n@@ -22,8 +23,7 @@ def __init__(self, uploader, title, duration, webpage_url, thumbnail):\n \n def format_output(self, playtype):\n \n- embed = discord.Embed(title=\":musical_note: __**{}**__ :musical_note:\".format(\n- self.title), description=\"***{}***\".format(playtype), url=self.webpage_url, color=config.EMBED_COLOR)\n+ embed = discord.Embed(title=playtype, description=\"[{}]({})\".format(self.title, self.webpage_url), color=config.EMBED_COLOR)\n \n if self.thumbnail is not None:\n embed.set_thumbnail(url=self.thumbnail)\n@@ -31,11 +31,9 @@ def format_output(self, playtype):\n embed.add_field(name=config.SONGINFO_UPLOADER,\n value=self.uploader, inline=False)\n \n- print(self.duration)\n-\n if self.duration is not None:\n embed.add_field(name=config.SONGINFO_DURATION,\n- value=\"{}{}\".format(self.duration, config.SONGINFO_SECONDS), inline=False)\n+ value=\"{}\".format(str(datetime.timedelta(seconds=self.duration))), inline=False)\n else:\n embed.add_field(name=config.SONGINFO_DURATION,\n value=config.SONGINFO_UNKNOWN_DURATION , inline=False)\ndiff --git a/musicbot/utils.py b/musicbot/utils.py\n--- a/musicbot/utils.py\n+++ b/musicbot/utils.py\n@@ -49,7 +49,6 @@ async def connect_to_channel(guild, dest_channel_name, ctx, switch=False, defaul\n async def is_connected(ctx):\n try:\n voice_channel = ctx.guild.voice_client.channel\n- print(voice_channel)\n return voice_channel\n except:\n return None\ndiff --git a/run.py b/run.py\n--- a/run.py\n+++ b/run.py\n@@ -13,7 +13,7 @@\n \n initial_extensions = ['musicbot.commands.music',\n 'musicbot.commands.general', 'musicbot.plugins.button']\n-bot = commands.Bot(command_prefix=config.BOT_PREFIX, pm_help=True)\n+bot = commands.Bot(command_prefix=config.BOT_PREFIX, pm_help=True, case_insensitive=True)\n \n \n if __name__ == '__main__':\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2021-03-21T02:38:53Z"}
PythonDataset/train/Mathics-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "mathics/Mathics", "pull_number": 863, "instance_id": "mathics__Mathics-863", "issue_numbers": "", "base_commit": "b32f7f844a4a9c62662da0a8dbab8fa72222e6c8", "patch": "diff --git a/mathics/builtin/graphs.py b/mathics/builtin/graphs.py\n--- a/mathics/builtin/graphs.py\n+++ b/mathics/builtin/graphs.py\n@@ -5,7 +5,7 @@\n Graphs\n \"\"\"\n \n-# uses GraphViz, if it's installed in your PATH (see pydotplus.graphviz.find_graphviz and http://www.graphviz.org).\n+# uses GraphViz, if it's installed in your PATH (see pydot.graphviz.find_graphviz and http://www.graphviz.org).\n \n from __future__ import unicode_literals\n from __future__ import absolute_import\n@@ -43,14 +43,13 @@ def _shell_layout(G):\n \n def _generic_layout(G, warn):\n try:\n- import pydotplus\n-\n- if pydotplus.graphviz.find_graphviz():\n- return nx.nx_pydot.graphviz_layout(G, prog='dot')\n+ import pydot\n except ImportError:\n pass\n+ else:\n+ return nx.nx_pydot.graphviz_layout(G, prog='dot')\n \n- warn('Could not find pydotplus/dot; graph layout quality might be low.')\n+ warn('Could not find pydot; graph layout quality might be low.')\n return nx.drawing.fruchterman_reingold_layout(G, pos=None, k=1.0)\n \n \n@@ -70,7 +69,10 @@ def _path_layout(G, root):\n \n if not neighbors:\n break\n- v = next(neighbors) if isgenerator(neighbors) else neighbors[0]\n+ try:\n+ v = next(neighbors) if isgenerator(neighbors) else neighbors[0]\n+ except StopIteration:\n+ break\n neighbors = G.neighbors(v)\n \n if k == 0:\n@@ -162,7 +164,7 @@ def _pos_into_box(vertices, pos, min_box, max_box):\n \n zx = (x0 + x1) / 2\n zy = (y0 + y1) / 2\n- s = 1.0 / max((x1 - x0) / dx, (y1 - y0) / dy)\n+ s = 1.0 / max(max(x1 - x0, 1) / dx, (max(y1 - y0, 1)) / dy)\n for k, p in pos.items():\n x, y = p\n new_pos[k] = (cx + (x - zx) * s, cy + (y - zy) * s)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,9 +69,10 @@\n \"sympy>=1.6, < 1.7\",\n \"django >= 1.8, < 1.12\",\n \"mpmath>=1.1.0\",\n+ \"palettable\", # For bar charts, and portable, no-proprietary color palletes\n+ \"pydot\", # For graphs\n \"python-dateutil\",\n \"colorama\",\n- \"palettable\",\n ]\n \n if not ((not is_PyPy and sys.version_info >= (3, 8)) or (is_PyPy and sys.version_info >= (3, 6))):\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2020-09-12T01:57:29Z"}
PythonDataset/train/ProjectAlice-task-instances.jsonl.all ADDED
The diff for this file is too large to render. See raw diff
PythonDataset/train/RESTKnot-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "BiznetGIO/RESTKnot", "pull_number": 126, "instance_id": "BiznetGIO__RESTKnot-126", "issue_numbers": "", "base_commit": "068b0616f0a7b839449138cbebf6f53471b2b2da", "patch": "diff --git a/agent/dnsagent/cli.py b/agent/dnsagent/cli.py\ndeleted file mode 100644\n--- a/agent/dnsagent/cli.py\n+++ /dev/null\n@@ -1,69 +0,0 @@\n-\"\"\"\n-Usage:\n- dnsagent <command> [<args>...]\n-\n-Options:\n- -h, --help display this help and exit\n- -v, --version Print version information and quit\n-\n-Commands:\n- zone Zone Record Configuration Zone Command\n- command CZone onfiguration Command\n- start Starting Agent\n-\n-Run 'dnsagent COMMAND --help' for more information on a command.\n-\"\"\"\n-import logging\n-import os\n-import sys\n-from inspect import getmembers, isclass\n-from logging.handlers import RotatingFileHandler\n-\n-from docopt import docopt\n-import dnsagent.clis\n-from dnsagent import __version__ as VERSION\n-\n-\n-def configure_logger():\n- stdout_handler = logging.StreamHandler(sys.stdout)\n- stdout_format = logging.Formatter(\n- \"[%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s\"\n- )\n- stdout_handler.setFormatter(stdout_format)\n- stdout_handler.setLevel(logging.INFO)\n-\n- root = logging.getLogger()\n- root.addHandler(stdout_handler)\n- root.setLevel(logging.DEBUG)\n-\n-\n-def main():\n- \"\"\"Main CLI entrypoint.\"\"\"\n- configure_logger()\n-\n- options = docopt(__doc__, version=VERSION, options_first=True)\n- command_name = \"\"\n- args = \"\"\n- command_class = \"\"\n-\n- command_name = options.pop(\"<command>\")\n- args = options.pop(\"<args>\")\n-\n- if args is None:\n- args = {}\n-\n- try:\n- module = getattr(dnsagent.clis, command_name)\n- dnsagent.clis = getmembers(module, isclass)\n- command_class = [\n- command[1] for command in dnsagent.clis if command[0] != \"Base\"\n- ][0]\n- except AttributeError as e:\n- raise ValueError(f\"{e}\")\n-\n- command = command_class(options, args)\n- command.execute()\n-\n-\n-if __name__ == \"__main__\":\n- main()\ndiff --git a/agent/dnsagent/clis/__init__.py b/agent/dnsagent/clis/__init__.py\ndeleted file mode 100644\n--- a/agent/dnsagent/clis/__init__.py\n+++ /dev/null\n@@ -1 +0,0 @@\n-from .start import *\ndiff --git a/agent/dnsagent/clis/base.py b/agent/dnsagent/clis/base.py\ndeleted file mode 100644\n--- a/agent/dnsagent/clis/base.py\n+++ /dev/null\n@@ -1,19 +0,0 @@\n-from docopt import docopt\n-\n-\n-class Base(object):\n- \"\"\"Base class for the commands\"\"\"\n-\n- def __init__(self, options, command_args):\n- \"\"\"\n- Initialize the commands.\n-\n- :param command_args: arguments of the command\n- \"\"\"\n- self.options = options\n- self.args = docopt(self.__doc__, argv=command_args)\n-\n- def execute(self):\n- \"\"\"Execute the commands\"\"\"\n-\n- raise NotImplementedError\ndiff --git a/agent/dnsagent/clis/start.py b/agent/dnsagent/clis/start.py\ndeleted file mode 100644\n--- a/agent/dnsagent/clis/start.py\n+++ /dev/null\n@@ -1,62 +0,0 @@\n-import os\n-import logging\n-\n-from dnsagent.clis.base import Base\n-from dnsagent.libs import kafka as kafka_lib\n-from dnsagent.libs import knot as knot_lib\n-\n-\n-logger = logging.getLogger(__name__)\n-\n-\n-class Start(Base):\n- \"\"\"\n- usage:\n- start\n-\n- Command :\n-\n- Options:\n- -h --help Print usage\n- \"\"\"\n-\n- def connect_kafka(self):\n- broker_host = os.environ.get(\"RESTKNOT_KAFKA_BROKER\")\n- broker_port = os.environ.get(\"RESTKNOT_KAFKA_PORTS\")\n- broker = f\"{broker_host}:{broker_port}\"\n- topic = 
os.environ.get(\"RESTKNOT_KAFKA_TOPIC\")\n-\n- if (broker_host and broker_port) is None:\n- logger.info(\"Can't find kafka host and port\")\n- exit()\n-\n- try:\n- logger.info(\"Connecting to broker : \" + broker)\n- consumer = kafka_lib.get_kafka_consumer(broker, topic)\n- return consumer\n- except Exception as e:\n- logger.info(f\"Can't Connect to broker: {e}\")\n- exit()\n-\n- def take_message(self, consumer):\n- agent_type = os.environ.get(\"RESTKNOT_AGENT_TYPE\")\n-\n- try:\n- for message in consumer:\n- message = message.value\n-\n- agent_type_msg = message[\"agent\"][\"agent_type\"]\n- if agent_type in agent_type_msg:\n-\n- knot_queries = message[\"knot\"]\n- for query in knot_queries:\n- knot_lib.execute(query)\n-\n- consumer.close()\n-\n- except KeyboardInterrupt:\n- print(\"Stopping dnsagent. Press Ctrl+C again to exit\")\n-\n- def execute(self):\n- consumer = self.connect_kafka()\n- self.take_message(consumer)\ndiff --git a/agent/dnsagent/libs/kafka.py b/agent/dnsagent/libs/kafka.py\ndeleted file mode 100644\n--- a/agent/dnsagent/libs/kafka.py\n+++ /dev/null\n@@ -1,16 +0,0 @@\n-from kafka import KafkaConsumer\n-from json import loads\n-\n-\n-def get_kafka_consumer(broker, topic):\n- try:\n- consumer = KafkaConsumer(\n- topic,\n- bootstrap_servers=[broker],\n- auto_offset_reset=\"earliest\",\n- enable_auto_commit=True,\n- value_deserializer=lambda x: loads(x.decode(\"utf-8\")),\n- )\n- return consumer\n- except Exception as e:\n- raise ValueError(f\"{e}\")\ndiff --git a/agent/dnsagent/start.py b/agent/dnsagent/start.py\nnew file mode 100644\n--- /dev/null\n+++ b/agent/dnsagent/start.py\n@@ -0,0 +1,74 @@\n+import os\n+import logging\n+import json\n+import sys\n+\n+from confluent_kafka import Consumer, KafkaException\n+\n+from dnsagent.libs import knot as knot_lib\n+\n+\n+def consume():\n+ brokers = os.environ.get(\"RESTKNOT_KAFKA_BROKERS\")\n+ topic = os.environ.get(\"RESTKNOT_KAFKA_TOPIC\")\n+ group_id = os.environ.get(\"RESTKNOT_KAFKA_GROUP_ID\")\n+ agent_type = os.environ.get(\"RESTKNOT_AGENT_TYPE\")\n+\n+ conf = {\n+ \"bootstrap.servers\": brokers,\n+ \"group.id\": group_id,\n+ \"auto.offset.reset\": \"earliest\",\n+ \"enable.auto.commit\": True,\n+ }\n+\n+ def print_assignment(consumer, partitions):\n+ print(\"Consumer assigned to:\", partitions)\n+\n+ consumer = Consumer(conf)\n+ consumer.subscribe([topic], on_assign=print_assignment)\n+\n+ try:\n+ while True:\n+ message = consumer.poll(timeout=1.0)\n+ if message is None:\n+ continue\n+ if message.error():\n+ raise KafkaException(message.error())\n+\n+ message = message.value()\n+ message = json.loads(message.decode(\"utf-8\"))\n+\n+ agent_type_msg = message[\"agent\"][\"agent_type\"]\n+ if agent_type in agent_type_msg:\n+\n+ knot_queries = message[\"knot\"]\n+ for query in knot_queries:\n+ knot_lib.execute(query)\n+\n+ except KeyboardInterrupt:\n+ print(\" dnsagent stopped. 
Aborted by user\")\n+ finally:\n+ # Close down consumer to commit final offsets.\n+ consumer.close()\n+\n+\n+def configure_logger():\n+ stdout_handler = logging.StreamHandler(sys.stdout)\n+ stdout_format = logging.Formatter(\n+ \"[%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s\"\n+ )\n+ stdout_handler.setFormatter(stdout_format)\n+ stdout_handler.setLevel(logging.INFO)\n+\n+ root = logging.getLogger()\n+ root.addHandler(stdout_handler)\n+ root.setLevel(logging.DEBUG)\n+\n+\n+def main():\n+ configure_logger()\n+ consume()\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/agent/setup.py b/agent/setup.py\n--- a/agent/setup.py\n+++ b/agent/setup.py\n@@ -36,5 +36,5 @@\n include_package_data=True,\n packages=[\"dnsagent\"],\n install_requires=requirements,\n- entry_points={\"console_scripts\": [\"dnsagent = dnsagent.cli:main\"]},\n+ entry_points={\"console_scripts\": [\"dnsagent = dnsagent.start:main\"]},\n )\ndiff --git a/api/app/controllers/api/__init__.py b/api/app/controllers/api/__init__.py\n--- a/api/app/controllers/api/__init__.py\n+++ b/api/app/controllers/api/__init__.py\n@@ -9,6 +9,7 @@\n GetDomainDataId,\n )\n from .health import HealthCheck\n+from .meta import MetaVersion, MetaConfig\n from .record import GetRecordData, GetRecordDataId, RecordAdd, RecordDelete, RecordEdit\n from .ttl import GetTtlData, GetTtlDataId, TtlAdd, TtlDelete, TtlEdit\n from .type_ import GetTypeData, GetTypeDataId, TypeAdd, TypeDelete, TypeEdit\n@@ -19,6 +20,8 @@\n \n \n api.add_resource(HealthCheck, \"/health\")\n+api.add_resource(MetaVersion, \"/meta/version\")\n+api.add_resource(MetaConfig, \"/meta/config\")\n \n api.add_resource(GetRecordData, \"/record/list\")\n api.add_resource(GetRecordDataId, \"/record/list/<record_id>\")\ndiff --git a/api/app/controllers/api/health.py b/api/app/controllers/api/health.py\n--- a/api/app/controllers/api/health.py\n+++ b/api/app/controllers/api/health.py\n@@ -1,12 +1,11 @@\n from flask_restful import Resource\n \n from app.vendors.rest import response\n-from app.helpers import helpers\n \n \n class HealthCheck(Resource):\n def get(self):\n- build = helpers.read_version(\"requirements.txt\", \"build-version.txt\")\n-\n- data = {\"status\": \"running\", \"build\": build}\n+ data = {\n+ \"status\": \"running\",\n+ }\n return response(200, data=data, message=\"OK\")\ndiff --git a/api/app/controllers/api/meta.py b/api/app/controllers/api/meta.py\nnew file mode 100644\n--- /dev/null\n+++ b/api/app/controllers/api/meta.py\n@@ -0,0 +1,24 @@\n+from flask_restful import Resource\n+\n+from app.vendors.rest import response\n+from app.helpers import helpers\n+from app.middlewares import auth\n+\n+\n+class MetaVersion(Resource):\n+ def get(self):\n+ build = helpers.read_version(\"requirements.txt\", \"build-version.txt\")\n+\n+ data = {\"build\": build}\n+ return response(200, data=data, message=\"OK\")\n+\n+\n+class MetaConfig(Resource):\n+ @auth.auth_required\n+ def get(self):\n+ config = helpers.get_config()\n+ brokers = config[\"brokers\"]\n+ clusters = config[\"knot_servers\"]\n+\n+ data = {\"knot_servers\": clusters, \"brokers\": brokers}\n+ return response(200, data=data, message=\"OK\")\ndiff --git a/api/app/helpers/command.py b/api/app/helpers/command.py\n--- a/api/app/helpers/command.py\n+++ b/api/app/helpers/command.py\n@@ -1,10 +1,7 @@\n import json\n-import os\n-import pathlib\n-\n-import yaml\n \n from app.helpers import producer\n+from app.helpers import helpers\n from app.models import model\n \n \n@@ -139,30 +136,14 
@@ def set_default_zone(record_ids):\n producer.send(message)\n \n \n-def cluster_file():\n- \"\"\"Return cluster file path.\"\"\"\n- path = os.environ.get(\"RESTKNOT_CLUSTER_FILE\")\n- if not path:\n- current_path = pathlib.Path(__file__)\n- path = current_path.parents[2].joinpath(\"servers.yml\")\n-\n- is_exists = os.path.exists(path)\n- if is_exists:\n- return path\n- else:\n- raise ValueError(f\"Clustering File Not Found: {path}\")\n-\n-\n-def get_clusters():\n- \"\"\"Return cluster file content.\"\"\"\n- file_ = cluster_file()\n- clusters = yaml.safe_load(open(file_))\n- return clusters\n-\n-\n def delegate(zone, zone_id, command, agent_type):\n \"\"\"Send delegation config command with JSON structure to broker.\"\"\"\n- clusters = get_clusters()\n+ config = helpers.get_config()\n+ try:\n+ clusters = config[\"knot_servers\"]\n+ except KeyError:\n+ raise ValueError(\"Can't Knot server list in config\")\n+\n cluster = clusters[agent_type]\n \n # default for master\ndiff --git a/api/app/helpers/helpers.py b/api/app/helpers/helpers.py\n--- a/api/app/helpers/helpers.py\n+++ b/api/app/helpers/helpers.py\n@@ -1,7 +1,10 @@\n import datetime\n import pathlib\n+import os\n from functools import wraps\n \n+import yaml\n+\n from app.helpers import producer\n from app.vendors.rest import response\n \n@@ -97,3 +100,24 @@ def read_version(other_file_name, filename):\n version = \"__UNKNOWN__\"\n \n return version\n+\n+\n+def config_file():\n+ \"\"\"Return config file path.\"\"\"\n+ path = os.environ.get(\"RESTKNOT_CONFIG_FILE\")\n+ if not path:\n+ current_path = pathlib.Path(__file__)\n+ path = current_path.parents[2].joinpath(\"config.yml\")\n+\n+ is_exists = os.path.exists(path)\n+ if is_exists:\n+ return path\n+ else:\n+ raise ValueError(f\"Config File Not Found: {path}\")\n+\n+\n+def get_config():\n+ \"\"\"Return config file content.\"\"\"\n+ file_ = config_file()\n+ config = yaml.safe_load(open(file_))\n+ return config\ndiff --git a/api/app/helpers/producer.py b/api/app/helpers/producer.py\n--- a/api/app/helpers/producer.py\n+++ b/api/app/helpers/producer.py\n@@ -2,33 +2,43 @@\n import os\n \n from flask import current_app\n-from kafka import KafkaProducer\n+from confluent_kafka import Producer\n+\n+from app.helpers import helpers\n \n \n def kafka_producer():\n \"\"\"Create Kafka producer.\"\"\"\n- host = os.environ.get(\"KAFKA_HOST\")\n- port = os.environ.get(\"KAFKA_PORT\")\n- broker = f\"{host}:{port}\"\n-\n- producer = KafkaProducer(\n- bootstrap_servers=[broker],\n- value_serializer=lambda m: json.dumps(m).encode(\"utf-8\"),\n- )\n+ config = helpers.get_config()\n+ try:\n+ brokers = config[\"brokers\"]\n+ except KeyError:\n+ raise ValueError(\"Can't find brokers list in config\")\n+\n+ brokers = \",\".join(brokers)\n+ conf = {\"bootstrap.servers\": brokers}\n+ producer = Producer(**conf)\n return producer\n \n \n+def _delivery_report(err, msg):\n+ if err is not None:\n+ raise ValueError(f\"Message delivery failed: {err}\")\n+\n+\n def send(message):\n \"\"\"Send given message to Kafka broker.\"\"\"\n producer = None\n try:\n producer = kafka_producer()\n topic = os.environ.get(\"RESTKNOT_KAFKA_TOPIC\")\n- producer.send(topic, message)\n- producer.flush()\n+ encoded_message = json.dumps(message).encode(\"utf-8\")\n+ producer.produce(topic, encoded_message, callback=_delivery_report)\n except Exception as e:\n current_app.logger.error(f\"{e}\")\n raise ValueError(f\"{e}\")\n- finally:\n- if producer:\n- producer.close()\n+\n+ # Serve delivery callback queue.\n+ producer.poll(0)\n+ # Wait 
until all messages have been delivered\n+ producer.flush()\ndiff --git a/api/gunicorn.conf.py b/api/gunicorn.conf.py\nnew file mode 100644\n--- /dev/null\n+++ b/api/gunicorn.conf.py\n@@ -0,0 +1,13 @@\n+import os\n+import multiprocessing\n+\n+\n+def max_workers():\n+ return multiprocessing.cpu_count() * 2 + 1\n+\n+\n+host = os.environ.get(\"APP_HOST\", \"0.0.0.0\")\n+port = os.environ.get(\"APP_PORT\", \"8000\")\n+\n+bind = f\"{host}:{port}\"\n+workers = max_workers()\n", "test_patch": "diff --git a/api/tests/integration/conftest.py b/api/tests/integration/conftest.py\n--- a/api/tests/integration/conftest.py\n+++ b/api/tests/integration/conftest.py\n@@ -19,7 +19,7 @@ def clean_users():\n @pytest.fixture\n def client():\n current_path = pathlib.Path(__file__)\n- dotenv_path = current_path.parents[2].joinpath(\".env.example\")\n+ dotenv_path = current_path.parents[2].joinpath(\".example.env\")\n load_dotenv(dotenv_path)\n \n app = create_app()\n", "problem_statement": "", "hints_text": "", "created_at": "2021-04-14T10:12:08Z"}
PythonDataset/train/SempoBlockchain-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "teamsempo/SempoBlockchain", "pull_number": 130, "instance_id": "teamsempo__SempoBlockchain-130", "issue_numbers": "", "base_commit": "0b6ca1cac5009818a00ede1f973ddb8b8d8025a7", "patch": "diff --git a/app/server/__init__.py b/app/server/__init__.py\n--- a/app/server/__init__.py\n+++ b/app/server/__init__.py\n@@ -7,7 +7,8 @@\n from pusher import Pusher\n import boto3\n from twilio.rest import Client as TwilioClient\n-from raven.contrib.flask import Sentry\n+import sentry_sdk\n+from sentry_sdk.integrations.flask import FlaskIntegration\n import messagebird\n import africastalking\n from datetime import datetime\n@@ -85,13 +86,13 @@ def enable_form_raw_cache():\n return make_response(jsonify({'message': 'Payload too large'})), 413\n request.get_data(parse_form_data=False, cache=True)\n \n- if not config.IS_TEST:\n- sentry.init_app(app, dsn=app.config['SENTRY_SERVER_DSN'])\n # limiter.init_app(app)\n \n CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n \n celery_app.conf.update(app.config)\n+ if not config.IS_TEST:\n+ sentry_sdk.init(app.config['SENTRY_SERVER_DSN'], integrations=[FlaskIntegration()], release=config.VERSION)\n \n print('celery joined on {} at {}'.format(\n app.config['REDIS_URL'], datetime.utcnow()))\n@@ -125,7 +126,7 @@ def after_request(response):\n try:\n task.delay()\n except Exception as e:\n- sentry.captureException()\n+ sentry_sdk.capture_exception(e)\n \n return response\n \n@@ -213,7 +214,6 @@ def encrypt_string(raw_string):\n })\n \n basic_auth = BasicAuth()\n-sentry = Sentry()\n \n # limiter = Limiter(key_func=get_remote_address, default_limits=[\"20000 per day\", \"2000 per hour\"])\n \ndiff --git a/app/server/api/auth_api.py b/app/server/api/auth_api.py\n--- a/app/server/api/auth_api.py\n+++ b/app/server/api/auth_api.py\n@@ -1,6 +1,7 @@\n from flask import Blueprint, request, make_response, jsonify, g, current_app\n from flask.views import MethodView\n-from server import db, sentry\n+import sentry_sdk\n+from server import db\n # from server import limiter\n from phonenumbers.phonenumberutil import NumberParseException\n from server.models.user import User\n@@ -436,9 +437,7 @@ def post(self):\n return make_response(jsonify(response_object)), 200\n \n except Exception as e:\n-\n- sentry.captureException()\n-\n+ sentry_sdk.capture_exception(e)\n raise e\n \n # response_object = {\ndiff --git a/app/server/api/credit_transfer_api.py b/app/server/api/credit_transfer_api.py\n--- a/app/server/api/credit_transfer_api.py\n+++ b/app/server/api/credit_transfer_api.py\n@@ -3,6 +3,7 @@\n from sqlalchemy import or_\n from functools import partial\n import json\n+import sentry_sdk\n from server import db\n from server.models.token import Token\n from server.models.utils import paginate_query\n@@ -28,7 +29,6 @@ class CreditTransferAPI(MethodView):\n \n @requires_auth(allowed_roles={'ADMIN': 'any'})\n def get(self, credit_transfer_id):\n-\n transfer_account_ids = request.args.get('transfer_account_ids')\n transfer_type = request.args.get('transfer_type', 'ALL')\n get_transfer_stats = request.args.get('get_stats', False)\ndiff --git a/app/server/api/kyc_application_api.py b/app/server/api/kyc_application_api.py\n--- a/app/server/api/kyc_application_api.py\n+++ b/app/server/api/kyc_application_api.py\n@@ -1,6 +1,7 @@\n+import sentry_sdk\n from flask import Blueprint, request, make_response, jsonify, g\n from flask.views import MethodView\n-from server import db, sentry\n+from server import db\n from server.models.bank_account import BankAccount\n from 
server.models.kyc_application import KycApplication\n from server.models.upload import UploadedResource\n@@ -45,7 +46,7 @@ def handle_kyc_documents(data=None,document_country=None,document_type=None,kyc_\n uploaded_document.kyc_application_id = kyc_details.id\n except Exception as e:\n print(e)\n- sentry.captureException()\n+ sentry_sdk.capture_exception(e)\n pass\n \n \ndiff --git a/app/server/models/ip_address.py b/app/server/models/ip_address.py\n--- a/app/server/models/ip_address.py\n+++ b/app/server/models/ip_address.py\n@@ -1,4 +1,5 @@\n-from server import db, sentry, celery_app, mt\n+import sentry_sdk\n+from server import db, celery_app, mt\n from sqlalchemy.ext.hybrid import hybrid_property\n from sqlalchemy.dialects.postgresql import INET\n \n@@ -37,5 +38,5 @@ def ip(self, ip):\n mt.set_ip_location(self.id, ip)\n except Exception as e:\n print(e)\n- sentry.captureException()\n+ sentry_sdk.captureException(e)\n pass\ndiff --git a/app/server/models/user.py b/app/server/models/user.py\n--- a/app/server/models/user.py\n+++ b/app/server/models/user.py\n@@ -12,8 +12,9 @@\n import jwt\n import random\n import string\n+import sentry_sdk\n \n-from server import db, sentry, celery_app, bt\n+from server import db, celery_app, bt\n from server.utils.misc import encrypt_string, decrypt_string\n from server.utils.access_control import AccessControl\n from server.utils.phone import proccess_phone_number\n@@ -250,8 +251,7 @@ def location(self, location):\n \n geolocate_task.delay()\n except Exception as e:\n- print(e)\n- sentry.captureException()\n+ sentry_sdk.captureException(e)\n pass\n \n @hybrid_property\ndiff --git a/app/server/models/ussd.py b/app/server/models/ussd.py\n--- a/app/server/models/ussd.py\n+++ b/app/server/models/ussd.py\n@@ -1,7 +1,8 @@\n from sqlalchemy.dialects.postgresql import JSON\n from sqlalchemy.orm.attributes import flag_modified\n+import sentry_sdk\n \n-from server import db, sentry\n+from server import db\n from server.models.utils import ModelBase\n \n \n@@ -28,7 +29,7 @@ class UssdMenu(ModelBase):\n def find_by_name(name: str) -> \"UssdMenu\":\n menus = UssdMenu.query.filter_by(name=name)\n if menus.count() == 0:\n- sentry.captureMessage(\"No USSD Menu with name {}\".format(name))\n+ sentry_sdk.capture_message(\"No USSD Menu with name {}\".format(name))\n # should handle case if no invalid_request menu?\n return UssdMenu.query.filter_by(name='exit_invalid_request').first()\n else:\ndiff --git a/app/server/utils/blockchain_transaction.py b/app/server/utils/blockchain_transaction.py\n--- a/app/server/utils/blockchain_transaction.py\n+++ b/app/server/utils/blockchain_transaction.py\n@@ -3,11 +3,11 @@\n from server.models.blockchain_transaction import BlockchainTransaction\n from server.models.blockchain_address import BlockchainAddress\n from server.exceptions import BlockchainError\n-from server import db, celery_app, sentry\n+from server import db, celery_app\n import datetime\n import random\n import time\n-\n+import sentry_sdk\n \n def add_full_transaction_details(details_dict, method='POST', force_transaction_creation=False):\n \n@@ -193,7 +193,7 @@ def get_usd_to_satoshi_rate():\n \n except Exception as e:\n print(e)\n- sentry.captureException()\n+ sentry_sdk.capture_exception(e)\n raise BlockchainError(\"Blockchain Error\")\n \n finally:\ndiff --git a/app/server/utils/credit_transfer.py b/app/server/utils/credit_transfer.py\n--- a/app/server/utils/credit_transfer.py\n+++ b/app/server/utils/credit_transfer.py\n@@ -4,6 +4,7 @@\n from flask import make_response, 
jsonify, current_app, g\n from sqlalchemy.sql import func\n import datetime, json\n+import sentry_sdk\n \n from server.exceptions import (\n NoTransferAccountError,\n@@ -14,7 +15,7 @@\n TransferAccountNotFoundError\n )\n \n-from server import db, sentry, red, bt\n+from server import db, red, bt\n from server.models.transfer_usage import TransferUsage\n from server.models.transfer_account import TransferAccount\n from server.models.blockchain_address import BlockchainAddress\n@@ -507,7 +508,7 @@ def make_payment_transfer(transfer_amount,\n \n except Exception as e:\n print(e)\n- sentry.captureException()\n+ sentry_sdk.capture_exception(e)\n \n return transfer\n \ndiff --git a/app/server/utils/ge_migrations/rds_migrate.py b/app/server/utils/ge_migrations/rds_migrate.py\n--- a/app/server/utils/ge_migrations/rds_migrate.py\n+++ b/app/server/utils/ge_migrations/rds_migrate.py\n@@ -3,8 +3,9 @@\n import time\n from sqlalchemy.exc import IntegrityError, InvalidRequestError\n import pprint\n+import sentry_sdk\n \n-from server import db, sentry\n+from server import db\n \n from server.models.user import User\n from server.models.organisation import Organisation\n@@ -350,7 +351,7 @@ def migrate_balances(self, ge_address_to_user: dict):\n \n except Exception as e:\n print(e)\n- sentry.captureException()\n+ sentry_sdk.capture_exception(e)\n pass\n \n def store_wei(self, address, balance):\ndiff --git a/app/server/utils/phone.py b/app/server/utils/phone.py\n--- a/app/server/utils/phone.py\n+++ b/app/server/utils/phone.py\n@@ -2,6 +2,7 @@\n import enum\n from flask import current_app\n import server\n+import sentry_sdk\n \n def proccess_phone_number(phone_number, region=None, ignore_region=False):\n \"\"\"\n@@ -93,7 +94,7 @@ def _send_at_message(self, to_phone, message):\n \n # If that fails, fallback to no sender ID\n if resp['SMSMessageData']['Message'] == 'InvalidSenderId':\n- server.sentry.captureMessage(\"InvalidSenderId {}\".format(current_app.config.get('AT_SENDER_ID', None)))\n+ sentry_sdk.capture_message(\"InvalidSenderId {}\".format(current_app.config.get('AT_SENDER_ID', None)))\n \n resp = self.africastalking_client.send(\n message,\ndiff --git a/app/server/utils/pusher.py b/app/server/utils/pusher.py\n--- a/app/server/utils/pusher.py\n+++ b/app/server/utils/pusher.py\n@@ -1,5 +1,7 @@\n+import sentry_sdk\n+\n from flask import current_app\n-from server import pusher_client, sentry\n+from server import pusher_client\n from server.schemas import credit_transfer_schema\n from server.utils import credit_transfer\n \n@@ -17,7 +19,7 @@ def push_admin_credit_transfer(transfer):\n )\n except Exception as e:\n print(e)\n- sentry.captureException()\n+ sentry_sdk.capture_exception(e)\n \n def push_user_transfer_confirmation(receive_user, transfer_random_key):\n try:\n@@ -28,4 +30,4 @@ def push_user_transfer_confirmation(receive_user, transfer_random_key):\n )\n except Exception as e:\n print(e)\n- sentry.captureException()\n\\n+ sentry_sdk.capture_exception(e)\ndiff --git a/app/server/utils/user.py b/app/server/utils/user.py\n--- a/app/server/utils/user.py\n+++ b/app/server/utils/user.py\n@@ -7,6 +7,7 @@\n from bit import base58\n from flask import current_app, g\n from eth_utils import to_checksum_address\n+import sentry_sdk\n \n from server import db\n from server.models.device_info import DeviceInfo\n@@ -22,7 +23,7 @@\n from server.schemas import user_schema\n from server.constants import DEFAULT_ATTRIBUTES, KOBO_META_ATTRIBUTES\n from server.exceptions import PhoneVerificationError, 
TransferAccountNotFoundError\n-from server import celery_app, sentry, message_processor\n+from server import celery_app, message_processor\n from server.utils import credit_transfer as CreditTransferUtils\n from server.utils.phone import proccess_phone_number\n from server.utils.amazon_s3 import generate_new_filename, save_to_s3_from_url, LoadFileException\n@@ -42,7 +43,7 @@ def save_photo_and_check_for_duplicate(url, new_filename, image_id):\n rekognition_task.delay()\n except Exception as e:\n print(e)\n- sentry.captureException()\n+ sentry_sdk.capture_exception(e)\n pass\n \n \n@@ -617,7 +618,7 @@ def proccess_create_or_modify_user_request(\n send_onboarding_sms_messages(user)\n except Exception as e:\n print(e)\n- sentry.captureException()\n+ sentry_sdk.capture_exception(e)\n pass\n \n response_object = {\ndiff --git a/config.py b/config.py\n--- a/config.py\n+++ b/config.py\n@@ -5,6 +5,8 @@\n \n from web3 import Web3\n \n+VERSION = '1.0.0' # Remember to bump this in every PR\n+\n CONFIG_DIR = os.path.abspath(os.path.dirname(__file__))\n \n # ENV_DEPLOYMENT_NAME: dev, 'acmecorp-prod' etc\ndiff --git a/whatsApp/whatsapp.py b/whatsApp/whatsapp.py\n--- a/whatsApp/whatsapp.py\n+++ b/whatsApp/whatsapp.py\n@@ -1,7 +1,7 @@\n from time import sleep\n from datetime import datetime\n \n-from raven import Client\n+import sentry_sdk\n \n import requests\n from requests.auth import HTTPBasicAuth\n@@ -335,8 +335,8 @@ def parse_queue_task(task):\n if __name__ =='__main__':\n \n print('whatsapp starting: ' + str(datetime.utcnow()))\n+ sentry_sdk.init(config.SENTRY_SERVER_DSN, release=config.WEB_VERSION)\n \n- client = Client(config.SENTRY_SERVER_DSN)\n \n try:\n \ndiff --git a/worker/__init__.py b/worker/__init__.py\n--- a/worker/__init__.py\n+++ b/worker/__init__.py\n@@ -1,10 +1,10 @@\n+import sentry_sdk\n from celery import Celery, beat\n-from raven import Client\n import redis\n \n import config\n \n-client = Client(config.SENTRY_SERVER_DSN)\n+sentry_sdk.init(config.SENTRY_SERVER_DSN, release=config.VERSION)\n \n celery_app = Celery('tasks',\n broker=config.REDIS_URL,\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2020-01-22T18:42:55Z"}
PythonDataset/train/SpockBot-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "SpockBotMC/SpockBot", "pull_number": 81, "instance_id": "SpockBotMC__SpockBot-81", "issue_numbers": "", "base_commit": "c0cf152824695a03d1c5a358e807c452e2d8bce9", "patch": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n url='https://github.com/SpockBotMC/SpockBot',\n packages=find_packages(exclude=['tests', 'tests.*']),\n install_requires=[\n- 'PyCrypto >= 2.6.1',\n+ 'cryptography >= 0.9',\n 'six',\n ],\n keywords=['minecraft'],\ndiff --git a/spock/plugins/core/auth.py b/spock/plugins/core/auth.py\n--- a/spock/plugins/core/auth.py\n+++ b/spock/plugins/core/auth.py\n@@ -12,10 +12,11 @@\n import urllib2 as request\n from urllib2 import URLError\n import logging\n+import os\n \n-from Crypto import Random\n-from Crypto.Cipher import PKCS1_v1_5\n-from Crypto.PublicKey import RSA\n+from cryptography.hazmat.backends import default_backend\n+from cryptography.hazmat.primitives import serialization\n+from cryptography.hazmat.primitives.asymmetric import padding\n \n from spock.mcp import yggdrasil\n from spock.plugins.base import PluginBase\n@@ -23,6 +24,7 @@\n \n \n logger = logging.getLogger('spock')\n+backend = default_backend()\n \n \n # This function courtesy of barneygale\n@@ -67,7 +69,7 @@ def start_session(self, username, password=''):\n return rep\n \n def gen_shared_secret(self):\n- self.shared_secret = Random._UserFriendlyRNG.get_random_bytes(16)\n+ self.shared_secret = os.urandom(16)\n return self.shared_secret\n \n \n@@ -101,12 +103,12 @@ def handle_session_error(self, name, data):\n \n # Encryption Key Request - Request for client to start encryption\n def handle_encryption_request(self, name, packet):\n- pubkey = packet.data['public_key']\n+ pubkey_raw = packet.data['public_key']\n if self.authenticated:\n serverid = java_hex_digest(hashlib.sha1(\n packet.data['server_id'].encode('ascii')\n + self.auth.shared_secret\n- + pubkey\n+ + pubkey_raw\n ))\n logger.info(\n \"AUTHPLUGIN: Attempting to authenticate session with \"\n@@ -127,13 +129,13 @@ def handle_encryption_request(self, name, packet):\n logger.warning(\"AUTHPLUGIN: %s\", rep)\n logger.info(\"AUTHPLUGIN: Session authentication successful\")\n \n- rsa_cipher = PKCS1_v1_5.new(RSA.importKey(pubkey))\n+ pubkey = serialization.load_der_public_key(pubkey_raw, backend)\n+ encrypt = lambda data: pubkey.encrypt(data, padding.PKCS1v15())\n self.net.push_packet(\n 'LOGIN>Encryption Response',\n {\n- 'shared_secret': rsa_cipher.encrypt(self.auth.shared_secret),\n- 'verify_token': rsa_cipher.encrypt(\n- packet.data['verify_token']),\n+ 'shared_secret': encrypt(self.auth.shared_secret),\n+ 'verify_token': encrypt(packet.data['verify_token']),\n }\n )\n self.net.enable_crypto(self.auth.shared_secret)\ndiff --git a/spock/plugins/core/net.py b/spock/plugins/core/net.py\n--- a/spock/plugins/core/net.py\n+++ b/spock/plugins/core/net.py\n@@ -9,7 +9,9 @@\n import socket\n import time\n \n-from Crypto.Cipher import AES\n+from cryptography.hazmat.backends import default_backend\n+from cryptography.hazmat.primitives import ciphers\n+from cryptography.hazmat.primitives.ciphers import algorithms, modes\n \n from spock import utils\n from spock.mcp import mcdata, mcpacket\n@@ -17,21 +19,22 @@\n from spock.utils import pl_announce\n \n logger = logging.getLogger('spock')\n+backend = default_backend()\n \n \n class AESCipher(object):\n def __init__(self, shared_secret):\n+ cipher = ciphers.Cipher(algorithms.AES(shared_secret),\n+ modes.CFB8(shared_secret), backend)\n # Name courtesy of dx\n- 
self.encryptifier = AES.new(shared_secret, AES.MODE_CFB,\n- IV=shared_secret)\n- self.decryptifier = AES.new(shared_secret, AES.MODE_CFB,\n- IV=shared_secret)\n+ self.encryptifier = cipher.encryptor()\n+ self.decryptifier = cipher.decryptor()\n \n def encrypt(self, data):\n- return self.encryptifier.encrypt(data)\n+ return self.encryptifier.update(data)\n \n def decrypt(self, data):\n- return self.decryptifier.decrypt(data)\n+ return self.decryptifier.update(data)\n \n \n class SelectSocket(socket.socket):\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2015-08-15T23:56:37Z"}
PythonDataset/train/TheOrgBook-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "bcgov/TheOrgBook", "pull_number": 547, "instance_id": "bcgov__TheOrgBook-547", "issue_numbers": "", "base_commit": "bf9fe23fb88fd75e36f5fd600152042599af97e9", "patch": "diff --git a/APISpec/gen/urls.py b/APISpec/gen/urls.py\n--- a/APISpec/gen/urls.py\n+++ b/APISpec/gen/urls.py\n@@ -25,7 +25,7 @@\n from rest_framework.schemas import SchemaGenerator\n from rest_framework.views import APIView\n from rest_framework.urlpatterns import format_suffix_patterns\n-from rest_framework_swagger import renderers\n+# from rest_framework_swagger import renderers\n # generated views\n from . import views\n # custom views\n@@ -34,8 +34,8 @@\n class SwaggerSchemaView(APIView):\n permission_classes = [AllowAny]\n renderer_classes = [\n- renderers.OpenAPIRenderer,\n- renderers.SwaggerUIRenderer\n+ # renderers.OpenAPIRenderer,\n+ # renderers.SwaggerUIRenderer\n ]\n _ignore_model_permissions = True\n exclude_from_schema = True \ndiff --git a/tob-api/api/urls.py b/tob-api/api/urls.py\n--- a/tob-api/api/urls.py\n+++ b/tob-api/api/urls.py\n@@ -25,7 +25,7 @@\n from rest_framework.schemas import SchemaGenerator\n from rest_framework.views import APIView\n from rest_framework.urlpatterns import format_suffix_patterns\n-from rest_framework_swagger import renderers\n+# from rest_framework_swagger import renderers\n # generated views\n from . import views\n # custom views\n@@ -37,7 +37,7 @@ class SwaggerSchemaView(APIView):\n \"\"\"\n \n permission_classes = [AllowAny]\n- renderer_classes = [renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer]\n+ # renderer_classes = [renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer]\n \n def get(self, request):\n generator = SchemaGenerator()\n@@ -46,65 +46,65 @@ def get(self, request):\n \n urlpatterns = [\n # Swagger documentation\n- url(r'^$', SwaggerSchemaView.as_view()),\n-\n- url(r'^admin/records/counts', views_custom.recordCounts.as_view()),\n- url(r'^quickload$', views_custom.quickLoad.as_view()),\n- url(r'^settings$', views_custom.custom_settings.as_view()),\n-\n- url(r'^doingbusinessas/bulk$', views.doingbusinessasBulkPost.as_view()),\n- url(r'^doingbusinessas$', views.doingbusinessasGet.as_view()),\n- url(r'^doingbusinessas/(?P<id>[0-9]+)/delete$', views.doingbusinessasIdDeletePost.as_view()),\n- url(r'^doingbusinessas/(?P<id>[0-9]+)$', views.doingbusinessasIdGet.as_view()),\n-\n- url(r'^inactiveclaimreasons/bulk$', views.inactiveclaimreasonsBulkPost.as_view()),\n- url(r'^inactiveclaimreasons$', views.inactiveclaimreasonsGet.as_view()),\n- url(r'^inactiveclaimreasons/(?P<id>[0-9]+)/delete$', views.inactiveclaimreasonsIdDeletePost.as_view()),\n- url(r'^inactiveclaimreasons/(?P<id>[0-9]+)$', views.inactiveclaimreasonsIdGet.as_view()),\n-\n- url(r'^issuerservices/bulk$', views.issuerservicesBulkPost.as_view()),\n- url(r'^issuerservices$', views.issuerservicesGet.as_view()),\n- url(r'^issuerservices/(?P<id>[0-9]+)/delete$', views.issuerservicesIdDeletePost.as_view()),\n- url(r'^issuerservices/(?P<id>[0-9]+)$', views.issuerservicesIdGet.as_view()),\n-\n- url(r'^jurisdictions/bulk$', views.jurisdictionsBulkPost.as_view()),\n- url(r'^jurisdictions$', views.jurisdictionsGet.as_view()),\n- url(r'^jurisdictions/(?P<id>[0-9]+)/delete$', views.jurisdictionsIdDeletePost.as_view()),\n- url(r'^jurisdictions/(?P<id>[0-9]+)$', views.jurisdictionsIdGet.as_view()),\n-\n- url(r'^locations/bulk$', views.locationsBulkPost.as_view()),\n- url(r'^locations$', views.locationsGet.as_view()),\n- url(r'^locations/(?P<id>[0-9]+)/delete$', views.locationsIdDeletePost.as_view()),\n- 
url(r'^locations/(?P<id>[0-9]+)$', views.locationsIdGet.as_view()),\n-\n- url(r'^locationtypes/bulk$', views.locationtypesBulkPost.as_view()),\n- url(r'^locationtypes$', views.locationtypesGet.as_view()),\n- url(r'^locationtypes/(?P<id>[0-9]+)/delete$', views.locationtypesIdDeletePost.as_view()),\n- url(r'^locationtypes/(?P<id>[0-9]+)$', views.locationtypesIdGet.as_view()),\n-\n- url(r'^verifiableclaims/bulk$', views.verifiableclaimsBulkPost.as_view()),\n- url(r'^verifiableclaims$', views.verifiableclaimsGet.as_view()),\n- url(r'^verifiableclaims/(?P<id>[0-9]+)/delete$', views.verifiableclaimsIdDeletePost.as_view()),\n- url(r'^verifiableclaims/(?P<id>[0-9]+)$', views.verifiableclaimsIdGet.as_view()),\n- #url(r'^verifiableclaims/(?P<id>[0-9]+)/verify$', indy_views.bcovrinVerifyCredential.as_view()),\n-\n- url(r'^verifiableclaimtypes/bulk$', views.verifiableclaimtypesBulkPost.as_view()),\n- url(r'^verifiableclaimtypes$', views.verifiableclaimtypesGet.as_view()),\n- url(r'^verifiableclaimtypes/(?P<id>[0-9]+)/delete$', views.verifiableclaimtypesIdDeletePost.as_view()),\n- url(r'^verifiableclaimtypes/(?P<id>[0-9]+)$', views.verifiableclaimtypesIdGet.as_view()),\n-\n- url(r'^verifiableorgs/bulk$', views.verifiableorgsBulkPost.as_view()),\n- url(r'^verifiableorgs$', views.verifiableorgsGet.as_view()),\n- url(r'^verifiableorgs/(?P<id>[0-9]+)/delete$', views.verifiableorgsIdDeletePost.as_view()),\n- url(r'^verifiableorgs/(?P<id>[0-9]+)$', views.verifiableorgsIdGet.as_view()),\n- url(r'^verifiableorgs/(?P<id>[0-9]+)/doingbusinessas$', views_custom.verifiableOrgsIdDoingBusinessAsGet.as_view()),\n- url(r'^verifiableorgs/(?P<id>[0-9]+)/locations$', views_custom.verifiableOrgsIdLocationsGet.as_view()),\n- url(r'^verifiableorgs/(?P<id>[0-9]+)/verifiableclaims$', views_custom.verifiableOrgsIdVerifiableclaimsGet.as_view()),\n-\n- url(r'^verifiableorgtypes/bulk$', views.verifiableorgtypesBulkPost.as_view()),\n- url(r'^verifiableorgtypes$', views.verifiableorgtypesGet.as_view()),\n- url(r'^verifiableorgtypes/(?P<id>[0-9]+)/delete$', views.verifiableorgtypesIdDeletePost.as_view()),\n- url(r'^verifiableorgtypes/(?P<id>[0-9]+)$', views.verifiableorgtypesIdGet.as_view()),\n+ # url(r'^$', SwaggerSchemaView.as_view()),\n+\n+ # url(r'^admin/records/counts', views_custom.recordCounts.as_view()),\n+ # url(r'^quickload$', views_custom.quickLoad.as_view()),\n+ # url(r'^settings$', views_custom.custom_settings.as_view()),\n+\n+ # url(r'^doingbusinessas/bulk$', views.doingbusinessasBulkPost.as_view()),\n+ # url(r'^doingbusinessas$', views.doingbusinessasGet.as_view()),\n+ # url(r'^doingbusinessas/(?P<id>[0-9]+)/delete$', views.doingbusinessasIdDeletePost.as_view()),\n+ # url(r'^doingbusinessas/(?P<id>[0-9]+)$', views.doingbusinessasIdGet.as_view()),\n+\n+ # url(r'^inactiveclaimreasons/bulk$', views.inactiveclaimreasonsBulkPost.as_view()),\n+ # url(r'^inactiveclaimreasons$', views.inactiveclaimreasonsGet.as_view()),\n+ # url(r'^inactiveclaimreasons/(?P<id>[0-9]+)/delete$', views.inactiveclaimreasonsIdDeletePost.as_view()),\n+ # url(r'^inactiveclaimreasons/(?P<id>[0-9]+)$', views.inactiveclaimreasonsIdGet.as_view()),\n+\n+ # url(r'^issuerservices/bulk$', views.issuerservicesBulkPost.as_view()),\n+ # url(r'^issuerservices$', views.issuerservicesGet.as_view()),\n+ # url(r'^issuerservices/(?P<id>[0-9]+)/delete$', views.issuerservicesIdDeletePost.as_view()),\n+ # url(r'^issuerservices/(?P<id>[0-9]+)$', views.issuerservicesIdGet.as_view()),\n+\n+ # url(r'^jurisdictions/bulk$', views.jurisdictionsBulkPost.as_view()),\n+ # 
url(r'^jurisdictions$', views.jurisdictionsGet.as_view()),\n+ # url(r'^jurisdictions/(?P<id>[0-9]+)/delete$', views.jurisdictionsIdDeletePost.as_view()),\n+ # url(r'^jurisdictions/(?P<id>[0-9]+)$', views.jurisdictionsIdGet.as_view()),\n+\n+ # url(r'^locations/bulk$', views.locationsBulkPost.as_view()),\n+ # url(r'^locations$', views.locationsGet.as_view()),\n+ # url(r'^locations/(?P<id>[0-9]+)/delete$', views.locationsIdDeletePost.as_view()),\n+ # url(r'^locations/(?P<id>[0-9]+)$', views.locationsIdGet.as_view()),\n+\n+ # url(r'^locationtypes/bulk$', views.locationtypesBulkPost.as_view()),\n+ # url(r'^locationtypes$', views.locationtypesGet.as_view()),\n+ # url(r'^locationtypes/(?P<id>[0-9]+)/delete$', views.locationtypesIdDeletePost.as_view()),\n+ # url(r'^locationtypes/(?P<id>[0-9]+)$', views.locationtypesIdGet.as_view()),\n+\n+ # url(r'^verifiableclaims/bulk$', views.verifiableclaimsBulkPost.as_view()),\n+ # url(r'^verifiableclaims$', views.verifiableclaimsGet.as_view()),\n+ # url(r'^verifiableclaims/(?P<id>[0-9]+)/delete$', views.verifiableclaimsIdDeletePost.as_view()),\n+ # url(r'^verifiableclaims/(?P<id>[0-9]+)$', views.verifiableclaimsIdGet.as_view()),\n+ # #url(r'^verifiableclaims/(?P<id>[0-9]+)/verify$', indy_views.bcovrinVerifyCredential.as_view()),\n+\n+ # url(r'^verifiableclaimtypes/bulk$', views.verifiableclaimtypesBulkPost.as_view()),\n+ # url(r'^verifiableclaimtypes$', views.verifiableclaimtypesGet.as_view()),\n+ # url(r'^verifiableclaimtypes/(?P<id>[0-9]+)/delete$', views.verifiableclaimtypesIdDeletePost.as_view()),\n+ # url(r'^verifiableclaimtypes/(?P<id>[0-9]+)$', views.verifiableclaimtypesIdGet.as_view()),\n+\n+ # url(r'^verifiableorgs/bulk$', views.verifiableorgsBulkPost.as_view()),\n+ # url(r'^verifiableorgs$', views.verifiableorgsGet.as_view()),\n+ # url(r'^verifiableorgs/(?P<id>[0-9]+)/delete$', views.verifiableorgsIdDeletePost.as_view()),\n+ # url(r'^verifiableorgs/(?P<id>[0-9]+)$', views.verifiableorgsIdGet.as_view()),\n+ # url(r'^verifiableorgs/(?P<id>[0-9]+)/doingbusinessas$', views_custom.verifiableOrgsIdDoingBusinessAsGet.as_view()),\n+ # url(r'^verifiableorgs/(?P<id>[0-9]+)/locations$', views_custom.verifiableOrgsIdLocationsGet.as_view()),\n+ # url(r'^verifiableorgs/(?P<id>[0-9]+)/verifiableclaims$', views_custom.verifiableOrgsIdVerifiableclaimsGet.as_view()),\n+\n+ # url(r'^verifiableorgtypes/bulk$', views.verifiableorgtypesBulkPost.as_view()),\n+ # url(r'^verifiableorgtypes$', views.verifiableorgtypesGet.as_view()),\n+ # url(r'^verifiableorgtypes/(?P<id>[0-9]+)/delete$', views.verifiableorgtypesIdDeletePost.as_view()),\n+ # url(r'^verifiableorgtypes/(?P<id>[0-9]+)$', views.verifiableorgtypesIdGet.as_view()),\n ]\n \n urlpatterns = format_suffix_patterns(urlpatterns)\ndiff --git a/tob-api/api_v2/swagger.py b/tob-api/api_v2/swagger.py\n--- a/tob-api/api_v2/swagger.py\n+++ b/tob-api/api_v2/swagger.py\n@@ -3,7 +3,9 @@\n from rest_framework.response import Response\n from rest_framework.schemas import SchemaGenerator\n from rest_framework.views import APIView\n-from rest_framework_swagger import renderers\n+\n+from drf_yasg.views import get_schema_view\n+from drf_yasg import openapi\n \n LOGGER = logging.getLogger(__name__)\n \ndiff --git a/tob-api/api_v2/urls.py b/tob-api/api_v2/urls.py\n--- a/tob-api/api_v2/urls.py\n+++ b/tob-api/api_v2/urls.py\n@@ -1,10 +1,27 @@\n from django.conf.urls import url\n from rest_framework.urlpatterns import format_suffix_patterns\n from rest_framework.routers import SimpleRouter\n+from rest_framework.permissions import 
DjangoModelPermissionsOrAnonReadOnly, AllowAny\n \n-from .swagger import SwaggerSchemaView\n from api_v2.views import misc, rest, search\n \n+from drf_yasg.views import get_schema_view\n+from drf_yasg import openapi\n+\n+schema_view = get_schema_view(\n+ openapi.Info(\n+ title=\"TheOrgBook API\",\n+ default_version=\"v2\",\n+ description=\"Test description\",\n+ # terms_of_service=\"https://www.google.com/policies/terms/\",\n+ # contact=openapi.Contact(email=\"contact@snippets.local\"),\n+ # license=openapi.License(name=\"BSD License\"),\n+ ),\n+ validators=[\"flex\", \"ssv\"],\n+ public=True,\n+ permission_classes=(AllowAny,),\n+)\n+\n router = SimpleRouter(trailing_slash=False)\n \n router.register(r\"issuer\", rest.IssuerViewSet)\n@@ -12,30 +29,41 @@\n router.register(r\"credentialtype\", rest.CredentialTypeViewSet)\n router.register(r\"address\", rest.AddressViewSet)\n router.register(r\"attribute\", rest.AttributeViewSet)\n-#router.register(r\"category\", rest.CategoryViewSet)\n+# router.register(r\"category\", rest.CategoryViewSet)\n router.register(r\"credential\", rest.CredentialViewSet)\n router.register(r\"name\", rest.NameViewSet)\n router.register(r\"topic\", rest.TopicViewSet)\n \n # Search endpoints\n router.register(\n- r\"search/credential/topic\", search.CredentialTopicSearchView, \"Credential Topic Search\")\n-router.register(\n- r\"search/credential\", search.CredentialSearchView, \"Credential Search\")\n-searchPatterns = [\n- url(r\"^search/autocomplete$\", search.NameAutocompleteView.as_view()),\n-]\n+ r\"search/credential/topic\",\n+ search.CredentialTopicSearchView,\n+ \"Credential Topic Search\",\n+)\n+router.register(r\"search/credential\", search.CredentialSearchView, \"Credential Search\")\n+searchPatterns = [url(r\"^search/autocomplete$\", search.NameAutocompleteView.as_view())]\n \n # Misc endpoints\n miscPatterns = [\n # Swagger documentation\n- url(r'^$', SwaggerSchemaView.as_view()),\n+ # url(r\"^$\", SwaggerSchemaView.as_view()),\n # Stats and cacheable info for home page\n- url(r\"^quickload$\", misc.quickload),\n+ url(r\"^quickload$\", misc.quickload)\n ]\n \n+swaggerPatterns = [\n+ url(r\"^$\", schema_view.with_ui(\"swagger\", cache_timeout=None), name=\"api-docs\"),\n+ # url(\n+ # r\"^swagger/$\",\n+ # schema_view.with_ui(\"swagger\", cache_timeout=0),\n+ # name=\"schema-swagger-ui\",\n+ # ),\n+ # url(\n+ # r\"^redoc/$\", schema_view.with_ui(\"redoc\", cache_timeout=0), name=\"schema-redoc\"\n+ # ),\n+]\n # Indy endpoints (now handled elsewhere)\n-#indyPatterns = [\n+# indyPatterns = [\n # url(\n # r\"^indy/generate-credential-request$\", indy.generate_credential_request\n # ),\n@@ -44,8 +72,8 @@\n # url(r\"^indy/construct-proof$\", indy.construct_proof),\n # url(r\"^indy/status$\", indy.status),\n # url(r\"^credential/(?P<id>[0-9]+)/verify$\", indy.verify_credential),\n-#]\n+# ]\n \n urlpatterns = format_suffix_patterns(\n- router.urls + searchPatterns + miscPatterns # + indyPatterns\n+ router.urls + searchPatterns + miscPatterns + swaggerPatterns # + indyPatterns\n )\ndiff --git a/tob-api/api_v2/views/rest.py b/tob-api/api_v2/views/rest.py\n--- a/tob-api/api_v2/views/rest.py\n+++ b/tob-api/api_v2/views/rest.py\n@@ -6,7 +6,7 @@\n \n from rest_framework.exceptions import NotFound\n from rest_framework.decorators import detail_route, list_route\n-from rest_framework.viewsets import ModelViewSet, ViewSet\n+from rest_framework.viewsets import ReadOnlyModelViewSet, ViewSet\n from rest_framework.response import Response\n \n from api_v2.serializers.rest 
import (\n@@ -25,6 +25,8 @@\n \n from rest_framework.serializers import SerializerMethodField\n \n+from drf_yasg.utils import swagger_auto_schema\n+\n from api_v2.serializers.search import CustomTopicSerializer\n \n from api_v2.models.Issuer import Issuer\n@@ -40,18 +42,20 @@\n from api_v2 import utils\n \n \n-class IssuerViewSet(ModelViewSet):\n+class IssuerViewSet(ReadOnlyModelViewSet):\n serializer_class = IssuerSerializer\n queryset = Issuer.objects.all()\n \n- @detail_route(url_path=\"credentialtype\")\n+ @swagger_auto_schema(method='get')\n+ @detail_route(url_path=\"credentialtype\", methods=[\"get\"])\n def list_credential_types(self, request, pk=None):\n queryset = CredentialType.objects.filter(issuer__id=pk)\n get_object_or_404(queryset, pk=pk)\n serializer = CredentialTypeSerializer(queryset, many=True)\n return Response(serializer.data)\n \n- @detail_route(url_path=\"logo\")\n+ @swagger_auto_schema(method='get')\n+ @detail_route(url_path=\"logo\", methods=[\"get\"])\n def fetch_logo(self, request, pk=None):\n issuer = get_object_or_404(self.queryset, pk=pk)\n logo = None\n@@ -63,16 +67,16 @@ def fetch_logo(self, request, pk=None):\n return HttpResponse(logo, content_type=\"image/jpg\")\n \n \n-class SchemaViewSet(ModelViewSet):\n+class SchemaViewSet(ReadOnlyModelViewSet):\n serializer_class = SchemaSerializer\n queryset = Schema.objects.all()\n \n \n-class CredentialTypeViewSet(ModelViewSet):\n+class CredentialTypeViewSet(ReadOnlyModelViewSet):\n serializer_class = CredentialTypeSerializer\n queryset = CredentialType.objects.all()\n \n- @detail_route(url_path=\"logo\")\n+ @detail_route(url_path=\"logo\", methods=[\"get\"])\n def fetch_logo(self, request, pk=None):\n credType = get_object_or_404(self.queryset, pk=pk)\n logo = None\n@@ -86,42 +90,47 @@ def fetch_logo(self, request, pk=None):\n return HttpResponse(logo, content_type=\"image/jpg\")\n \n \n-class TopicViewSet(ModelViewSet):\n+class TopicViewSet(ReadOnlyModelViewSet):\n serializer_class = TopicSerializer\n queryset = Topic.objects.all()\n \n- @detail_route(url_path=\"formatted\")\n+ @detail_route(url_path=\"formatted\", methods=[\"get\"])\n def retrieve_formatted(self, request, pk=None):\n item = self.get_object()\n serializer = CustomTopicSerializer(item)\n return Response(serializer.data)\n \n- @detail_route(url_path=\"credential\")\n+ @detail_route(url_path=\"credential\", methods=[\"get\"])\n def list_credentials(self, request, pk=None):\n item = self.get_object()\n queryset = item.credentials\n serializer = ExpandedCredentialSerializer(queryset, many=True)\n return Response(serializer.data)\n \n- @detail_route(url_path=\"credential/active\")\n+ @detail_route(url_path=\"credential/active\", methods=[\"get\"])\n def list_active_credentials(self, request, pk=None):\n item = self.get_object()\n queryset = item.credentials.filter(revoked=False, inactive=False)\n serializer = ExpandedCredentialSerializer(queryset, many=True)\n return Response(serializer.data)\n \n- @detail_route(url_path=\"credential/historical\")\n+ @detail_route(url_path=\"credential/historical\", methods=[\"get\"])\n def list_historical_credentials(self, request, pk=None):\n item = self.get_object()\n queryset = item.credentials.filter(Q(revoked=True) | Q(inactive=True))\n serializer = ExpandedCredentialSerializer(queryset, many=True)\n return Response(serializer.data)\n \n- @list_route(methods=['get'], url_path=\"ident/(?P<type>[^/.]+)/(?P<source_id>[^/.]+)\")\n+ @list_route(\n+ methods=[\"get\"], 
url_path=\"ident/(?P<type>[^/.]+)/(?P<source_id>[^/.]+)\"\n+ )\n def retrieve_by_type(self, request, type=None, source_id=None):\n return self.retrieve(request)\n \n- @list_route(methods=['get'], url_path=\"ident/(?P<type>[^/.]+)/(?P<source_id>[^/.]+)/formatted\")\n+ @list_route(\n+ methods=[\"get\"],\n+ url_path=\"ident/(?P<type>[^/.]+)/(?P<source_id>[^/.]+)/formatted\",\n+ )\n def retrieve_by_type_formatted(self, request, type=None, source_id=None):\n return self.retrieve_formatted(request)\n \n@@ -142,23 +151,23 @@ def get_object(self):\n return obj\n \n \n-class CredentialViewSet(ModelViewSet):\n+class CredentialViewSet(ReadOnlyModelViewSet):\n serializer_class = CredentialSerializer\n queryset = Credential.objects.all()\n \n- @detail_route(url_path=\"formatted\")\n+ @detail_route(url_path=\"formatted\", methods=[\"get\"])\n def retrieve_formatted(self, request, pk=None):\n item = self.get_object()\n serializer = ExpandedCredentialSerializer(item)\n return Response(serializer.data)\n \n- @list_route(url_path=\"active\")\n+ @list_route(url_path=\"active\", methods=[\"get\"])\n def list_active(self, request, pk=None):\n queryset = self.queryset.filter(revoked=False, inactive=False)\n serializer = CredentialSerializer(queryset, many=True)\n return Response(serializer.data)\n \n- @list_route(url_path=\"historical\")\n+ @list_route(url_path=\"historical\", methods=[\"get\"])\n def list_historical(self, request, pk=None):\n queryset = self.queryset.filter(Q(revoked=True) | Q(inactive=True))\n serializer = CredentialSerializer(queryset, many=True)\n@@ -182,22 +191,22 @@ def get_object(self):\n return obj\n \n \n-class AddressViewSet(ModelViewSet):\n+class AddressViewSet(ReadOnlyModelViewSet):\n serializer_class = AddressSerializer\n queryset = Address.objects.all()\n \n \n-class AttributeViewSet(ModelViewSet):\n+class AttributeViewSet(ReadOnlyModelViewSet):\n serializer_class = AttributeSerializer\n queryset = Attribute.objects.all()\n \n \n-class NameViewSet(ModelViewSet):\n+class NameViewSet(ReadOnlyModelViewSet):\n serializer_class = NameSerializer\n queryset = Name.objects.all()\n \n \n-class CategoryViewSet(ModelViewSet):\n+class CategoryViewSet(ReadOnlyModelViewSet):\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n \ndiff --git a/tob-api/tob_api/settings.py b/tob-api/tob_api/settings.py\n--- a/tob-api/tob_api/settings.py\n+++ b/tob-api/tob_api/settings.py\n@@ -63,7 +63,7 @@\n \"haystack\",\n \"rest_framework\",\n \"drf_generators\",\n- \"rest_framework_swagger\",\n+ \"drf_yasg\",\n \"auditable\",\n \"api\",\n \"api_v2\",\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2018-10-04T18:58:51Z"}
PythonDataset/train/bert_score-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "Tiiiger/bert_score", "pull_number": 16, "instance_id": "Tiiiger__bert_score-16", "issue_numbers": "", "base_commit": "8781157cd18816bf94cedd0a819119c19431cbcb", "patch": "diff --git a/bert_score/__init__.py b/bert_score/__init__.py\n--- a/bert_score/__init__.py\n+++ b/bert_score/__init__.py\n@@ -1,3 +1,3 @@\n-__version__ = '0.1.2'\n+__version__ = '0.2.0'\n from .utils import *\n from .score import *\ndiff --git a/bert_score/score.py b/bert_score/score.py\n--- a/bert_score/score.py\n+++ b/bert_score/score.py\n@@ -2,19 +2,46 @@\n import time\n import argparse\n import torch\n-from collections import defaultdict\n-from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM\n import matplotlib\n import matplotlib.pyplot as plt\n import numpy as np\n \n-from .utils import get_idf_dict, bert_cos_score_idf,\\\n- get_bert_embedding, bert_types\n+from collections import defaultdict\n+from transformers import AutoModel, AutoTokenizer\n+\n+from .utils import (get_idf_dict, bert_cos_score_idf,\n+ get_bert_embedding, model_types,\n+ lang2model, model2layers, get_hash)\n+\n \n __all__ = ['score', 'plot_example']\n \n-def score(cands, refs, bert=\"bert-base-multilingual-cased\",\n- num_layers=8, verbose=False, no_idf=False, batch_size=64):\n+def get_model(model_type, num_layers, all_layers=None):\n+ model = AutoModel.from_pretrained(model_type)\n+ model.eval()\n+\n+ # drop unused layers\n+ if not all_layers:\n+ if 'bert' == model_type[:4] or 'roberta' == model_type[:7]:\n+ model.encoder.layer =\\\n+ torch.nn.ModuleList([layer for layer in model.encoder.layer[:num_layers]])\n+ elif 'xlnet' == model_type[:5]:\n+ model.layer =\\\n+ torch.nn.ModuleList([layer for layer in model.layer[:num_layers]])\n+ elif 'xlm' in model_type[:3]:\n+ model.n_layers = num_layers\n+ else:\n+ raise ValueError(\"Not supported\")\n+ else:\n+ if 'bert' == model_type[:4] or 'roberta' == model_type[:7]:\n+ model.encoder.output_hidden_states = True\n+ else:\n+ model.output_hidden_states = True\n+ return model\n+\n+def score(cands, refs, model_type=None, num_layers=None, verbose=False,\n+ idf=False, batch_size=64, nthreads=4, all_layers=False, lang=None,\n+ return_hash=False):\n \"\"\"\n BERTScore metric.\n \n@@ -24,31 +51,39 @@ def score(cands, refs, bert=\"bert-base-multilingual-cased\",\n - :param: `bert` (str): bert specification\n - :param: `num_layers` (int): the layer of representation to use\n - :param: `verbose` (bool): turn on intermediate status update\n- - :param: `no_idf` (bool): do not use idf weighting\n+ - :param: `idf` (bool): use idf weighting\n - :param: `batch_size` (int): bert score processing batch size\n+ - :param: `lang` (str): language of the sentences\n+ - :param: `return_hash` (bool): return hash code of the setting\n \"\"\"\n assert len(cands) == len(refs)\n- assert bert in bert_types\n \n- tokenizer = BertTokenizer.from_pretrained(bert)\n- model = BertModel.from_pretrained(bert)\n- model.eval()\n+ assert lang is not None or model_type is not None, \\\n+ 'Either lang or model_type should be specified'\n+\n+ if model_type is None:\n+ lang = lang.lower()\n+ model_type = lang2model[lang]\n+ if num_layers is None:\n+ num_layers = model2layers[model_type]\n+\n+\n+ assert model_type in model_types\n+ tokenizer = AutoTokenizer.from_pretrained(model_type)\n+ model = get_model(model_type, num_layers, all_layers)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n \n- # drop unused layers\n- model.encoder.layer = torch.nn.ModuleList([layer for layer in 
model.encoder.layer[:num_layers]])\n-\n- if no_idf:\n+ if not idf:\n idf_dict = defaultdict(lambda: 1.)\n # set idf for [SEP] and [CLS] to 0\n- idf_dict[101] = 0\n- idf_dict[102] = 0\n+ idf_dict[tokenizer.sep_token_id] = 0\n+ idf_dict[tokenizer.cls_token_id] = 0\n else:\n if verbose:\n print('preparing IDF dict...')\n start = time.perf_counter()\n- idf_dict = get_idf_dict(refs, tokenizer)\n+ idf_dict = get_idf_dict(refs, tokenizer, nthreads=nthreads)\n if verbose:\n print('done in {:.2f} seconds'.format(time.perf_counter() - start))\n \n@@ -56,18 +91,23 @@ def score(cands, refs, bert=\"bert-base-multilingual-cased\",\n print('calculating scores...')\n start = time.perf_counter()\n all_preds = bert_cos_score_idf(model, refs, cands, tokenizer, idf_dict,\n- verbose=verbose, device=device, batch_size=batch_size)\n+ verbose=verbose, device=device, \n+ batch_size=batch_size, all_layers=all_layers)\n \n- P = all_preds[:, 0].cpu()\n- R = all_preds[:, 1].cpu()\n- F1 = all_preds[:, 2].cpu()\n+ P = all_preds[..., 0].cpu()\n+ R = all_preds[..., 1].cpu()\n+ F1 = all_preds[..., 2].cpu()\n if verbose:\n- print('done in {:.2f} seconds'.format(time.perf_counter() - start))\n+ time_diff = time.perf_counter() - start\n+ print(f'done in {time_diff:.2f} seconds, {len(refs) / time_diff:.2f} sentences/sec')\n \n- return P, R, F1\n+ if return_hash:\n+ return (P, R, F1), get_hash(model_type, num_layers, idf)\n+ else:\n+ return P, R, F1\n \n-def plot_example(h, r, verbose=False, bert=\"bert-base-multilingual-cased\",\n- num_layers=8, fname=''):\n+# Under Construction\n+def plot_example(candidate, reference, model_type=None, lang=None, num_layers=None, fname=''):\n \"\"\"\n BERTScore metric.\n \n@@ -78,38 +118,41 @@ def plot_example(h, r, verbose=False, bert=\"bert-base-multilingual-cased\",\n - :param: `bert` (str): bert specification\n - :param: `num_layers` (int): the layer of representation to use\n \"\"\"\n- assert bert in bert_types\n+ assert isinstance(candidate, str)\n+ assert isinstance(reference, str)\n \n- if verbose:\n- print('loading BERT model...')\n- tokenizer = BertTokenizer.from_pretrained(bert)\n- model = BertModel.from_pretrained(bert)\n- model.eval()\n+ assert lang is not None or model_type is not None, \\\n+ 'Either lang or model_type should be specified'\n+\n+ if model_type is None:\n+ lang = lang.lower()\n+ model_type = lang2model[lang]\n+ if num_layers is None:\n+ num_layers = model2layers[model_type]\n+\n+ assert model_type in model_types\n+ tokenizer = AutoTokenizer.from_pretrained(model_type)\n+ model = get_model(model_type, num_layers)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n \n- h_tokens = ['[CLS]'] + tokenizer.tokenize(h) + ['[SEP]']\n- r_tokens = ['[CLS]'] + tokenizer.tokenize(r) + ['[SEP]']\n-\n- model.encoder.layer = torch.nn.ModuleList([layer for layer in model.encoder.layer[:num_layers]])\n idf_dict = defaultdict(lambda: 1.)\n-\n- ref_embedding, ref_lens, ref_masks, padded_idf = get_bert_embedding([r], model, tokenizer, idf_dict,\n- device=device)\n- hyp_embedding, ref_lens, ref_masks, padded_idf = get_bert_embedding([h], model, tokenizer, idf_dict,\n- device=device)\n-\n+ # set idf for [SEP] and [CLS] to 0\n+ idf_dict[tokenizer.sep_token_id] = 0\n+ idf_dict[tokenizer.cls_token_id] = 0\n+\n+ hyp_embedding, masks, padded_idf = get_bert_embedding([candidate], model, tokenizer, idf_dict,\n+ device=device, all_layers=False)\n+ ref_embedding, masks, padded_idf = get_bert_embedding([reference], model, tokenizer, idf_dict,\n+ device=device, 
all_layers=False)\n ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))\n hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))\n-\n- batch_size = ref_embedding.size(1)\n-\n- sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2)).cpu()\n- sim = sim.squeeze(0).numpy()\n+ sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))\n+ sim = sim.squeeze(0).cpu()\n \n # remove [CLS] and [SEP] tokens \n- r_tokens = r_tokens[1:-1]\n- h_tokens = h_tokens[1:-1]\n+ r_tokens = [tokenizer.decode([i]) for i in tokenizer.encode(reference)]\n+ h_tokens = [tokenizer.decode([i]) for i in tokenizer.encode(candidate)]\n sim = sim[1:-1,1:-1]\n \n fig, ax = plt.subplots(figsize=(len(r_tokens)*0.8, len(h_tokens)*0.8))\n@@ -121,8 +164,9 @@ def plot_example(h, r, verbose=False, bert=\"bert-base-multilingual-cased\",\n # ... and label them with the respective list entries\n ax.set_xticklabels(r_tokens, fontsize=10)\n ax.set_yticklabels(h_tokens, fontsize=10)\n- plt.xlabel(\"Refernce\", fontsize=10)\n- plt.ylabel(\"Candidate\", fontsize=10)\n+ plt.xlabel(\"Refernce (tokenized)\", fontsize=14)\n+ plt.ylabel(\"Candidate (tokenized)\", fontsize=14)\n+ plt.title(\"Similarity Matrix\", fontsize=14)\n \n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n@@ -131,15 +175,10 @@ def plot_example(h, r, verbose=False, bert=\"bert-base-multilingual-cased\",\n # Loop over data dimensions and create text annotations.\n for i in range(len(h_tokens)):\n for j in range(len(r_tokens)):\n- text = ax.text(j, i, '{:.3f}'.format(sim[i, j]),\n- ha=\"center\", va=\"center\", color=\"k\" if sim[i, j] < 0.6 else \"w\")\n-\n-# P = sim.max(1).mean()\n-# R = sim.max(0).mean()\n-# F1 = 2 * P * R / (P + R)\n+ text = ax.text(j, i, '{:.3f}'.format(sim[i, j].item()),\n+ ha=\"center\", va=\"center\", color=\"k\" if sim[i, j].item() < 0.6 else \"w\")\n \n fig.tight_layout()\n-# plt.title(\"BERT-F1: {:.3f}\".format(F1), fontsize=10)\n if fname != \"\":\n print(\"Saved figure to file: \", fname+\".png\")\n plt.savefig(fname+'.png', dpi=100)\ndiff --git a/bert_score/utils.py b/bert_score/utils.py\n--- a/bert_score/utils.py\n+++ b/bert_score/utils.py\n@@ -1,3 +1,4 @@\n+import sys\n import torch\n from math import log\n from itertools import chain\n@@ -5,18 +6,40 @@\n from multiprocessing import Pool\n from functools import partial\n from tqdm.auto import tqdm\n-\n-__all__ = ['bert_types']\n-\n-bert_types = [\n- 'bert-base-uncased',\n- 'bert-large-uncased',\n- 'bert-base-cased',\n- 'bert-large-cased',\n- 'bert-base-multilingual-uncased',\n- 'bert-base-multilingual-cased',\n- 'bert-base-chinese',\n-]\n+from torch.nn.utils.rnn import pad_sequence\n+\n+from transformers import BertConfig, XLNetConfig, XLMConfig, RobertaConfig\n+\n+from . 
import __version__\n+\n+__all__ = ['model_types']\n+\n+model_types = list(BertConfig.pretrained_config_archive_map.keys())+\\\n+ list(XLNetConfig.pretrained_config_archive_map.keys())+\\\n+ list(RobertaConfig.pretrained_config_archive_map.keys())+\\\n+ list(XLMConfig.pretrained_config_archive_map.keys())\n+\n+lang2model = defaultdict(lambda: 'bert-base-multilingual-cased')\n+lang2model.update({\n+ 'en': 'roberta-large',\n+ 'zh': 'bert-base-chinese',\n+})\n+\n+model2layers = {\n+ 'bert-base-multilingual-cased' : 9,\n+ 'bert-base-uncased': 9,\n+ 'bert-large-uncased': 18,\n+ 'bert-base-cased-finetuned-mrpc': 9,\n+ 'bert-base-multilingual-cased': 9,\n+ 'bert-base-chinese': 8,\n+ 'roberta-base': 10,\n+ 'roberta-large': 17,\n+ 'roberta-large-mnli': 19,\n+ 'xlnet-base-cased': 5, \n+ 'xlnet-large-cased': 7, \n+ 'xlm-mlm-en-2048': 7, \n+ 'xlm-mlm-100-1280': 11,\n+}\n \n def padding(arr, pad_token, dtype=torch.long):\n lens = torch.LongTensor([len(a) for a in arr])\n@@ -28,19 +51,21 @@ def padding(arr, pad_token, dtype=torch.long):\n mask[i, :lens[i]] = 1\n return padded, lens, mask\n \n-\n-def bert_encode(model, x, attention_mask):\n+def bert_encode(model, x, attention_mask, all_layers=False):\n model.eval()\n x_seg = torch.zeros_like(x, dtype=torch.long)\n with torch.no_grad():\n- x_encoded_layers, pooled_output = model(x, x_seg, attention_mask=attention_mask, output_all_encoded_layers=False)\n- return x_encoded_layers\n+ out = model(x, attention_mask=attention_mask)\n+ if all_layers:\n+ emb = torch.stack(out[-1], dim=2)\n+ else:\n+ emb = out[0]\n+ return emb\n \n \n def process(a, tokenizer=None):\n if not tokenizer is None:\n- a = [\"[CLS]\"]+tokenizer.tokenize(a)+[\"[SEP]\"]\n- a = tokenizer.convert_tokens_to_ids(a)\n+ a = tokenizer.encode(a.strip(), add_special_tokens=True)\n return set(a)\n \n \n@@ -67,8 +92,7 @@ def get_idf_dict(arr, tokenizer, nthreads=4):\n return idf_dict\n \n \n-def collate_idf(arr, tokenize, numericalize, idf_dict,\n- pad=\"[PAD]\", device='cuda:0'):\n+def collate_idf(arr, tokenizer, idf_dict, device='cuda:0'):\n \"\"\"\n Helper function that pads a list of sentences to hvae the same length and\n loads idf score for words in the sentences.\n@@ -84,15 +108,14 @@ def collate_idf(arr, tokenize, numericalize, idf_dict,\n - :param: `pad` (str): the padding token.\n - :param: `device` (str): device to use, e.g. 
'cpu' or 'cuda'\n \"\"\"\n- arr = [[\"[CLS]\"]+tokenize(a)+[\"[SEP]\"] for a in arr]\n- arr = [numericalize(a) for a in arr]\n+ arr = [tokenizer.encode(a, add_special_tokens=True) for a in arr]\n \n idf_weights = [[idf_dict[i] for i in a] for a in arr]\n \n- pad_token = numericalize([pad])[0]\n+ pad_token = tokenizer._convert_token_to_id(tokenizer.pad_token)\n \n padded, lens, mask = padding(arr, pad_token, dtype=torch.long)\n- padded_idf, _, _ = padding(idf_weights, pad_token, dtype=torch.float)\n+ padded_idf, _, _ = padding(idf_weights, 0, dtype=torch.float)\n \n padded = padded.to(device=device)\n mask = mask.to(device=device)\n@@ -101,7 +124,8 @@ def collate_idf(arr, tokenize, numericalize, idf_dict,\n \n \n def get_bert_embedding(all_sens, model, tokenizer, idf_dict,\n- batch_size=-1, device='cuda:0'):\n+ batch_size=-1, device='cuda:0', \n+ all_layers=False):\n \"\"\"\n Compute BERT embedding in batches.\n \n@@ -115,7 +139,7 @@ def get_bert_embedding(all_sens, model, tokenizer, idf_dict,\n \"\"\"\n \n padded_sens, padded_idf, lens, mask = collate_idf(all_sens,\n- tokenizer.tokenize, tokenizer.convert_tokens_to_ids,\n+ tokenizer,\n idf_dict,\n device=device)\n \n@@ -125,18 +149,19 @@ def get_bert_embedding(all_sens, model, tokenizer, idf_dict,\n with torch.no_grad():\n for i in range(0, len(all_sens), batch_size):\n batch_embedding = bert_encode(model, padded_sens[i:i+batch_size],\n- attention_mask=mask[i:i+batch_size])\n- # batch_embedding = torch.stack(batch_embedding)\n+ attention_mask=mask[i:i+batch_size],\n+ all_layers=all_layers)\n embeddings.append(batch_embedding)\n del batch_embedding\n \n total_embedding = torch.cat(embeddings, dim=0)\n \n- return total_embedding, lens, mask, padded_idf\n+ return total_embedding, mask, padded_idf\n \n \n-def greedy_cos_idf(ref_embedding, ref_lens, ref_masks, ref_idf,\n- hyp_embedding, hyp_lens, hyp_masks, hyp_idf):\n+def greedy_cos_idf(ref_embedding, ref_masks, ref_idf,\n+ hyp_embedding, hyp_masks, hyp_idf,\n+ all_layers=False):\n \"\"\"\n Compute greedy matching based on cosine similarity.\n \n@@ -158,16 +183,24 @@ def greedy_cos_idf(ref_embedding, ref_lens, ref_masks, ref_idf,\n - :param: `hyp_idf` (torch.Tensor): BxK, idf score of each word\n piece in the candidate setence\n \"\"\"\n-\n ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))\n hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))\n \n+ if all_layers:\n+ B, _, L, D = hyp_embedding.size()\n+ hyp_embedding = hyp_embedding.transpose(1, 2).transpose(0, 1)\\\n+ .contiguous().view(L*B, hyp_embedding.size(1), D)\n+ ref_embedding = ref_embedding.transpose(1, 2).transpose(0, 1)\\\n+ .contiguous().view(L*B, ref_embedding.size(1), D)\n batch_size = ref_embedding.size(0)\n-\n sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))\n masks = torch.bmm(hyp_masks.unsqueeze(2).float(), ref_masks.unsqueeze(1).float())\n- masks = masks.expand(batch_size, masks.size(1), masks.size(2))\\\n- .contiguous().view_as(sim)\n+ if all_layers:\n+ masks = masks.unsqueeze(0).expand(L, -1, -1, -1)\\\n+ .contiguous().view_as(sim)\n+ else:\n+ masks = masks.expand(batch_size, -1, -1)\\\n+ .contiguous().view_as(sim)\n \n masks = masks.float().to(sim.device)\n sim = sim * masks\n@@ -179,14 +212,38 @@ def greedy_cos_idf(ref_embedding, ref_lens, ref_masks, ref_idf,\n ref_idf.div_(ref_idf.sum(dim=1, keepdim=True))\n precision_scale = hyp_idf.to(word_precision.device)\n recall_scale = ref_idf.to(word_recall.device)\n+ if all_layers:\n+ precision_scale = 
precision_scale.unsqueeze(0)\\\n+ .expand(L, B, -1).contiguous().view_as(word_precision)\n+ recall_scale = recall_scale.unsqueeze(0)\\\n+ .expand(L, B, -1).contiguous().view_as(word_recall)\n P = (word_precision * precision_scale).sum(dim=1)\n R = (word_recall * recall_scale).sum(dim=1)\n- \n F = 2 * P * R / (P + R)\n+\n+ hyp_zero_mask = hyp_masks.sum(dim=1).eq(2)\n+ ref_zero_mask = ref_masks.sum(dim=1).eq(2)\n+\n+ if all_layers:\n+ P = P.view(L, B)\n+ R = R.view(L, B)\n+ F = F.view(L, B)\n+\n+ if torch.any(hyp_zero_mask):\n+ print(\"Warning: Empty candidate sentence; Setting precision to be 0.\", file=sys.stderr)\n+ P = P.masked_fill(hyp_zero_mask, 0.)\n+\n+ if torch.any(ref_zero_mask):\n+ print(\"Warning: Empty candidate sentence; Setting recall to be 0.\", file=sys.stderr)\n+ R = R.masked_fill(ref_zero_mask, 0.)\n+\n+ F = F.masked_fill(torch.isnan(F), 0.)\n+\n return P, R, F\n \n def bert_cos_score_idf(model, refs, hyps, tokenizer, idf_dict,\n- verbose=False, batch_size=64, device='cuda:0'):\n+ verbose=False, batch_size=64, device='cuda:0',\n+ all_layers=False):\n \"\"\"\n Compute BERTScore.\n \n@@ -202,17 +259,62 @@ def bert_cos_score_idf(model, refs, hyps, tokenizer, idf_dict,\n - :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'\n \"\"\"\n preds = []\n+ def dedup_and_sort(l):\n+ return sorted(list(set(l)), key= lambda x : len(x.split(\" \")))\n+ sentences = dedup_and_sort(refs+hyps)\n+ embs = []\n+ iter_range = range(0, len(sentences), batch_size)\n+ if verbose: \n+ print(\"computing bert embedding.\")\n+ iter_range = tqdm(iter_range)\n+ stats_dict = dict()\n+ for batch_start in iter_range:\n+ sen_batch = sentences[batch_start:batch_start+batch_size]\n+ embs, masks, padded_idf = get_bert_embedding(sen_batch, model, tokenizer, idf_dict,\n+ device=device, all_layers=all_layers)\n+ embs = embs.cpu()\n+ masks = masks.cpu()\n+ padded_idf = padded_idf.cpu()\n+ for i, sen in enumerate(sen_batch):\n+ sequence_len = masks[i].sum().item()\n+ emb = embs[i, :sequence_len]\n+ idf = padded_idf[i, :sequence_len]\n+ stats_dict[sen] = (emb, idf)\n+ \n+ def pad_batch_stats(sen_batch, stats_dict, device):\n+ stats = [stats_dict[s] for s in sen_batch]\n+ emb, idf = zip(*stats)\n+ lens = [e.size(0) for e in emb]\n+ emb_pad = pad_sequence(emb, batch_first=True, padding_value=2.)\n+ idf_pad = pad_sequence(idf, batch_first=True)\n+ def length_to_mask(lens):\n+ lens = torch.tensor(lens, dtype=torch.long)\n+ max_len = max(lens)\n+ base = torch.arange(max_len, dtype=torch.long)\\\n+ .expand(len(lens), max_len)\n+ return base < lens.unsqueeze(1)\n+ pad_mask = length_to_mask(lens)\n+ return emb_pad.to(device), pad_mask.to(device), idf_pad.to(device)\n+ \n+\n+ device = next(model.parameters()).device\n iter_range = range(0, len(refs), batch_size)\n- if verbose: iter_range = tqdm(iter_range)\n+ if verbose: \n+ print(\"computing greedy matching.\")\n+ iter_range = tqdm(iter_range)\n for batch_start in iter_range:\n batch_refs = refs[batch_start:batch_start+batch_size]\n batch_hyps = hyps[batch_start:batch_start+batch_size]\n- ref_stats = get_bert_embedding(batch_refs, model, tokenizer, idf_dict,\n- device=device)\n- hyp_stats = get_bert_embedding(batch_hyps, model, tokenizer, idf_dict,\n- device=device)\n-\n- P, R, F1 = greedy_cos_idf(*ref_stats, *hyp_stats)\n- preds.append(torch.stack((P, R, F1), dim=1).cpu())\n- preds = torch.cat(preds, dim=0)\n+ ref_stats = pad_batch_stats(batch_refs, stats_dict, device)\n+ hyp_stats = pad_batch_stats(batch_hyps, stats_dict, device)\n+\n+ P, R, F1 = 
greedy_cos_idf(*ref_stats, *hyp_stats, all_layers)\n+ preds.append(torch.stack((P, R, F1), dim=-1).cpu())\n+ preds = torch.cat(preds, dim=1 if all_layers else 0)\n return preds\n+\n+\n+def get_hash(model, num_layers, idf):\n+ msg = '{}_L{}{}_version={}'.format(\n+ model, num_layers, '_idf' if idf else '_no-idf', __version__)\n+ return msg\n\\ndiff --git a/cli/score.py b/cli/score.py\n--- a/cli/score.py\n+++ b/cli/score.py\n@@ -4,21 +4,20 @@\n import argparse\n import torch\n from collections import defaultdict\n-from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM\n \n import bert_score\n \n-VERSION=bert_score.__version__\n \n def main():\n torch.multiprocessing.set_sharing_strategy('file_system')\n \n parser = argparse.ArgumentParser('Calculate BERTScore')\n- parser.add_argument('--bert', default='bert-base-multilingual-cased',\n- choices=bert_score.bert_types, help='BERT model name (default: bert-base-uncased)')\n- parser.add_argument('-l', '--num_layers', type=int, default=8, help='use first N layer in BERT (default: 8)')\n+ parser.add_argument('--lang', type=str, default=None, help='two-letter abbreviation of the language (e.g., en)')\n+ parser.add_argument('-m', '--model', default=None,\n+ choices=bert_score.model_types, help='BERT model name (default: bert-base-uncased)')\n+ parser.add_argument('-l', '--num_layers', type=int, default=None, help='use first N layer in BERT (default: 8)')\n parser.add_argument('-b', '--batch_size', type=int, default=64, help='batch size (default: 64)')\n- parser.add_argument('--no_idf', action='store_true', help='BERT Score without IDF scaling')\n+ parser.add_argument('--idf', action='store_true', help='BERT Score with IDF scaling')\n parser.add_argument('-s', '--seg_level', action='store_true', help='show individual score of each pair')\n parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')\n parser.add_argument('-r', '--ref', type=str, required=True, help='reference file path or a string')\n@@ -35,18 +34,19 @@ def main():\n else:\n cands = [args.cand]\n refs = [args.ref]\n- assert args.no_idf, \"do not suuport idf fold for a single pair of sentences\"\n+ assert not args.idf, \"do not suuport idf fold for a single pair of sentences\"\n \n assert len(cands) == len(refs)\n \n- all_preds = bert_score.score(cands, refs, bert=args.bert, num_layers=args.num_layers, verbose=args.verbose,\n- no_idf=args.no_idf, batch_size=args.batch_size)\n+ all_preds, hash_code = bert_score.score(cands, refs, model_type=args.model, num_layers=args.num_layers,\n+ verbose=args.verbose, idf=args.idf, batch_size=args.batch_size,\n+ lang=args.lang, return_hash=True)\n avg_scores = [s.mean(dim=0) for s in all_preds]\n P = avg_scores[0].cpu().item()\n R = avg_scores[1].cpu().item()\n F1 = avg_scores[2].cpu().item()\n- msg = '{}_L{}{}_version={} BERT-P: {:.6f} BERT-R: {:.6f} BERT-F1: {:.6f}'.format(\n- args.bert, args.num_layers, '_no-idf' if args.no_idf else '', VERSION, P, R, F1)\n+ msg = hash_code + \\\n+ f' BERT-P: {P:.6f} BERT-R: {R:.6f} BERT-F1: {F1:.6f}'\n print(msg)\n if args.seg_level:\n ps, rs, fs = all_preds\ndiff --git a/example/demo.py b/example/demo.py\n--- a/example/demo.py\n+++ b/example/demo.py\n@@ -6,4 +6,5 @@\n with open(\"refs.txt\") as f:\n refs = [line.strip() for line in f]\n \n-P, R, F = score(cands, refs, bert=\"bert-base-uncased\")\n+P, R, F = score(cands, refs, lang='en')\n+print(f'P={P:.6f} R={R:.6f} F={F:.6f}')\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,7 +3,7 
@@\n \n setup(\n name=\"bert_score\",\n- version=\"0.1.2\",\n+ version='0.2.0',\n author=\"Tianyi Zhang*, Varsha Kishore*, Felix Wu*, Kilian Q. Weinberger, and Yoav Artzi\",\n author_email=\"tzhang@asapp.com\",\n description=\"PyTorch implementation of BERT score\",\n@@ -14,12 +14,13 @@\n url=\"https://github.com/Tiiiger/bert_score\",\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\",\n \"tests.*\", \"tests\"]),\n- install_requires=['torch>=0.4.1',\n+ install_requires=['torch>=1.0.0',\n 'numpy',\n 'requests',\n 'tqdm>=4.31.1',\n 'matplotlib',\n- 'pytorch-pretrained-bert>=0.6.1'],\n+ 'transformers>=2.0.0'\n+ ],\n entry_points={\n 'console_scripts': [\n \"bert-score=cli.score:main\",\n", "test_patch": "diff --git a/tests/__init__.py b/tests/__init__.py\nnew file mode 100644\ndiff --git a/tests/test_bert_score.py b/tests/test_bert_score.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/test_bert_score.py\n@@ -0,0 +1,46 @@\n+import unittest\n+import torch\n+import bert_score\n+\n+from collections import defaultdict\n+\n+eps = 1e-6\n+\n+cands = [\n+ \"28-year-old chef found dead in San Francisco mall\",\n+ \"A 28-year-old chef who recently moved to San Francisco was found dead in the staircase of a local shopping center.\",\n+ \"The victim's brother said he cannot imagine anyone who would want to harm him,\\\"Finally, it went uphill again at him.\\\"\",\n+]\n+refs = [\n+ \"28-Year-Old Chef Found Dead at San Francisco Mall\",\n+ \"A 28-year-old chef who had recently moved to San Francisco was found dead in the stairwell of a local mall this week.\",\n+ \"But the victim's brother says he can't think of anyone who would want to hurt him, saying, \\\"Things were finally going well for him.\\\"\"\n+]\n+\n+class TestScore(unittest.TestCase):\n+ def test_score(self):\n+ (P, R, F), hash_code = bert_score.score(cands, refs, model_type='roberta-large', num_layers=17,\n+ idf=False, batch_size=3, return_hash=True)\n+ print(P.tolist(), R.tolist(), F.tolist())\n+\n+ self.assertTrue(torch.is_tensor(P))\n+ self.assertTrue(torch.is_tensor(R))\n+ self.assertTrue(torch.is_tensor(F))\n+ self.assertEqual(hash_code, f'roberta-large_L17_no-idf_version={bert_score.__version__}')\n+ self.assertTrue((P - torch.tensor([0.9862896203994751, 0.9817618131637573, 0.9145744442939758])).abs_().max() < eps)\n+ self.assertTrue((R - torch.tensor([0.986611008644104, 0.9717907905578613, 0.9223880767822266])).abs_().max() < eps)\n+ def test_idf_score(self):\n+ (P, R, F), hash_code = bert_score.score(cands, refs, model_type='roberta-large', num_layers=17,\n+ idf=True, batch_size=3, return_hash=True)\n+ print(P.tolist(), R.tolist(), F.tolist())\n+\n+ self.assertTrue(torch.is_tensor(P))\n+ self.assertTrue(torch.is_tensor(R))\n+ self.assertTrue(torch.is_tensor(F))\n+ self.assertEqual(hash_code, f'roberta-large_L17_idf_version={bert_score.__version__}')\n+ self.assertTrue((P - torch.tensor([0.9841673374176025, 0.9752232432365417, 0.8989502787590027])).abs_().max() < eps)\n+ self.assertTrue((R - torch.tensor([0.9843330979347229, 0.9698787927627563, 0.9181708097457886])).abs_().max() < eps)\n+ self.assertTrue((F - torch.tensor([0.9842502474784851, 0.9725437164306641, 0.908458948135376])).abs_().max() < eps)\n+\n+if __name__ == '__main__':\n+ unittest.main()\n", "problem_statement": "", "hints_text": "", "created_at": "2019-10-02T20:45:52Z"}
PythonDataset/train/combine-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "mlsecproject/combine", "pull_number": 103, "instance_id": "mlsecproject__combine-103", "issue_numbers": "", "base_commit": "d662493af9f6ee7bc36c5509c277f93980009bec", "patch": "diff --git a/baler.py b/baler.py\n--- a/baler.py\n+++ b/baler.py\n@@ -1,21 +1,21 @@\n import ConfigParser\n-import csv\n import datetime as dt\n import gzip\n import json\n+import logging\n import os\n-import sys\n+import re\n import requests\n+import sys\n import time\n-import re\n-from Queue import Queue\n+import unicodecsv\n import threading\n from logger import get_logger\n-import logging\n-\n+from Queue import Queue\n \n logger = get_logger('baler')\n \n+\n def tiq_output(reg_file, enr_file):\n config = ConfigParser.SafeConfigParser()\n cfg_success = config.read('combine.cfg')\n@@ -43,8 +43,8 @@ def tiq_output(reg_file, enr_file):\n outbound_data = [row for row in reg_data if row[2] == 'outbound']\n \n try:\n- bale_reg_csvgz(inbound_data, os.path.join(tiq_dir, 'raw', 'public_inbound', today+'.csv.gz'))\n- bale_reg_csvgz(outbound_data, os.path.join(tiq_dir, 'raw', 'public_outbound', today+'.csv.gz'))\n+ bale_reg_csvgz(inbound_data, os.path.join(tiq_dir, 'raw', 'public_inbound', today + '.csv.gz'))\n+ bale_reg_csvgz(outbound_data, os.path.join(tiq_dir, 'raw', 'public_outbound', today + '.csv.gz'))\n except:\n pass\n \n@@ -52,8 +52,8 @@ def tiq_output(reg_file, enr_file):\n outbound_data = [row for row in enr_data if row[2] == 'outbound']\n \n try:\n- bale_enr_csvgz(inbound_data, os.path.join(tiq_dir, 'enriched', 'public_inbound', today+'.csv.gz'))\n- bale_enr_csvgz(outbound_data, os.path.join(tiq_dir, 'enriched', 'public_outbound', today+'.csv.gz'))\n+ bale_enr_csvgz(inbound_data, os.path.join(tiq_dir, 'enriched', 'public_inbound', today + '.csv.gz'))\n+ bale_enr_csvgz(outbound_data, os.path.join(tiq_dir, 'enriched', 'public_outbound', today + '.csv.gz'))\n except:\n pass\n \n@@ -64,7 +64,7 @@ def bale_reg_csvgz(harvest, output_file):\n \"\"\" bale the data as a gziped csv file\"\"\"\n logger.info('Output regular data as GZip CSV to %s' % output_file)\n with gzip.open(output_file, 'wb') as csv_file:\n- bale_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)\n+ bale_writer = unicodecsv.writer(csv_file, quoting=unicodecsv.QUOTE_ALL)\n \n # header row\n bale_writer.writerow(('entity', 'type', 'direction', 'source', 'notes', 'date'))\n@@ -75,7 +75,7 @@ def bale_reg_csv(harvest, output_file):\n \"\"\" bale the data as a csv file\"\"\"\n logger.info('Output regular data as CSV to %s' % output_file)\n with open(output_file, 'wb') as csv_file:\n- bale_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)\n+ bale_writer = unicodecsv.writer(csv_file, quoting=unicodecsv.QUOTE_ALL)\n \n # header row\n bale_writer.writerow(('entity', 'type', 'direction', 'source', 'notes', 'date'))\n@@ -86,112 +86,117 @@ def bale_enr_csv(harvest, output_file):\n \"\"\" output the data as an enriched csv file\"\"\"\n logger.info('Output enriched data as CSV to %s' % output_file)\n with open(output_file, 'wb') as csv_file:\n- bale_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)\n+ bale_writer = unicodecsv.writer(csv_file, quoting=unicodecsv.QUOTE_ALL)\n \n # header row\n bale_writer.writerow(('entity', 'type', 'direction', 'source', 'notes', 'date', 'asnumber', 'asname', 'country', 'host', 'rhost'))\n bale_writer.writerows(harvest)\n \n+\n def bale_enr_csvgz(harvest, output_file):\n \"\"\" output the data as an enriched gziped csv file\"\"\"\n logger.info('Output enriched data as GZip CSV to %s' % output_file)\n with 
gzip.open(output_file, 'wb') as csv_file:\n- bale_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)\n+ bale_writer = unicodecsv.writer(csv_file, quoting=unicodecsv.QUOTE_ALL)\n \n # header row\n bale_writer.writerow(('entity', 'type', 'direction', 'source', 'notes', 'date', 'asnumber', 'asname', 'country', 'host', 'rhost'))\n bale_writer.writerows(harvest)\n \n-def bale_CRITs_indicator(base_url,data,indicator_que):\n+\n+def bale_CRITs_indicator(base_url, data, indicator_que):\n \"\"\" One thread of adding indicators to CRITs\"\"\"\n while not indicator_que.empty():\n- indicator=indicator_que.get()\n+ indicator = indicator_que.get()\n if indicator[1] == 'IPv4':\n # using the IP API\n- url=base_url+'ips/'\n- data['add_indicator']=\"true\"\n- data['ip']=indicator[0]\n- data['ip_type']='Address - ipv4-addr'\n- data['reference']=indicator[3]\n+ url = base_url + 'ips/'\n+ data['add_indicator'] = \"true\"\n+ data['ip'] = indicator[0]\n+ data['ip_type'] = 'Address - ipv4-addr'\n+ data['reference'] = indicator[3]\n # getting the source automatically:\n- source=re.findall(r'\\/\\/(.*?)\\/',data['reference'])\n+ source = re.findall(r'\\/\\/(.*?)\\/', data['reference'])\n if source:\n- data['source']=source[0]\n- res = requests.post(url,data=data,verify=False)\n- if not res.status_code in [201,200,400]:\n+ data['source'] = source[0]\n+ res = requests.post(url, data=data, verify=False)\n+ if not res.status_code in [201, 200, 400]:\n logger.info(\"Issues with adding: %s\" % data['ip'])\n elif indicator[1] == \"FQDN\":\n # using the Domain API\n- url=base_url+'domains/'\n- data['add_indicator']=\"true\"\n- data['domain']=indicator[0]\n- data['reference']=indicator[3]\n+ url = base_url + 'domains/'\n+ data['add_indicator'] = \"true\"\n+ data['domain'] = indicator[0]\n+ data['reference'] = indicator[3]\n # getting the source automatically:\n- source=re.findall(r'\\/\\/(.*?)\\/',data['reference'])\n+ source = re.findall(r'\\/\\/(.*?)\\/', data['reference'])\n if source:\n- data['source']=source[0]\n- res = requests.post(url,data=data,verify=False)\n- if not res.status_code in [201,200,400]:\n+ data['source'] = source[0]\n+ res = requests.post(url, data=data, verify=False)\n+ if not res.status_code in [201, 200, 400]:\n logger.info(\"Issues with adding: %s\" % data['domain'])\n else:\n- logger.info(\"don't yet know what to do with: %s[%s]\" % (indicator[1],indicator[0]))\n+ logger.info(\"don't yet know what to do with: %s[%s]\" % (indicator[1], indicator[0]))\n+\n \n-def bale_CRITs(harvest,filename):\n+def bale_CRITs(harvest, filename):\n \"\"\" taking the output from combine and pushing it to the CRITs web API\"\"\"\n # checking the minimum requirements for parameters\n # it would be nice to have some metadata on the feeds that can be imported in the intel library:\n # -> confidence\n # -> type of feed (bot vs spam vs ddos, you get the picture)\n- data={'confidence':'medium'}\n- start_time=time.time()\n+ data = {'confidence': 'medium'}\n+ start_time = time.time()\n config = ConfigParser.SafeConfigParser()\n cfg_success = config.read('combine.cfg')\n if not cfg_success:\n logger.error('tiq_output: Could not read combine.cfg.\\n')\n logger.error('HINT: edit combine-example.cfg and save as combine.cfg.\\n')\n return\n- if config.has_option('Baler','crits_username'):\n- data['username']=config.get('Baler', 'crits_username')\n+ if config.has_option('Baler', 'crits_username'):\n+ data['username'] = config.get('Baler', 'crits_username')\n else:\n raise 'Please check the combine.cnf file for the crits_username 
field in the [Baler] section'\n- if config.has_option('Baler','crits_api_key'):\n- data['api_key']=config.get('Baler', 'crits_api_key')\n+ if config.has_option('Baler', 'crits_api_key'):\n+ data['api_key'] = config.get('Baler', 'crits_api_key')\n else:\n raise 'Please check the combine.cnf file for the crits_api_key field in the [Baler] section'\n- if config.has_option('Baler','crits_campaign'):\n- data['campaign']=config.get('Baler', 'crits_campaign')\n+ if config.has_option('Baler', 'crits_campaign'):\n+ data['campaign'] = config.get('Baler', 'crits_campaign')\n else:\n logger.info('Lacking a campaign name, we will default to \"combine.\" Errors might ensue if it does not exist in CRITs')\n- data['campaign']='combine'\n- if config.has_option('Baler','crits_url'):\n- base_url=config.get('Baler','crits_url')\n+ data['campaign'] = 'combine'\n+ if config.has_option('Baler', 'crits_url'):\n+ base_url = config.get('Baler', 'crits_url')\n else:\n raise 'Please check the combine.cnf file for the crits_url field in the [Baler] section'\n- if config.has_option('Baler','crits_maxThreads'):\n- maxThreads=int(config.get('Baler', 'crits_maxThreads'))\n+ if config.has_option('Baler', 'crits_maxThreads'):\n+ maxThreads = int(config.get('Baler', 'crits_maxThreads'))\n else:\n logger.info('No number of maximum Threads has been given, defaulting to 10')\n- maxThreads=10\n+ maxThreads = 10\n \n- data['source']='Combine'\n- data['method']='trawl'\n+ data['source'] = 'Combine'\n+ data['method'] = 'trawl'\n \n # initializing the Queue to the list of indicators in the harvest\n- ioc_queue=Queue()\n+ ioc_queue = Queue()\n for indicator in harvest:\n ioc_queue.put(indicator)\n- total_iocs=ioc_queue.qsize()\n+ total_iocs = ioc_queue.qsize()\n \n for x in range(maxThreads):\n- th=threading.Thread(target=bale_CRITs_indicator, args=(base_url,data,ioc_queue))\n+ th = threading.Thread(target=bale_CRITs_indicator, args=(base_url, data, ioc_queue))\n th.start()\n \n for x in threading.enumerate():\n- if x.name==\"MainThread\":\n+ if x.name == \"MainThread\":\n continue\n x.join()\n \n- logger.info('Output %d indicators to CRITs using %d threads. Operation tool %d seconds\\n' % (total_iocs,maxThreads,time.time()-start_time))\n+ logger.info('Output %d indicators to CRITs using %d threads. Operation tool %d seconds\\n' %\n+ (total_iocs, maxThreads, time.time() - start_time))\n+\n \n def bale(input_file, output_file, output_format, is_regular):\n config = ConfigParser.SafeConfigParser()\n@@ -203,13 +208,13 @@ def bale(input_file, output_file, output_format, is_regular):\n \n logger.info('Reading processed data from %s' % input_file)\n with open(input_file, 'rb') as f:\n- harvest = json.load(f)\n+ harvest = json.load(f, encoding='utf8')\n \n # TODO: also need plugins here (cf. 
#23)\n if is_regular:\n- format_funcs = {'csv': bale_reg_csv,'crits':bale_CRITs}\n+ format_funcs = {'csv': bale_reg_csv, 'crits': bale_CRITs}\n else:\n- format_funcs = {'csv': bale_enr_csv,'crits':bale_CRITs}\n+ format_funcs = {'csv': bale_enr_csv, 'crits': bale_CRITs}\n format_funcs[output_format](harvest, output_file)\n \n if __name__ == \"__main__\":\ndiff --git a/winnower.py b/winnower.py\n--- a/winnower.py\n+++ b/winnower.py\n@@ -9,30 +9,36 @@\n import sys\n \n from netaddr import IPAddress, IPRange, IPSet\n+from sortedcontainers import SortedDict\n \n from logger import get_logger\n import logging\n \n logger = get_logger('winnower')\n \n+# from http://en.wikipedia.org/wiki/Reserved_IP_addresses:\n+reserved_ranges = IPSet(['0.0.0.0/8', '100.64.0.0/10', '127.0.0.0/8', '192.88.99.0/24',\n+ '198.18.0.0/15', '198.51.100.0/24', '203.0.113.0/24', '233.252.0.0/24'])\n+gi_org = SortedDict()\n+\n \n def load_gi_org(filename):\n- gi_org = {}\n with open(filename, 'rb') as f:\n org_reader = csv.DictReader(f, fieldnames=['start', 'end', 'org'])\n for row in org_reader:\n- gi_org[IPRange(row['start'], row['end'])] = row['org']\n+ gi_org[row['start']] = (IPRange(row['start'], row['end']), unicode(row['org'], errors='replace'))\n+\n return gi_org\n \n \n-def org_by_addr(address, org_data):\n+def org_by_addr(address):\n as_num = None\n as_name = None\n- for iprange in org_data:\n- if address in iprange:\n- as_num, sep, as_name = org_data[iprange].partition(' ')\n- as_num = as_num.replace(\"AS\", \"\") # Making sure the variable only has the number\n- break\n+ gi_index = gi_org.bisect(str(int(address)))\n+ gi_net = gi_org[gi_org.iloc[gi_index - 1]]\n+ if address in gi_net[0]:\n+ as_num, sep, as_name = gi_net[1].partition(' ')\n+ as_num = as_num.replace(\"AS\", \"\") # Making sure the variable only has the number\n return as_num, as_name\n \n \n@@ -46,8 +52,8 @@ def maxhits(dns_records):\n return hostname\n \n \n-def enrich_IPv4(address, org_data, geo_data, dnsdb=None):\n- as_num, as_name = org_by_addr(address, org_data)\n+def enrich_IPv4(address, geo_data, dnsdb=None):\n+ as_num, as_name = org_by_addr(address)\n country = geo_data.country_code_by_addr('%s' % address)\n if dnsdb:\n hostname = maxhits(dnsdb.query_rdata_ip('%s' % address))\n@@ -73,12 +79,9 @@ def filter_date(records, date):\n \n \n def reserved(address):\n- # from http://en.wikipedia.org/wiki/Reserved_IP_addresses:\n- ranges = IPSet(['0.0.0.0/8', '100.64.0.0/10', '127.0.0.0/8', '192.88.99.0/24',\n- '198.18.0.0/15', '198.51.100.0/24', '203.0.113.0/24', '233.252.0.0/24'])\n a_reserved = address.is_reserved()\n a_private = address.is_private()\n- a_inr = address in ranges\n+ a_inr = address in reserved_ranges\n if a_reserved or a_private or a_inr:\n return True\n else:\n@@ -138,7 +141,7 @@ def winnow(in_file, out_file, enr_file):\n \n # TODO: make these locations configurable?\n logger.info('Loading GeoIP data')\n- org_data = load_gi_org('data/GeoIPASNum2.csv')\n+ gi_org = load_gi_org('data/GeoIPASNum2.csv')\n geo_data = pygeoip.GeoIP('data/GeoIP.dat', pygeoip.MEMORY_CACHE)\n \n wheat = []\n@@ -147,23 +150,21 @@ def winnow(in_file, out_file, enr_file):\n logger.info('Beginning winnowing process')\n for each in crop:\n (addr, addr_type, direction, source, note, date) = each\n- # TODO: enrich DNS indicators as well\n if addr_type == 'IPv4' and is_ipv4(addr):\n- logger.info('Enriching %s' % addr)\n+ #logger.info('Enriching %s' % addr)\n ipaddr = IPAddress(addr)\n if not reserved(ipaddr):\n wheat.append(each)\n if enrich_ip:\n- e_data = (addr, 
addr_type, direction, source, note, date) + enrich_IPv4(ipaddr, org_data, geo_data, dnsdb)\n+ e_data = (addr, addr_type, direction, source, note, date) + enrich_IPv4(ipaddr, geo_data, dnsdb)\n enriched.append(e_data)\n else:\n- e_data = (addr, addr_type, direction, source, note, date) + enrich_IPv4(ipaddr, org_data, geo_data)\n+ e_data = (addr, addr_type, direction, source, note, date) + enrich_IPv4(ipaddr, geo_data)\n enriched.append(e_data)\n else:\n logger.error('Found invalid address: %s from: %s' % (addr, source))\n elif addr_type == 'FQDN' and is_fqdn(addr):\n- # TODO: validate these (cf. https://github.com/mlsecproject/combine/issues/15 )\n- logger.info('Enriching %s' % addr)\n+ #logger.info('Enriching %s' % addr)\n wheat.append(each)\n if enrich_dns and dnsdb:\n e_data = (addr, addr_type, direction, source, note, date, enrich_FQDN(addr, date, dnsdb))\n@@ -173,10 +174,12 @@ def winnow(in_file, out_file, enr_file):\n \n logger.info('Dumping results')\n with open(out_file, 'wb') as f:\n- json.dump(wheat, f, indent=2)\n+ w_data = json.dumps(wheat, indent=2, ensure_ascii=False).encode('utf8')\n+ f.write(w_data)\n \n with open(enr_file, 'wb') as f:\n- json.dump(enriched, f, indent=2)\n+ e_data = json.dumps(enriched, indent=2, ensure_ascii=False).encode('utf8')\n+ f.write(e_data)\n \n \n if __name__ == \"__main__\":\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2014-12-26T18:31:08Z"}
PythonDataset/train/ctlearn-task-instances.jsonl.all ADDED
The diff for this file is too large to render. See raw diff
 
PythonDataset/train/cwltool-task-instances.jsonl.all ADDED
The diff for this file is too large to render. See raw diff
 
PythonDataset/train/discogs_client-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "discogs/discogs_client", "pull_number": 43, "instance_id": "discogs__discogs_client-43", "issue_numbers": "", "base_commit": "7a97275636e3524625d2743c93ac3c8826570bf3", "patch": "diff --git a/discogs_client/__init__.py b/discogs_client/__init__.py\n--- a/discogs_client/__init__.py\n+++ b/discogs_client/__init__.py\n@@ -1,4 +1,6 @@\n-__version_info__ = (2,0,2)\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n+__version_info__ = 2, 0, 2\n __version__ = '2.0.2'\n \n from discogs_client.client import Client\ndiff --git a/discogs_client/client.py b/discogs_client/client.py\n--- a/discogs_client/client.py\n+++ b/discogs_client/client.py\n@@ -1,13 +1,20 @@\n-import requests\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n+import warnings\n import json\n-import oauth2\n-import urllib\n+try:\n+ # python2\n+ from urllib import urlencode\n+except ImportError:\n+ # python3\n+ from urllib.parse import urlencode\n \n from discogs_client import models\n from discogs_client.exceptions import ConfigurationError, HTTPError\n from discogs_client.utils import update_qs\n from discogs_client.fetchers import RequestsFetcher, OAuth2Fetcher\n \n+\n class Client(object):\n _base_url = 'https://api.discogs.com'\n _request_token_url = 'https://api.discogs.com/oauth/request_token'\n@@ -44,9 +51,10 @@ def get_authorize_url(self, callback_url=None):\n \n params = {}\n params['User-Agent'] = self.user_agent\n+ params['Content-Type'] = 'application/x-www-form-urlencoded'\n if callback_url:\n params['oauth_callback'] = callback_url\n- postdata = urllib.urlencode(params)\n+ postdata = urlencode(params)\n \n content, status_code = self._fetcher.fetch(self, 'POST', self._request_token_url, data=postdata, headers=params)\n if status_code != 200:\n@@ -55,7 +63,7 @@ def get_authorize_url(self, callback_url=None):\n token, secret = self._fetcher.store_token_from_qs(content)\n \n params = {'oauth_token': token}\n- query_string = urllib.urlencode(params)\n+ query_string = urlencode(params)\n \n return (token, secret, '?'.join((self._authorize_url, query_string)))\n \n@@ -63,6 +71,9 @@ def get_access_token(self, verifier):\n \"\"\"\n Uses the verifier to exchange a request token for an access token.\n \"\"\"\n+ if isinstance(verifier, bytes):\n+ verifier = verifier.decode('utf8')\n+\n self._fetcher.set_verifier(verifier)\n \n params = {}\n@@ -82,7 +93,7 @@ def _check_user_agent(self):\n \n def _request(self, method, url, data=None):\n if self.verbose:\n- print ' '.join((method, url))\n+ print(' '.join((method, url)))\n \n self._check_user_agent()\n \n@@ -92,14 +103,14 @@ def _request(self, method, url, data=None):\n }\n \n if data:\n- headers['Content-Type'] = 'application/json'\n+ headers['Content-Type'] = 'application/x-www-form-urlencoded'\n \n content, status_code = self._fetcher.fetch(self, method, url, data=data, headers=headers)\n \n if status_code == 204:\n return None\n \n- body = json.loads(content)\n+ body = json.loads(content.decode('utf8'))\n \n if 200 <= status_code < 300:\n return body\ndiff --git a/discogs_client/exceptions.py b/discogs_client/exceptions.py\n--- a/discogs_client/exceptions.py\n+++ b/discogs_client/exceptions.py\n@@ -1,3 +1,6 @@\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n+\n class DiscogsAPIError(Exception):\n \"\"\"Root Exception class for Discogs API errors.\"\"\"\n pass\ndiff --git a/discogs_client/fetchers.py b/discogs_client/fetchers.py\n--- 
a/discogs_client/fetchers.py\n+++ b/discogs_client/fetchers.py\n@@ -1,8 +1,17 @@\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n import requests\n-import oauth2\n+from requests.api import request\n+from oauthlib import oauth1\n import json\n-import urlparse\n import os\n+try:\n+ # python2\n+ from urlparse import parse_qsl\n+except ImportError:\n+ # python3\n+ from urllib.parse import parse_qsl\n+\n \n class Fetcher(object):\n \"\"\"\n@@ -12,8 +21,14 @@ class Fetcher(object):\n (It's a slightly leaky abstraction designed to make testing easier.)\n \"\"\"\n def fetch(self, client, method, url, data=None, headers=None, json=True):\n- # Should return (content, status_code)\n- raise NotImplemented\n+ \"\"\"Fetch the given request\n+\n+ Returns\n+ -------\n+ content : str (python2) or bytes (python3)\n+ status_code : int\n+ \"\"\"\n+ raise NotImplementedError()\n \n \n class LoggingDelegator(object):\n@@ -41,42 +56,38 @@ def fetch(self, client, method, url, data=None, headers=None, json=True):\n class OAuth2Fetcher(Fetcher):\n \"\"\"Fetches via HTTP + OAuth 1.0a from the Discogs API.\"\"\"\n def __init__(self, consumer_key, consumer_secret, token=None, secret=None):\n- consumer = oauth2.Consumer(consumer_key, consumer_secret)\n- token_obj = None\n-\n- if token and secret:\n- token_obj = oauth2.Token(token, secret)\n-\n- self.oauth_client = oauth2.Client(consumer, token_obj)\n+ self.client = oauth1.Client(consumer_key, client_secret=consumer_secret)\n+ self.store_token(token, secret)\n \n def store_token_from_qs(self, query_string):\n- token_dict = dict(urlparse.parse_qsl(query_string))\n- token = token_dict['oauth_token']\n- secret = token_dict['oauth_token_secret']\n+ token_dict = dict(parse_qsl(query_string))\n+ token = token_dict[b'oauth_token'].decode('utf8')\n+ secret = token_dict[b'oauth_token_secret'].decode('utf8')\n self.store_token(token, secret)\n return token, secret\n \n def forget_token(self):\n- self.oauth_client.token = None\n+ self.store_token(None, None)\n \n def store_token(self, token, secret):\n- self.oauth_client.token = oauth2.Token(token, secret)\n+ self.client.resource_owner_key = token\n+ self.client.resource_owner_secret = secret\n \n def set_verifier(self, verifier):\n- self.oauth_client.token.set_verifier(verifier)\n+ self.client.verifier = verifier\n \n def fetch(self, client, method, url, data=None, headers=None, json_format=True):\n- if data:\n- body = json.dumps(data) if json_format else data\n- resp, content = self.oauth_client.request(url, method, body, headers=headers)\n- else:\n- resp, content = self.oauth_client.request(url, method, headers=headers)\n- return content, int(resp['status'])\n+ body = json.dumps(data) if json_format and data else data\n+ uri, headers, body = self.client.sign(url, http_method=method,\n+ body=data, headers=headers)\n+\n+ resp = request(method, uri, headers=headers, data=body)\n+ return resp.content, resp.status_code\n \n \n class FilesystemFetcher(Fetcher):\n \"\"\"Fetches from a directory of files.\"\"\"\n- default_response = json.dumps({'message': 'Resource not found.'}), 404\n+ default_response = json.dumps({'message': 'Resource not found.'}).encode('utf8'), 404\n \n def __init__(self, base_path):\n self.base_path = base_path\n@@ -92,7 +103,7 @@ def fetch(self, client, method, url, data=None, headers=None, json=True):\n path = os.path.join(self.base_path, base_name)\n try:\n with open(path, 'r') as f:\n- content = f.read()\n+ content = f.read().encode('utf8') # return bytes not 
unicode\n return content, 200\n except:\n return self.default_response\n@@ -100,7 +111,7 @@ def fetch(self, client, method, url, data=None, headers=None, json=True):\n \n class MemoryFetcher(Fetcher):\n \"\"\"Fetches from a dict of URL -> (content, status_code).\"\"\"\n- default_response = json.dumps({'message': 'Resource not found.'}), 404\n+ default_response = json.dumps({'message': 'Resource not found.'}).encode('utf8'), 404\n \n def __init__(self, responses):\n self.responses = responses\ndiff --git a/discogs_client/models.py b/discogs_client/models.py\n--- a/discogs_client/models.py\n+++ b/discogs_client/models.py\n@@ -1,3 +1,7 @@\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n+from six import with_metaclass\n+\n from discogs_client.exceptions import HTTPError\n from discogs_client.utils import parse_timestamp, update_qs, omit_none\n \n@@ -171,14 +175,14 @@ class ObjectCollection(Field):\n \n class APIObjectMeta(type):\n def __new__(cls, name, bases, dict_):\n- for k, v in dict_.iteritems():\n+ for k, v in dict_.items():\n if isinstance(v, Field):\n dict_[k] = v.to_descriptor(k)\n return super(APIObjectMeta, cls).__new__(cls, name, bases, dict_)\n \n \n-class APIObject(object):\n- __metaclass__ = APIObjectMeta\n+class APIObject(with_metaclass(APIObjectMeta, object)):\n+ pass\n \n \n class PrimaryAPIObject(APIObject):\n@@ -307,7 +311,7 @@ def _url_for_page(self, page):\n return update_qs(self.url, base_qs)\n \n def sort(self, key, order='asc'):\n- if not order in ('asc', 'desc'):\n+ if order not in ('asc', 'desc'):\n raise ValueError(\"Order must be one of 'asc', 'desc'\")\n self._sort_key = key\n self._sort_order = order\n@@ -332,7 +336,7 @@ def count(self):\n return self._num_items\n \n def page(self, index):\n- if not index in self._pages:\n+ if index not in self._pages:\n data = self.client._get(self._url_for_page(index))\n self._pages[index] = [\n self._transform(item) for item in data[self._list_key]\n@@ -343,12 +347,12 @@ def _transform(self, item):\n return item\n \n def __getitem__(self, index):\n- page_index = index / self.per_page + 1\n+ page_index = index // self.per_page + 1\n offset = index % self.per_page\n \n try:\n page = self.page(page_index)\n- except HTTPError, e:\n+ except HTTPError as e:\n if e.status_code == 404:\n raise IndexError(e.msg)\n else:\n@@ -360,7 +364,7 @@ def __len__(self):\n return self.count\n \n def __iter__(self):\n- for i in xrange(1, self.pages + 1):\n+ for i in range(1, self.pages + 1):\n page = self.page(i)\n for item in page:\n yield item\n@@ -381,7 +385,7 @@ class Wantlist(PaginatedList):\n def add(self, release, notes=None, notes_public=None, rating=None):\n release_id = release.id if isinstance(release, Release) else release\n data = {\n- 'release_id': release_id,\n+ 'release_id': str(release_id),\n 'notes': notes,\n 'notes_public': notes_public,\n 'rating': rating,\ndiff --git a/discogs_client/utils.py b/discogs_client/utils.py\n--- a/discogs_client/utils.py\n+++ b/discogs_client/utils.py\n@@ -1,5 +1,15 @@\n+from __future__ import unicode_literals\n+\n from datetime import datetime\n-from urllib2 import quote\n+try:\n+ # python2\n+ from urllib2 import quote\n+ to_str = unicode\n+except ImportError:\n+ # python3\n+ from urllib.parse import quote\n+ to_str = str\n+\n \n def parse_timestamp(timestamp):\n \"\"\"Convert an ISO 8601 timestamp into a datetime.\"\"\"\n@@ -8,11 +18,12 @@ def parse_timestamp(timestamp):\n \n def update_qs(url, params):\n \"\"\"A not-very-intelligent function to glom 
parameters onto a query string.\"\"\"\n- joined_qs = '&'.join('='.join((str(k), quote(str(v)))) for k, v in params.iteritems())\n+ joined_qs = '&'.join('='.join((str(k), quote(to_str(v).encode('utf8'))))\n+ for k, v in params.items())\n separator = '&' if '?' in url else '?'\n return url + separator + joined_qs\n \n \n def omit_none(dict_):\n \"\"\"Removes any key from a dict that has a value of None.\"\"\"\n- return dict((k, v) for k, v in dict_.iteritems() if v is not None)\n+ return dict((k, v) for k, v in dict_.items() if v is not None)\ndiff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -21,10 +21,10 @@\n ],\n install_requires=[\n 'requests',\n- 'oauth2',\n+ 'six',\n+ 'oauthlib',\n ],\n packages=[\n 'discogs_client',\n ],\n )\n-\n", "test_patch": "diff --git a/discogs_client/tests/__init__.py b/discogs_client/tests/__init__.py\n--- a/discogs_client/tests/__init__.py\n+++ b/discogs_client/tests/__init__.py\n@@ -5,6 +5,7 @@\n from discogs_client.fetchers import LoggingDelegator, FilesystemFetcher, \\\n MemoryFetcher\n \n+\n class DiscogsClientTestCase(unittest.TestCase):\n def setUp(self):\n \n@@ -18,9 +19,9 @@ def setUp(self):\n \n # Memory client\n responses = {\n- '/artists/1': ('{\"id\": 1, \"name\": \"Badger\"}', 200),\n- '/500': ('{\"message\": \"mushroom\"}', 500),\n- '/204': ('', 204),\n+ '/artists/1': (b'{\"id\": 1, \"name\": \"Badger\"}', 200),\n+ '/500': (b'{\"message\": \"mushroom\"}', 500),\n+ '/204': (b'', 204),\n }\n self.m = Client('ua')\n self.m._base_url = ''\n@@ -40,6 +41,7 @@ def assertPosted(self, assert_url, assert_data):\n self.assertEqual(url, assert_url)\n self.assertEqual(data, json.dumps(assert_data))\n \n+\n def suite():\n from discogs_client.tests import test_core, test_models, test_fetchers\n suite = unittest.TestSuite(test_core.suite())\ndiff --git a/discogs_client/tests/make_symlinks.py b/discogs_client/tests/make_symlinks.py\nnew file mode 100755\n--- /dev/null\n+++ b/discogs_client/tests/make_symlinks.py\n@@ -0,0 +1,16 @@\n+#!/usr/bin/env python\n+import os\n+import sys\n+from itertools import permutations\n+\n+for name in sys.argv[1:]:\n+ print(\"doing {}\".format(name))\n+ root, next = name.split('?')\n+ data, ext = next.split('.')\n+ elems = data.split('&')\n+ for permut in permutations(elems):\n+ link_name = \"{}?{}.{}\".format(root, '&'.join(permut), ext)\n+ if link_name == name:\n+ continue\n+ os.symlink(name, link_name)\n+ print(\"wrote {}\".format(link_name))\ndiff --git a/discogs_client/tests/res/artists/1/releases?page=1&per_page=50.json b/discogs_client/tests/res/artists/1/releases?page=1&per_page=50.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/artists/1/releases?page=1&per_page=50.json\n@@ -0,0 +1 @@\n+releases?per_page=50&page=1.json\n\\ndiff --git a/discogs_client/tests/res/artists/1/releases?page=2&per_page=50.json b/discogs_client/tests/res/artists/1/releases?page=2&per_page=50.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/artists/1/releases?page=2&per_page=50.json\n@@ -0,0 +1 @@\n+releases?per_page=50&page=2.json\n\\ndiff --git a/discogs_client/tests/res/database/search?page=1&per_page=50&q=trash80.json b/discogs_client/tests/res/database/search?page=1&per_page=50&q=trash80.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/database/search?page=1&per_page=50&q=trash80.json\n@@ -0,0 +1 @@\n+search?q=trash80&per_page=50&page=1.json\n\\ndiff --git 
a/discogs_client/tests/res/database/search?page=1&q=trash80&per_page=50.json b/discogs_client/tests/res/database/search?page=1&q=trash80&per_page=50.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/database/search?page=1&q=trash80&per_page=50.json\n@@ -0,0 +1 @@\n+search?q=trash80&per_page=50&page=1.json\n\\ndiff --git a/discogs_client/tests/res/database/search?per_page=50&page=1&q=trash80.json b/discogs_client/tests/res/database/search?per_page=50&page=1&q=trash80.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/database/search?per_page=50&page=1&q=trash80.json\n@@ -0,0 +1 @@\n+search?q=trash80&per_page=50&page=1.json\n\\ndiff --git a/discogs_client/tests/res/database/search?per_page=50&q=trash80&page=1.json b/discogs_client/tests/res/database/search?per_page=50&q=trash80&page=1.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/database/search?per_page=50&q=trash80&page=1.json\n@@ -0,0 +1 @@\n+search?q=trash80&per_page=50&page=1.json\n\\ndiff --git a/discogs_client/tests/res/database/search?q=trash80&page=1&per_page=50.json b/discogs_client/tests/res/database/search?q=trash80&page=1&per_page=50.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/database/search?q=trash80&page=1&per_page=50.json\n@@ -0,0 +1 @@\n+search?q=trash80&per_page=50&page=1.json\n\\ndiff --git a/discogs_client/tests/res/masters/4242/versions?page=1&per_page=50.json b/discogs_client/tests/res/masters/4242/versions?page=1&per_page=50.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/masters/4242/versions?page=1&per_page=50.json\n@@ -0,0 +1 @@\n+versions?per_page=50&page=1.json\n\\ndiff --git a/discogs_client/tests/res/users/example/wants?page=1&per_page=50.json b/discogs_client/tests/res/users/example/wants?page=1&per_page=50.json\nnew file mode 120000\n--- /dev/null\n+++ b/discogs_client/tests/res/users/example/wants?page=1&per_page=50.json\n@@ -0,0 +1 @@\n+wants?per_page=50&page=1.json\n\\ndiff --git a/discogs_client/tests/test_core.py b/discogs_client/tests/test_core.py\n--- a/discogs_client/tests/test_core.py\n+++ b/discogs_client/tests/test_core.py\n@@ -1,9 +1,12 @@\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n import unittest\n from discogs_client import Client\n from discogs_client.tests import DiscogsClientTestCase\n from discogs_client.exceptions import ConfigurationError, HTTPError\n from datetime import datetime\n \n+\n class CoreTestCase(DiscogsClientTestCase):\n def test_user_agent(self):\n \"\"\"User-Agent should be properly set\"\"\"\n@@ -28,7 +31,7 @@ def test_caching(self):\n self.assertEqual(a.name, 'Persuader, The')\n self.assertGot('/artists/1')\n \n- self.assertEqual(a.real_name, u'Jesper Dahlb\\u00e4ck')\n+ self.assertEqual(a.real_name, 'Jesper Dahlb\\u00e4ck')\n self.assertEqual(len(self.d._fetcher.requests), 1)\n \n # Get a key that's not in our cache\n@@ -44,7 +47,7 @@ def test_equality(self):\n \"\"\"APIObjects of the same class are equal if their IDs are\"\"\"\n a1 = self.d.artist(1)\n a1_ = self.d.artist(1)\n- a2 = self.d.artist(2)\n+ self.d.artist(2)\n \n r1 = self.d.release(1)\n \n@@ -65,6 +68,7 @@ def test_object_field(self):\n def test_read_only_simple_field(self):\n \"\"\"Can't write to a SimpleField when writable=False\"\"\"\n u = self.d.user('example')\n+\n def fail():\n u.rank = 9001\n self.assertRaises(AttributeError, fail)\n@@ -72,6 +76,7 @@ def fail():\n def test_read_only_object_field(self):\n \"\"\"Can't write to an 
ObjectField\"\"\"\n m = self.d.master(4242)\n+\n def fail():\n m.main_release = 'lol!'\n self.assertRaises(AttributeError, fail)\n@@ -103,7 +108,11 @@ def test_pagination(self):\n results.per_page = 10\n self.assertTrue(results._num_pages is None)\n \n+\n def suite():\n suite = unittest.TestSuite()\n suite = unittest.TestLoader().loadTestsFromTestCase(CoreTestCase)\n return suite\n+\n+if __name__ == '__main__':\n+ unittest.main(defaultTest='suite')\ndiff --git a/discogs_client/tests/test_fetchers.py b/discogs_client/tests/test_fetchers.py\n--- a/discogs_client/tests/test_fetchers.py\n+++ b/discogs_client/tests/test_fetchers.py\n@@ -1,7 +1,10 @@\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n import unittest\n from discogs_client.tests import DiscogsClientTestCase\n from discogs_client.exceptions import HTTPError\n \n+\n class FetcherTestCase(DiscogsClientTestCase):\n def test_memory_fetcher(self):\n \"\"\"Client can fetch responses with MemoryFetcher\"\"\"\n@@ -23,3 +26,5 @@ def suite():\n suite = unittest.TestLoader().loadTestsFromTestCase(FetcherTestCase)\n return suite\n \n+if __name__ == '__main__':\n+ unittest.main(defaultTest='suite')\ndiff --git a/discogs_client/tests/test_models.py b/discogs_client/tests/test_models.py\n--- a/discogs_client/tests/test_models.py\n+++ b/discogs_client/tests/test_models.py\n@@ -1,7 +1,10 @@\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n import unittest\n from discogs_client.models import Artist, Release\n from discogs_client.tests import DiscogsClientTestCase\n-from discogs_client.exceptions import ConfigurationError, HTTPError\n+from discogs_client.exceptions import HTTPError\n+\n \n class ModelsTestCase(DiscogsClientTestCase):\n def test_artist(self):\n@@ -79,7 +82,7 @@ def test_master_versions(self):\n def test_user_writable(self):\n \"\"\"User profile can be updated\"\"\"\n u = self.d.user('example')\n- u.name # Trigger a fetch\n+ u.name # Trigger a fetch\n \n method, url, data, headers = self.d._fetcher.requests[0]\n self.assertEqual(method, 'GET')\n@@ -111,8 +114,8 @@ def test_wantlist(self):\n \n # Stub out expected responses\n self.m._fetcher.fetcher.responses = {\n- '/users/example/wants/5': ('{\"id\": 5}', 201),\n- '/users/example/wants/1': ('', 204),\n+ '/users/example/wants/5': (b'{\"id\": 5}', 201),\n+ '/users/example/wants/1': (b'', 204),\n }\n \n # Now bind the user to the memory client\n@@ -148,3 +151,6 @@ def suite():\n suite = unittest.TestSuite()\n suite = unittest.TestLoader().loadTestsFromTestCase(ModelsTestCase)\n return suite\n+\n+if __name__ == '__main__':\n+ unittest.main(defaultTest='suite')\ndiff --git a/discogs_client/tests/test_utils.py b/discogs_client/tests/test_utils.py\n--- a/discogs_client/tests/test_utils.py\n+++ b/discogs_client/tests/test_utils.py\n@@ -1,15 +1,26 @@\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n import unittest\n from datetime import datetime\n from discogs_client.tests import DiscogsClientTestCase\n from discogs_client import utils\n \n+\n class UtilsTestCase(DiscogsClientTestCase):\n def test_update_qs(self):\n \"\"\"update_qs helper works as intended\"\"\"\n u = utils.update_qs\n self.assertEqual(u('http://example.com', {'foo': 'bar'}), 'http://example.com?foo=bar')\n self.assertEqual(u('http://example.com?foo=bar', {'foo': 'baz'}), 'http://example.com?foo=bar&foo=baz')\n- self.assertEqual(u('http://example.com?c=3&a=yep', {'a': 1, 'b': '1'}), 
'http://example.com?c=3&a=yep&a=1&b=1')\n+ # be careful for dict iteration order is not deterministic\n+ result = u('http://example.com?c=3&a=yep', {'a': 1, 'b': '1'})\n+ try:\n+ self.assertEqual(result, 'http://example.com?c=3&a=yep&a=1&b=1')\n+ except AssertionError:\n+ self.assertEqual(result, 'http://example.com?c=3&a=yep&b=1&a=1')\n+\n+ self.assertEqual(u('http://example.com', {'a': 't\\xe9st'}),\n+ 'http://example.com?a=t%C3%A9st')\n \n def test_omit_none(self):\n o = utils.omit_none\n@@ -32,7 +43,11 @@ def test_parse_timestamp(self):\n self.assertEqual(p('2012-01-01T00:00:00'), datetime(2012, 1, 1, 0, 0, 0))\n self.assertEqual(p('2001-05-25T00:00:42'), datetime(2001, 5, 25, 0, 0, 42))\n \n+\n def suite():\n suite = unittest.TestSuite()\n suite = unittest.TestLoader().loadTestsFromTestCase(UtilsTestCase)\n return suite\n+\n+if __name__ == '__main__':\n+ unittest.main(defaultTest='suite')\n", "problem_statement": "", "hints_text": "", "created_at": "2015-01-16T17:09:41Z"}
PythonDataset/train/django-rq-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "rq/django-rq", "pull_number": 384, "instance_id": "rq__django-rq-384", "issue_numbers": "", "base_commit": "87850ae4d5e9cc9d112acb993bf14cca503093b7", "patch": "diff --git a/django_rq/admin.py b/django_rq/admin.py\n--- a/django_rq/admin.py\n+++ b/django_rq/admin.py\n@@ -1,5 +1,6 @@\n from django.contrib import admin\n-from django_rq import settings\n+\n+from . import settings\n \n \n if settings.SHOW_ADMIN_LINK:\ndiff --git a/django_rq/decorators.py b/django_rq/decorators.py\n--- a/django_rq/decorators.py\n+++ b/django_rq/decorators.py\n@@ -1,7 +1,6 @@\n from rq.decorators import job as _rq_job\n \n from django.conf import settings\n-from django.utils import six\n \n from .queues import get_queue\n \n@@ -24,7 +23,7 @@ def job(func_or_queue, connection=None, *args, **kwargs):\n func = None\n queue = func_or_queue\n \n- if isinstance(queue, six.string_types):\n+ if isinstance(queue, str):\n try:\n queue = get_queue(queue)\n if connection is None:\ndiff --git a/django_rq/jobs.py b/django_rq/jobs.py\n--- a/django_rq/jobs.py\n+++ b/django_rq/jobs.py\n@@ -2,7 +2,6 @@\n from rq.utils import import_attribute\n \n from django.conf import settings\n-from django.utils import six\n \n \n def get_job_class(job_class=None):\n@@ -16,6 +15,6 @@ def get_job_class(job_class=None):\n if job_class is None:\n job_class = RQ.get('JOB_CLASS', Job)\n \n- if isinstance(job_class, six.string_types):\n+ if isinstance(job_class, str):\n job_class = import_attribute(job_class)\n return job_class\ndiff --git a/django_rq/management/commands/rqenqueue.py b/django_rq/management/commands/rqenqueue.py\n--- a/django_rq/management/commands/rqenqueue.py\n+++ b/django_rq/management/commands/rqenqueue.py\n@@ -3,7 +3,7 @@\n from django.core.management.base import BaseCommand\n from django.utils.version import get_version\n \n-from django_rq import get_queue\n+from ... import get_queue\n \n \n class Command(BaseCommand):\ndiff --git a/django_rq/management/commands/rqscheduler.py b/django_rq/management/commands/rqscheduler.py\n--- a/django_rq/management/commands/rqscheduler.py\n+++ b/django_rq/management/commands/rqscheduler.py\n@@ -4,7 +4,8 @@\n from django.core.exceptions import ImproperlyConfigured\n from django.core.management.base import BaseCommand\n from django.utils.version import get_version\n-from django_rq import get_scheduler\n+\n+from ... 
import get_scheduler\n \n try:\n from rq_scheduler.utils import setup_loghandlers\ndiff --git a/django_rq/management/commands/rqstats.py b/django_rq/management/commands/rqstats.py\n--- a/django_rq/management/commands/rqstats.py\n+++ b/django_rq/management/commands/rqstats.py\n@@ -2,7 +2,8 @@\n import time\n \n from django.core.management.base import BaseCommand\n-from django_rq.utils import get_statistics\n+\n+from ...utils import get_statistics\n \n \n class Command(BaseCommand):\ndiff --git a/django_rq/management/commands/rqworker.py b/django_rq/management/commands/rqworker.py\n--- a/django_rq/management/commands/rqworker.py\n+++ b/django_rq/management/commands/rqworker.py\n@@ -10,7 +10,8 @@\n from django.core.management.base import BaseCommand\n from django.db import connections\n from django.utils.version import get_version\n-from django_rq.workers import get_worker\n+\n+from ...workers import get_worker\n \n \n def reset_db_connections():\ndiff --git a/django_rq/models.py b/django_rq/models.py\n--- a/django_rq/models.py\n+++ b/django_rq/models.py\n@@ -1,6 +1,6 @@\n from django.core.signals import got_request_exception, request_finished\n \n-from django_rq import thread_queue\n+from . import thread_queue\n from .queues import get_commit_mode\n \n \ndiff --git a/django_rq/queues.py b/django_rq/queues.py\n--- a/django_rq/queues.py\n+++ b/django_rq/queues.py\n@@ -3,14 +3,12 @@\n import redis\n from redis.sentinel import Sentinel\n from rq.queue import Queue\n-from rq.registry import FailedJobRegistry\n from rq.utils import import_attribute\n \n from django.conf import settings\n from django.core.exceptions import ImproperlyConfigured\n-from django.utils import six\n-from django_rq import thread_queue\n \n+from . import thread_queue\n from .jobs import get_job_class\n \n \n@@ -45,7 +43,7 @@ def get_queue_class(config=None, queue_class=None):\n if config:\n queue_class = config.get('QUEUE_CLASS', queue_class)\n \n- if isinstance(queue_class, six.string_types):\n+ if isinstance(queue_class, str):\n queue_class = import_attribute(queue_class)\n return queue_class\n \n@@ -276,7 +274,7 @@ def get_scheduler(name='default', queue=None, interval=60):\n RQ = getattr(settings, 'RQ', {})\n scheduler_class = RQ.get('SCHEDULER_CLASS', DjangoScheduler)\n \n- if isinstance(scheduler_class, six.string_types):\n+ if isinstance(scheduler_class, str):\n scheduler_class = import_attribute(scheduler_class)\n \n if queue is None:\ndiff --git a/django_rq/urls.py b/django_rq/urls.py\n--- a/django_rq/urls.py\n+++ b/django_rq/urls.py\n@@ -1,12 +1,7 @@\n-from django_rq import views\n+from django.urls import re_path\n+\n+from . 
import views\n \n-try:\n- # Fixed supporting for Django 2.0 and greater\n- # See: https://docs.djangoproject.com/en/2.0/ref/urls/#django.urls.re_path\n- from django.urls import re_path\n-except ImportError:\n- # Fallback for Django 1.11 and lower\n- from django.conf.urls import url as re_path\n \n urlpatterns = [\n re_path(r'^$',\ndiff --git a/django_rq/workers.py b/django_rq/workers.py\n--- a/django_rq/workers.py\n+++ b/django_rq/workers.py\n@@ -2,10 +2,9 @@\n from rq.utils import import_attribute\n \n from django.conf import settings\n-from django.utils import six\n \n from .jobs import get_job_class\n-from .queues import filter_connection_params, get_connection, get_queues\n+from .queues import get_queues\n \n \n def get_exception_handlers():\n@@ -33,7 +32,7 @@ def get_worker_class(worker_class=None):\n if 'WORKER_CLASS' in RQ:\n worker_class = RQ.get('WORKER_CLASS')\n \n- if isinstance(worker_class, six.string_types):\n+ if isinstance(worker_class, str):\n worker_class = import_attribute(worker_class)\n return worker_class\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,7 @@\n zip_safe=False,\n include_package_data=True,\n package_data={'': ['README.rst']},\n- install_requires=['django>=1.8.0', 'rq>=1.0', 'redis>=3'],\n+ install_requires=['django>=2.0', 'rq>=1.0', 'redis>=3'],\n extras_require={\n 'Sentry': ['raven>=6.1.0'],\n 'testing': ['mock>=2.0.0'],\n@@ -27,8 +27,6 @@\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n", "test_patch": "diff --git a/django_rq/tests/test_views.py b/django_rq/tests/test_views.py\n--- a/django_rq/tests/test_views.py\n+++ b/django_rq/tests/test_views.py\n@@ -1,15 +1,10 @@\n import uuid\n+from unittest.mock import patch, PropertyMock\n \n from django.contrib.auth.models import User\n from django.test import TestCase, override_settings\n from django.test.client import Client\n-\n-try:\n- from django.urls import reverse\n-except ImportError:\n- from django.core.urlresolvers import reverse\n-\n-from mock import patch, PropertyMock\n+from django.urls import reverse\n \n from rq.job import Job, JobStatus\n from rq.registry import (DeferredJobRegistry, FailedJobRegistry,\n@@ -17,7 +12,7 @@\n \n from django_rq import get_queue\n from django_rq.workers import get_worker\n-from django_rq.tests.fixtures import access_self\n+from .fixtures import access_self\n from .utils import get_queue_index\n \n \n@@ -81,7 +76,7 @@ def failing_job():\n {'requeue': 'Requeue'})\n self.assertIn(job, queue.jobs)\n job.delete()\n- \n+\n def test_requeue_all(self):\n \"\"\"\n Ensure that requeueing all failed job work properly\ndiff --git a/django_rq/tests/tests.py b/django_rq/tests/tests.py\n--- a/django_rq/tests/tests.py\n+++ b/django_rq/tests/tests.py\n@@ -1,20 +1,13 @@\n import datetime\n import time\n-from unittest import skipIf\n+from unittest import skipIf, mock\n+from unittest.mock import patch, PropertyMock, MagicMock\n from uuid import uuid4\n \n+from django.conf import settings\n from django.core.management import call_command\n from django.test import TestCase, override_settings\n-\n-try:\n- from django.urls import reverse\n-except ImportError:\n- from django.core.urlresolvers import reverse\n-\n-from django.conf import settings\n-\n-import mock\n-from mock 
import patch, PropertyMock, MagicMock\n+from django.urls import reverse\n \n from rq import get_current_job, Queue\n from rq.job import Job\n@@ -122,8 +115,8 @@ def test_get_connection_sentinel(self, sentinel_class_mock):\n connection = get_connection('sentinel')\n \n self.assertEqual(connection, sentinel_mock)\n- sentinel_class_mock.assert_called_once()\n- sentinel_mock.master_for.assert_called_once()\n+ self.assertEqual(sentinel_mock.master_for.call_count, 1)\n+ self.assertEqual(sentinel_class_mock.call_count, 1)\n \n sentinel_instances = sentinel_class_mock.call_args[0][0]\n self.assertListEqual(config['SENTINELS'], sentinel_instances)\ndiff --git a/django_rq/tests/urls.py b/django_rq/tests/urls.py\n--- a/django_rq/tests/urls.py\n+++ b/django_rq/tests/urls.py\n@@ -1,20 +1,14 @@\n-from django.conf.urls import include, url\n from django.contrib import admin\n-from django.core.exceptions import ImproperlyConfigured\n+from django.urls import path\n \n-from django_rq.tests import views\n from django_rq.urls import urlpatterns\n \n+from . import views\n+\n urlpatterns = [\n- # url(r'^django-rq/', (urlpatterns, '', 'django_rq')),\n- url(r'^admin/', admin.site.urls),\n- url(r'^success/$', views.success, name='success'),\n- url(r'^error/$', views.error, name='error'),\n+ path('admin/', admin.site.urls),\n+ path('success/', views.success, name='success'),\n+ path('error/', views.error, name='error'),\n+ path('django-rq/', (urlpatterns, '', 'django_rq'))\n ]\n \n-try:\n- # For Django < 2.0\n- urlpatterns += [url(r'^django-rq/', include('django_rq.urls'))]\n-except ImproperlyConfigured:\n- # Django >= 2.0 URL dispatcher syntax\n- urlpatterns += [url(r'^django-rq/', (urlpatterns, '', 'django_rq'))]\ndiff --git a/integration_test/_tests.py b/integration_test/_tests.py\n--- a/integration_test/_tests.py\n+++ b/integration_test/_tests.py\n@@ -10,11 +10,11 @@\n import sys\n import time\n import unittest\n+from urllib.parse import urlunsplit import urlunsplit\n \n import psycopg2\n import requests\n from django.conf import settings\n-from django.utils.six.moves.urllib.parse import urlunsplit\n \n DJANGO_SETTINGS_MODULE = \"integration_test.settings\"\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", DJANGO_SETTINGS_MODULE)\ndiff --git a/integration_test/integration_test/urls.py b/integration_test/integration_test/urls.py\n--- a/integration_test/integration_test/urls.py\n+++ b/integration_test/integration_test/urls.py\n@@ -13,11 +13,11 @@\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n \"\"\"\n-from django.conf.urls import url\n+from django.urls import path\n from django.contrib import admin\n from integration_app import views\n \n urlpatterns = [\n- url(r'^$', views.home, name='home'),\n- url(r'^admin/', admin.site.urls),\n+ path('', views.home, name='home'),\n+ path('admin/', admin.site.urls),\n ]\n", "problem_statement": "", "hints_text": "", "created_at": "2019-12-03T09:29:56Z"}
PythonDataset/train/fcn-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "wkentaro/fcn", "pull_number": 21, "instance_id": "wkentaro__fcn-21", "issue_numbers": "", "base_commit": "c3fd7f6e1ee1547df3bd4b11b6486915911a8c28", "patch": "diff --git a/fcn/models/fcn8s.py b/fcn/models/fcn8s.py\n--- a/fcn/models/fcn8s.py\n+++ b/fcn/models/fcn8s.py\n@@ -113,13 +113,7 @@ def __call__(self, x, t=None):\n upscore2 = h # 1/16\n \n # score_pool4c\n- # TODO(pfnet): Implement crop function\n- # h = F.crop(score_pool4, upscore2, axis=2, offset=5)\n- h = score_pool4\n- for axis in [2, 3]:\n- start = 5\n- end = start + upscore2.data.shape[axis]\n- _, h, _ = F.split_axis(h, [start, end], axis=axis)\n+ h = F.crop(score_pool4, upscore2, axes=[2, 3], offset=5)\n score_pool4c = h # 1/16\n \n # fuse_pool4\n@@ -131,13 +125,7 @@ def __call__(self, x, t=None):\n upscore_pool4 = h # 1/8\n \n # score_pool4c\n- # TODO(pfnet): Implement crop function\n- # h = F.crop(score_pool3, upscore_pool4, axis=2, offset=9)\n- h = score_pool3\n- for axis in [2, 3]:\n- start = 9\n- end = start + upscore_pool4.data.shape[axis]\n- _, h, _ = F.split_axis(h, [start, end], axis=axis)\n+ h = F.crop(score_pool3, upscore_pool4, axes=[2, 3], offset=9)\n score_pool3c = h # 1/8\n \n # fuse_pool3\n@@ -149,13 +137,7 @@ def __call__(self, x, t=None):\n upscore8 = h # 1/1\n \n # score\n- # TODO(pfnet): Implement crop function\n- # h = F.crop(upscore8, x, axis=2, offset=31)\n- h = upscore8\n- for axis in [2, 3]:\n- start = 31\n- end = start + x.data.shape[axis]\n- _, h, _ = F.split_axis(h, [start, end], axis=axis)\n+ h = F.crop(upscore8, x, axes=[2, 3], offset=31)\n score = h # 1/1\n \n # testing without t\n@@ -175,5 +157,6 @@ def accuracy_score(self, y_pred, y_true):\n y_true = cuda.to_cpu(y_true.data)\n # reduce values along classes axis\n reduced_y_pred = np.argmax(y_pred, axis=1)\n- s = (reduced_y_pred == y_true).mean()\n- return s\n+ assert reduced_y_pred.shape == y_true.shape\n+ score = (reduced_y_pred == y_true).mean()\n+ return score\ndiff --git a/scripts/fcn_train.py b/scripts/fcn_train.py\n--- a/scripts/fcn_train.py\n+++ b/scripts/fcn_train.py\n@@ -10,7 +10,7 @@\n import chainer.serializers as S\n from chainer import Variable\n import numpy as np\n-import progressbar\n+import tqdm\n \n import fcn\n from fcn.models import FCN8s\n@@ -22,6 +22,7 @@ class Trainer(object):\n \n def __init__(self, gpu):\n self.gpu = gpu\n+ self.epoch = 0\n # pretrained model\n pretrained_model = self._setup_pretrained_model()\n # dataset\n@@ -54,7 +55,7 @@ def batch_loop(self, type):\n \n Args:\n \n- - type (str): train, trainval, or val\n+ - type (str): 'train' or 'val'\n \n .. 
note::\n \n@@ -63,11 +64,12 @@ def batch_loop(self, type):\n self.model.train = True if type == 'train' else False\n N_data = len(self.dataset[type])\n sum_loss, sum_accuracy = 0, 0\n- pbar = progressbar.ProgressBar(max_value=N_data)\n- for i in xrange(0, N_data):\n- pbar.update(i+1)\n+ desc = 'epoch{0}: {1} batch_loop'.format(self.epoch, type)\n+ batch_size = 1\n+ assert batch_size == 1 # FCN8s only supports 1 size batch\n+ for i in tqdm.tqdm(xrange(0, N_data, batch_size), ncols=80, desc=desc):\n # load batch\n- batch = self.dataset.next_batch(batch_size=1, type=type)\n+ batch = self.dataset.next_batch(batch_size=batch_size, type=type)\n img, label = batch.img[0], batch.label[0]\n # x\n x_datum = self.dataset.img_to_datum(img)\n@@ -86,9 +88,8 @@ def batch_loop(self, type):\n self.optimizer.update(self.model, x, y)\n else:\n self.model(x, y)\n- sum_loss += cuda.to_cpu(self.model.loss.data) * len(batch)\n- sum_accuracy += self.model.accuracy * len(batch)\n- pbar.finish()\n+ sum_loss += cuda.to_cpu(self.model.loss.data) * batch_size\n+ sum_accuracy += self.model.accuracy * batch_size\n mean_loss = sum_loss / N_data\n mean_accuracy = sum_accuracy / N_data\n return mean_loss, mean_accuracy\n@@ -96,7 +97,8 @@ def batch_loop(self, type):\n def main_loop(self):\n log_csv = osp.join(fcn.get_data_dir(), 'log.csv')\n for epoch in xrange(100):\n- for type in ['train', 'trainval', 'val']:\n+ self.epoch = epoch\n+ for type in ['train', 'val']:\n mean_loss, mean_accuracy = self.batch_loop(type=type)\n log = dict(epoch=epoch, type=type, loss=mean_loss,\n accuracy=mean_accuracy)\n@@ -104,6 +106,12 @@ def main_loop(self):\n 'mean_accuracy: {accuracy}'.format(**log))\n with open(log_csv, 'a') as f:\n f.write('{epoch},{type},{loss},{accuracy}\\n'.format(**log))\n+ if epoch % 10 == 0:\n+ data_dir = fcn.get_data_dir()\n+ chainermodel = osp.join(data_dir, 'fcn8s_{0}.chainermodel'.format(epoch))\n+ optimizer_file = osp.join(data_dir, 'fcn8s_{0}.adam'.format(epoch))\n+ S.save_hdf5(chainermodel, self.model)\n+ S.save_hdf5(optimizer_file, self.optimizer)\n \n \n if __name__ == '__main__':\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2016-05-08T18:40:02Z"}
PythonDataset/train/flake8-todo-task-instances.jsonl.all ADDED
@@ -0,0 +1 @@
+ {"repo": "schlamar/flake8-todo", "pull_number": 6, "instance_id": "schlamar__flake8-todo-6", "issue_numbers": "", "base_commit": "c7738d4e6ff441bdbc52232c44b00859cafa70ac", "patch": "diff --git a/flake8_todo.py b/flake8_todo.py\n--- a/flake8_todo.py\n+++ b/flake8_todo.py\n@@ -3,13 +3,13 @@\n \n import re\n \n-import pep8\n+import pycodestyle\n \n NOTE_REGEX = re.compile(r'(TODO|FIXME|XXX)') # noqa\n \n \n def check_todo_notes(physical_line):\n- if pep8.noqa(physical_line):\n+ if pycodestyle.noqa(physical_line):\n return\n match = NOTE_REGEX.search(physical_line)\n if match:\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,6 +30,9 @@ def get_long_description():\n url='https://github.com/schlamar/flake8-todo',\n license='MIT',\n py_modules=['flake8_todo'],\n+ install_requires=[\n+ 'pycodestyle >= 2.0.0, < 2.1.0'\n+ ],\n zip_safe=False,\n entry_points={\n 'flake8.extension': [\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2016-06-21T09:33:21Z"}