code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
class Station(Named):
<|reserved_special_token_0|>
def __init__(self, name, long_name, time_zone_name, latitude=None,
longitude=None, elevation=None):
super().__init__(name)
self._long_name = long_name
self._time_zone = ZoneInfo(time_zone_name)
self._latitude = latitude
self._longitude = longitude
self._elevation = elevation
<|reserved_special_token_0|>
@property
def time_zone(self):
return self._time_zone
@property
def latitude(self):
return self._latitude
@property
def longitude(self):
return self._longitude
@property
def elevation(self):
return self._elevation
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Station(Named):
<|reserved_special_token_0|>
def __init__(self, name, long_name, time_zone_name, latitude=None,
longitude=None, elevation=None):
super().__init__(name)
self._long_name = long_name
self._time_zone = ZoneInfo(time_zone_name)
self._latitude = latitude
self._longitude = longitude
self._elevation = elevation
<|reserved_special_token_0|>
@property
def time_zone(self):
return self._time_zone
@property
def latitude(self):
return self._latitude
@property
def longitude(self):
return self._longitude
@property
def elevation(self):
return self._elevation
def get_night(self, time):
"""
Gets the station-local night that includes the specified time.
:Parameters:
time : `datetime`
the time whose night is to be gotten.
The time may be either naive or aware. If the time
is naive, it is assumed to be in the station's
time zone.
:Returns:
the station-local night that includes the specified time, a `date`.
The station-local night of a time is the starting date of the
local 24-hour period starting at noon that contains the time.
"""
if time.tzinfo is not None:
time = time.astimezone(self.time_zone)
if time.hour < 12:
time -= datetime.timedelta(hours=12)
return time.date()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Station(Named):
"""Recording station."""
def __init__(self, name, long_name, time_zone_name, latitude=None,
longitude=None, elevation=None):
super().__init__(name)
self._long_name = long_name
self._time_zone = ZoneInfo(time_zone_name)
self._latitude = latitude
self._longitude = longitude
self._elevation = elevation
@property
def long_name(self):
return self._long_name
@property
def time_zone(self):
return self._time_zone
@property
def latitude(self):
return self._latitude
@property
def longitude(self):
return self._longitude
@property
def elevation(self):
return self._elevation
def get_night(self, time):
"""
Gets the station-local night that includes the specified time.
:Parameters:
time : `datetime`
the time whose night is to be gotten.
The time may be either naive or aware. If the time
is naive, it is assumed to be in the station's
time zone.
:Returns:
the station-local night that includes the specified time, a `date`.
The station-local night of a time is the starting date of the
local 24-hour period starting at noon that contains the time.
"""
if time.tzinfo is not None:
time = time.astimezone(self.time_zone)
if time.hour < 12:
time -= datetime.timedelta(hours=12)
return time.date()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from zoneinfo import ZoneInfo
import datetime
from vesper.util.named import Named
class Station(Named):
"""Recording station."""
def __init__(self, name, long_name, time_zone_name, latitude=None,
longitude=None, elevation=None):
super().__init__(name)
self._long_name = long_name
self._time_zone = ZoneInfo(time_zone_name)
self._latitude = latitude
self._longitude = longitude
self._elevation = elevation
@property
def long_name(self):
return self._long_name
@property
def time_zone(self):
return self._time_zone
@property
def latitude(self):
return self._latitude
@property
def longitude(self):
return self._longitude
@property
def elevation(self):
return self._elevation
def get_night(self, time):
"""
Gets the station-local night that includes the specified time.
:Parameters:
time : `datetime`
the time whose night is to be gotten.
The time may be either naive or aware. If the time
is naive, it is assumed to be in the station's
time zone.
:Returns:
the station-local night that includes the specified time, a `date`.
The station-local night of a time is the starting date of the
local 24-hour period starting at noon that contains the time.
"""
if time.tzinfo is not None:
time = time.astimezone(self.time_zone)
if time.hour < 12:
time -= datetime.timedelta(hours=12)
return time.date()
<|reserved_special_token_1|>
"""Module containing class `Station`."""
from zoneinfo import ZoneInfo
import datetime
from vesper.util.named import Named
class Station(Named):
"""Recording station."""
def __init__(
self, name, long_name, time_zone_name,
latitude=None, longitude=None, elevation=None):
super().__init__(name)
self._long_name = long_name
self._time_zone = ZoneInfo(time_zone_name)
self._latitude = latitude
self._longitude = longitude
self._elevation = elevation
@property
def long_name(self):
return self._long_name
@property
def time_zone(self):
return self._time_zone
@property
def latitude(self):
return self._latitude
@property
def longitude(self):
return self._longitude
@property
def elevation(self):
return self._elevation
def get_night(self, time):
"""
Gets the station-local night that includes the specified time.
:Parameters:
time : `datetime`
the time whose night is to be gotten.
The time may be either naive or aware. If the time
is naive, it is assumed to be in the station's
time zone.
:Returns:
the station-local night that includes the specified time, a `date`.
The station-local night of a time is the starting date of the
local 24-hour period starting at noon that contains the time.
"""
if time.tzinfo is not None:
# time is aware
# convert time to station time zone
time = time.astimezone(self.time_zone)
if time.hour < 12:
time -= datetime.timedelta(hours=12)
return time.date()
|
flexible
|
{
"blob_id": "ad09880b9e06a129b9623be2a086ebcc8dc55c2c",
"index": 9079,
"step-1": "<mask token>\n\n\nclass Station(Named):\n <mask token>\n\n def __init__(self, name, long_name, time_zone_name, latitude=None,\n longitude=None, elevation=None):\n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n <mask token>\n\n @property\n def time_zone(self):\n return self._time_zone\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n @property\n def elevation(self):\n return self._elevation\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Station(Named):\n <mask token>\n\n def __init__(self, name, long_name, time_zone_name, latitude=None,\n longitude=None, elevation=None):\n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n <mask token>\n\n @property\n def time_zone(self):\n return self._time_zone\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n @property\n def elevation(self):\n return self._elevation\n\n def get_night(self, time):\n \"\"\"\n Gets the station-local night that includes the specified time.\n \n :Parameters:\n time : `datetime`\n the time whose night is to be gotten.\n \n The time may be either naive or aware. If the time\n is naive, it is assumed to be in the station's\n time zone.\n \n :Returns:\n the station-local night that includes the specified time, a `date`.\n \n The station-local night of a time is the starting date of the\n local 24-hour period starting at noon that contains the time.\n \"\"\"\n if time.tzinfo is not None:\n time = time.astimezone(self.time_zone)\n if time.hour < 12:\n time -= datetime.timedelta(hours=12)\n return time.date()\n",
"step-3": "<mask token>\n\n\nclass Station(Named):\n \"\"\"Recording station.\"\"\"\n\n def __init__(self, name, long_name, time_zone_name, latitude=None,\n longitude=None, elevation=None):\n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n\n @property\n def long_name(self):\n return self._long_name\n\n @property\n def time_zone(self):\n return self._time_zone\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n @property\n def elevation(self):\n return self._elevation\n\n def get_night(self, time):\n \"\"\"\n Gets the station-local night that includes the specified time.\n \n :Parameters:\n time : `datetime`\n the time whose night is to be gotten.\n \n The time may be either naive or aware. If the time\n is naive, it is assumed to be in the station's\n time zone.\n \n :Returns:\n the station-local night that includes the specified time, a `date`.\n \n The station-local night of a time is the starting date of the\n local 24-hour period starting at noon that contains the time.\n \"\"\"\n if time.tzinfo is not None:\n time = time.astimezone(self.time_zone)\n if time.hour < 12:\n time -= datetime.timedelta(hours=12)\n return time.date()\n",
"step-4": "<mask token>\nfrom zoneinfo import ZoneInfo\nimport datetime\nfrom vesper.util.named import Named\n\n\nclass Station(Named):\n \"\"\"Recording station.\"\"\"\n\n def __init__(self, name, long_name, time_zone_name, latitude=None,\n longitude=None, elevation=None):\n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n\n @property\n def long_name(self):\n return self._long_name\n\n @property\n def time_zone(self):\n return self._time_zone\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n @property\n def elevation(self):\n return self._elevation\n\n def get_night(self, time):\n \"\"\"\n Gets the station-local night that includes the specified time.\n \n :Parameters:\n time : `datetime`\n the time whose night is to be gotten.\n \n The time may be either naive or aware. If the time\n is naive, it is assumed to be in the station's\n time zone.\n \n :Returns:\n the station-local night that includes the specified time, a `date`.\n \n The station-local night of a time is the starting date of the\n local 24-hour period starting at noon that contains the time.\n \"\"\"\n if time.tzinfo is not None:\n time = time.astimezone(self.time_zone)\n if time.hour < 12:\n time -= datetime.timedelta(hours=12)\n return time.date()\n",
"step-5": "\"\"\"Module containing class `Station`.\"\"\"\n\n\nfrom zoneinfo import ZoneInfo\nimport datetime\n\nfrom vesper.util.named import Named\n\n\nclass Station(Named):\n \n \"\"\"Recording station.\"\"\"\n \n \n def __init__(\n self, name, long_name, time_zone_name,\n latitude=None, longitude=None, elevation=None):\n \n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n \n \n @property\n def long_name(self):\n return self._long_name\n \n \n @property\n def time_zone(self):\n return self._time_zone\n \n \n @property\n def latitude(self):\n return self._latitude\n \n \n @property\n def longitude(self):\n return self._longitude\n \n \n @property\n def elevation(self):\n return self._elevation\n \n \n def get_night(self, time):\n \n \"\"\"\n Gets the station-local night that includes the specified time.\n \n :Parameters:\n time : `datetime`\n the time whose night is to be gotten.\n \n The time may be either naive or aware. If the time\n is naive, it is assumed to be in the station's\n time zone.\n \n :Returns:\n the station-local night that includes the specified time, a `date`.\n \n The station-local night of a time is the starting date of the\n local 24-hour period starting at noon that contains the time.\n \"\"\"\n \n if time.tzinfo is not None:\n # time is aware\n \n # convert time to station time zone\n time = time.astimezone(self.time_zone)\n \n if time.hour < 12:\n time -= datetime.timedelta(hours=12)\n \n return time.date()\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
def autorotate(angle):
v.set_rotation_angle([0.0, -angle, 0.0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def autorotate(angle):
v.set_rotation_angle([0.0, -angle, 0.0])
<|reserved_special_token_0|>
v.animate(0, 360, autorotate, milliseconds=3000, steps=100)
v.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
v = Viewer()
path = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'
mesh = v.load_mesh(path, highlight=True)
def autorotate(angle):
v.set_rotation_angle([0.0, -angle, 0.0])
<|reserved_special_token_0|>
v.animate(0, 360, autorotate, milliseconds=3000, steps=100)
v.show()
<|reserved_special_token_1|>
import pathlib
from blastsight.view.viewer import Viewer
<|reserved_special_token_0|>
v = Viewer()
path = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'
mesh = v.load_mesh(path, highlight=True)
def autorotate(angle):
v.set_rotation_angle([0.0, -angle, 0.0])
<|reserved_special_token_0|>
v.animate(0, 360, autorotate, milliseconds=3000, steps=100)
v.show()
<|reserved_special_token_1|>
#!/usr/bin/env python
import pathlib
from blastsight.view.viewer import Viewer
"""
In this demo, we'll show how you can create a basic animation.
An animation is interpreted as changing the state of the viewer one frame at the time.
That means we'll define a function that makes a change in one single frame.
The function must receive a single argument, of the same type of the 'start' and 'end' values.
"""
v = Viewer()
path = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'
mesh = v.load_mesh(path, highlight=True)
def autorotate(angle):
v.set_rotation_angle([0.0, -angle, 0.0])
"""
The animate() method receives a 'start' value, an 'end' value, a 'method' (the function that changes
one frame in the viewer), and two optional kwargs: 'milliseconds' (how much time should the
animation last) and 'steps' (smoothness of the animation depends on this).
"""
# Start animation
v.animate(0, 360, autorotate, milliseconds=3000, steps=100)
# Show viewer
v.show()
|
flexible
|
{
"blob_id": "00be3d813ce4335ff9ea02ed9f1884d3210f3d5a",
"index": 3101,
"step-1": "<mask token>\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n<mask token>\nv.animate(0, 360, autorotate, milliseconds=3000, steps=100)\nv.show()\n",
"step-3": "<mask token>\nv = Viewer()\npath = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'\nmesh = v.load_mesh(path, highlight=True)\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n<mask token>\nv.animate(0, 360, autorotate, milliseconds=3000, steps=100)\nv.show()\n",
"step-4": "import pathlib\nfrom blastsight.view.viewer import Viewer\n<mask token>\nv = Viewer()\npath = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'\nmesh = v.load_mesh(path, highlight=True)\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n<mask token>\nv.animate(0, 360, autorotate, milliseconds=3000, steps=100)\nv.show()\n",
"step-5": "#!/usr/bin/env python\n\nimport pathlib\n\nfrom blastsight.view.viewer import Viewer\n\n\"\"\"\nIn this demo, we'll show how you can create a basic animation.\n\nAn animation is interpreted as changing the state of the viewer one frame at the time.\nThat means we'll define a function that makes a change in one single frame.\nThe function must receive a single argument, of the same type of the 'start' and 'end' values.\n\"\"\"\n\nv = Viewer()\npath = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'\nmesh = v.load_mesh(path, highlight=True)\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n\"\"\"\nThe animate() method receives a 'start' value, an 'end' value, a 'method' (the function that changes\none frame in the viewer), and two optional kwargs: 'milliseconds' (how much time should the\nanimation last) and 'steps' (smoothness of the animation depends on this).\n\"\"\"\n\n# Start animation\nv.animate(0, 360, autorotate, milliseconds=3000, steps=100)\n\n# Show viewer\nv.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# zip(),可以压缩 N 个列表成为一个zip对象(可迭代对象)。
a =['a', 'b', 'c']
b =[1, 2, 3]
[x for x in zip(a, b)] # [('a', 1), ('b', 2), ('c', 3)]
# 列表长度不等时,以短的为准
c =['x','y']
[x for x in zip(a, c)] # [('a', 'x'), ('b', 'y')]
# 例子
books =['简爱','小王子','瓦尔登湖']
prices =[56, 78, 66]
for book, price in zip(books, prices):
print("%s的价格是:%3.1f"% (book, price))
# reversed() 实现反向遍历,参数可以是各种序列
[y for y in reversed(b)] # [3, 2, 1]
# sorted() 接受一个可迭代对象,返回其升序。可传参数,reverse=True,key=?(排序关键字)
for book in sorted(books, reverse=True, key=len):
print(book)
|
normal
|
{
"blob_id": "0eab23f4271f724da587707599eb0cbf2144efa1",
"index": 8178,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n[x for x in zip(a, b)]\n<mask token>\n[x for x in zip(a, c)]\n<mask token>\nfor book, price in zip(books, prices):\n print('%s的价格是:%3.1f' % (book, price))\n[y for y in reversed(b)]\nfor book in sorted(books, reverse=True, key=len):\n print(book)\n",
"step-3": "a = ['a', 'b', 'c']\nb = [1, 2, 3]\n[x for x in zip(a, b)]\nc = ['x', 'y']\n[x for x in zip(a, c)]\nbooks = ['简爱', '小王子', '瓦尔登湖']\nprices = [56, 78, 66]\nfor book, price in zip(books, prices):\n print('%s的价格是:%3.1f' % (book, price))\n[y for y in reversed(b)]\nfor book in sorted(books, reverse=True, key=len):\n print(book)\n",
"step-4": "# zip(),可以压缩 N 个列表成为一个zip对象(可迭代对象)。\na =['a', 'b', 'c']\nb =[1, 2, 3]\n[x for x in zip(a, b)] # [('a', 1), ('b', 2), ('c', 3)]\n\n# 列表长度不等时,以短的为准\nc =['x','y']\n[x for x in zip(a, c)] # [('a', 'x'), ('b', 'y')]\n\n# 例子\nbooks =['简爱','小王子','瓦尔登湖']\nprices =[56, 78, 66]\nfor book, price in zip(books, prices):\n print(\"%s的价格是:%3.1f\"% (book, price))\n\n# reversed() 实现反向遍历,参数可以是各种序列\n[y for y in reversed(b)] # [3, 2, 1]\n\n# sorted() 接受一个可迭代对象,返回其升序。可传参数,reverse=True,key=?(排序关键字)\nfor book in sorted(books, reverse=True, key=len):\n print(book)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from ..general.utils import log_errors
from googleapiclient import discovery
from oauth2client.client import SignedJwtAssertionCredentials
from django.conf import settings
from celery import shared_task
from logging import getLogger
import httplib2
_logger = getLogger(__name__)
def create_events_calendar():
""" Create an events calendar if none already exists. This function mostly exists for
creating calendars for dev environments, not used in prod.
"""
service = get_calendar_service()
if not service:
return
calendar = {
'summary': 'Ting som skjer i Telemarkgruppa',
'timeZone': 'Europe/Oslo',
}
cal_insert_response = service.calendars().insert(body=calendar).execute()
public_acl = {
'role': 'reader',
'scope': {
'type': 'default'
}
}
acl_insert_response = service.acl().insert(calendarId=cal_insert_response['id'], body=public_acl).execute()
return acl_insert_response
def get_calendar_service():
name = 'calendar'
version = 'v3'
scope = 'https://www.googleapis.com/auth/calendar'
# Provide a mock fallback for test environments where real interaction with
# Google calendar is not needed
if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):
_logger.info('Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY '
'in settings.')
return
# Prepare credentials, and authorize HTTP object with them.
credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,
settings.GOOGLE_API_PRIVATE_KEY, scope)
http = credentials.authorize(http=httplib2.Http())
# Construct a service object via the discovery service.
service = discovery.build(name, version, http=http)
return service
@shared_task
@log_errors
def update_google_calendar_event(event_id):
from .models import Event
event = Event.objects.get(pk=event_id)
# If the event doesn't already exist on google calendar, create it
if not event.google_calendar_id:
_logger.info('Adding missing event to google calendar: %s', event.name)
add_google_calender_event(event.id)
return
# Authenticate and construct service.
service = get_calendar_service()
if not service:
return
payload = get_google_calendar_payload_for_event(event)
results = service.events().update(calendarId=settings.GOOGLE_CALENDAR_ID,
eventId=event.google_calendar_id, body=payload).execute()
_logger.info('Google calendar event for %s updated: %s', event.name, results)
@shared_task
@log_errors
def add_google_calender_event(event_id):
from .models import Event
event = Event.objects.get(pk=event_id)
if not event:
_logger.warning('Could not find event to add to Google Calendar: %d', event_id)
return
google_payload = get_google_calendar_payload_for_event(event)
service = get_calendar_service()
if not service:
return
results = service.events().insert(calendarId=settings.GOOGLE_CALENDAR_ID,
body=google_payload).execute()
if results.get('id'):
event.google_calendar_id = results['id']
event.save()
_logger.info("Google Calendar event for event '%s' created successfully", event.name)
else:
_logger.error("New Google Calendar event did not have id in response, was: %s", results)
@shared_task
@log_errors
def delete_google_calendar_event(google_calendar_event_id):
service = get_calendar_service()
if not service:
return
result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,
eventId=google_calendar_event_id).execute()
_logger.info('Google calendar event %s deleted: %s', google_calendar_event_id, result)
def get_google_calendar_payload_for_event(event):
    """Build the request body for the Google Calendar events API.

    Start and end times are serialized as ISO 8601 strings and pinned to
    the Europe/Oslo time zone.
    """
    def _when(moment):
        # Google expects {'dateTime': ..., 'timeZone': ...} time objects.
        return {'dateTime': moment.isoformat(), 'timeZone': 'Europe/Oslo'}

    return {
        'summary': event.name,
        'location': event.location,
        'description': event.summary,
        'start': _when(event.startdate),
        'end': _when(event.enddate),
    }
|
normal
|
{
"blob_id": "36fb0d936be5c5d305c4076fd1c497664c9b770a",
"index": 8374,
"step-1": "<mask token>\n\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {'summary': 'Ting som skjer i Telemarkgruppa', 'timeZone':\n 'Europe/Oslo'}\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {'role': 'reader', 'scope': {'type': 'default'}}\n acl_insert_response = service.acl().insert(calendarId=\n cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info(\n 'Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY in settings.'\n )\n return\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n service = get_calendar_service()\n if not service:\n return\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.\n GOOGLE_CALENDAR_ID, eventId=event.google_calendar_id, body=payload\n ).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name,\n results)\n\n\n<mask token>\n\n\ndef get_google_calendar_payload_for_event(event):\n return {'summary': event.name, 'location': event.location,\n 'description': event.summary, 
'start': {'dateTime': event.startdate\n .isoformat(), 'timeZone': 'Europe/Oslo'}, 'end': {'dateTime': event\n .enddate.isoformat(), 'timeZone': 'Europe/Oslo'}}\n",
"step-2": "<mask token>\n\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {'summary': 'Ting som skjer i Telemarkgruppa', 'timeZone':\n 'Europe/Oslo'}\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {'role': 'reader', 'scope': {'type': 'default'}}\n acl_insert_response = service.acl().insert(calendarId=\n cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info(\n 'Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY in settings.'\n )\n return\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n service = get_calendar_service()\n if not service:\n return\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.\n GOOGLE_CALENDAR_ID, eventId=event.google_calendar_id, body=payload\n ).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name,\n results)\n\n\n@shared_task\n@log_errors\ndef add_google_calender_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event:\n 
_logger.warning('Could not find event to add to Google Calendar: %d',\n event_id)\n return\n google_payload = get_google_calendar_payload_for_event(event)\n service = get_calendar_service()\n if not service:\n return\n results = service.events().insert(calendarId=settings.\n GOOGLE_CALENDAR_ID, body=google_payload).execute()\n if results.get('id'):\n event.google_calendar_id = results['id']\n event.save()\n _logger.info(\n \"Google Calendar event for event '%s' created successfully\",\n event.name)\n else:\n _logger.error(\n 'New Google Calendar event did not have id in response, was: %s',\n results)\n\n\n@shared_task\n@log_errors\ndef delete_google_calendar_event(google_calendar_event_id):\n service = get_calendar_service()\n if not service:\n return\n result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=google_calendar_event_id).execute()\n _logger.info('Google calendar event %s deleted: %s',\n google_calendar_event_id, result)\n\n\ndef get_google_calendar_payload_for_event(event):\n return {'summary': event.name, 'location': event.location,\n 'description': event.summary, 'start': {'dateTime': event.startdate\n .isoformat(), 'timeZone': 'Europe/Oslo'}, 'end': {'dateTime': event\n .enddate.isoformat(), 'timeZone': 'Europe/Oslo'}}\n",
"step-3": "<mask token>\n_logger = getLogger(__name__)\n\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {'summary': 'Ting som skjer i Telemarkgruppa', 'timeZone':\n 'Europe/Oslo'}\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {'role': 'reader', 'scope': {'type': 'default'}}\n acl_insert_response = service.acl().insert(calendarId=\n cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info(\n 'Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY in settings.'\n )\n return\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n service = get_calendar_service()\n if not service:\n return\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.\n GOOGLE_CALENDAR_ID, eventId=event.google_calendar_id, body=payload\n ).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name,\n results)\n\n\n@shared_task\n@log_errors\ndef add_google_calender_event(event_id):\n from .models import Event\n event = 
Event.objects.get(pk=event_id)\n if not event:\n _logger.warning('Could not find event to add to Google Calendar: %d',\n event_id)\n return\n google_payload = get_google_calendar_payload_for_event(event)\n service = get_calendar_service()\n if not service:\n return\n results = service.events().insert(calendarId=settings.\n GOOGLE_CALENDAR_ID, body=google_payload).execute()\n if results.get('id'):\n event.google_calendar_id = results['id']\n event.save()\n _logger.info(\n \"Google Calendar event for event '%s' created successfully\",\n event.name)\n else:\n _logger.error(\n 'New Google Calendar event did not have id in response, was: %s',\n results)\n\n\n@shared_task\n@log_errors\ndef delete_google_calendar_event(google_calendar_event_id):\n service = get_calendar_service()\n if not service:\n return\n result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=google_calendar_event_id).execute()\n _logger.info('Google calendar event %s deleted: %s',\n google_calendar_event_id, result)\n\n\ndef get_google_calendar_payload_for_event(event):\n return {'summary': event.name, 'location': event.location,\n 'description': event.summary, 'start': {'dateTime': event.startdate\n .isoformat(), 'timeZone': 'Europe/Oslo'}, 'end': {'dateTime': event\n .enddate.isoformat(), 'timeZone': 'Europe/Oslo'}}\n",
"step-4": "from ..general.utils import log_errors\nfrom googleapiclient import discovery\nfrom oauth2client.client import SignedJwtAssertionCredentials\nfrom django.conf import settings\nfrom celery import shared_task\nfrom logging import getLogger\nimport httplib2\n_logger = getLogger(__name__)\n\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {'summary': 'Ting som skjer i Telemarkgruppa', 'timeZone':\n 'Europe/Oslo'}\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {'role': 'reader', 'scope': {'type': 'default'}}\n acl_insert_response = service.acl().insert(calendarId=\n cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info(\n 'Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY in settings.'\n )\n return\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n service = get_calendar_service()\n if not service:\n return\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.\n GOOGLE_CALENDAR_ID, eventId=event.google_calendar_id, 
body=payload\n ).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name,\n results)\n\n\n@shared_task\n@log_errors\ndef add_google_calender_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event:\n _logger.warning('Could not find event to add to Google Calendar: %d',\n event_id)\n return\n google_payload = get_google_calendar_payload_for_event(event)\n service = get_calendar_service()\n if not service:\n return\n results = service.events().insert(calendarId=settings.\n GOOGLE_CALENDAR_ID, body=google_payload).execute()\n if results.get('id'):\n event.google_calendar_id = results['id']\n event.save()\n _logger.info(\n \"Google Calendar event for event '%s' created successfully\",\n event.name)\n else:\n _logger.error(\n 'New Google Calendar event did not have id in response, was: %s',\n results)\n\n\n@shared_task\n@log_errors\ndef delete_google_calendar_event(google_calendar_event_id):\n service = get_calendar_service()\n if not service:\n return\n result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=google_calendar_event_id).execute()\n _logger.info('Google calendar event %s deleted: %s',\n google_calendar_event_id, result)\n\n\ndef get_google_calendar_payload_for_event(event):\n return {'summary': event.name, 'location': event.location,\n 'description': event.summary, 'start': {'dateTime': event.startdate\n .isoformat(), 'timeZone': 'Europe/Oslo'}, 'end': {'dateTime': event\n .enddate.isoformat(), 'timeZone': 'Europe/Oslo'}}\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom ..general.utils import log_errors\n\nfrom googleapiclient import discovery\nfrom oauth2client.client import SignedJwtAssertionCredentials\nfrom django.conf import settings\nfrom celery import shared_task\nfrom logging import getLogger\nimport httplib2\n\n_logger = getLogger(__name__)\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {\n 'summary': 'Ting som skjer i Telemarkgruppa',\n 'timeZone': 'Europe/Oslo',\n }\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {\n 'role': 'reader',\n 'scope': {\n 'type': 'default'\n }\n }\n acl_insert_response = service.acl().insert(calendarId=cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n\n # Provide a mock fallback for test environments where real interaction with\n # Google calendar is not needed\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info('Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY '\n 'in settings.')\n return\n\n # Prepare credentials, and authorize HTTP object with them.\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n\n # Construct a service object via the discovery service.\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n\n # If the event doesn't already exist on google calendar, create it\n if not event.google_calendar_id:\n 
_logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n\n # Authenticate and construct service.\n service = get_calendar_service()\n\n if not service:\n return\n\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=event.google_calendar_id, body=payload).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name, results)\n\n\n@shared_task\n@log_errors\ndef add_google_calender_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n\n if not event:\n _logger.warning('Could not find event to add to Google Calendar: %d', event_id)\n return\n\n google_payload = get_google_calendar_payload_for_event(event)\n service = get_calendar_service()\n if not service:\n return\n\n results = service.events().insert(calendarId=settings.GOOGLE_CALENDAR_ID,\n body=google_payload).execute()\n if results.get('id'):\n event.google_calendar_id = results['id']\n event.save()\n _logger.info(\"Google Calendar event for event '%s' created successfully\", event.name)\n else:\n _logger.error(\"New Google Calendar event did not have id in response, was: %s\", results)\n\n\n@shared_task\n@log_errors\ndef delete_google_calendar_event(google_calendar_event_id):\n service = get_calendar_service()\n if not service:\n return\n\n result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=google_calendar_event_id).execute()\n _logger.info('Google calendar event %s deleted: %s', google_calendar_event_id, result)\n\n\ndef get_google_calendar_payload_for_event(event):\n return {\n 'summary': event.name,\n 'location': event.location,\n 'description': event.summary,\n 'start': {\n 'dateTime': event.startdate.isoformat(),\n 'timeZone': 'Europe/Oslo',\n },\n 'end': {\n 'dateTime': event.enddate.isoformat(),\n 'timeZone': 'Europe/Oslo',\n }\n }\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
# Simple three-question console quiz: +1 mark per correct answer,
# -1 per wrong answer, final score printed at the end.
ghj=input("enter your first name:")
print("Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.")

# BUG FIX: the original kept every answer in one shared tuple ("winlist"),
# so e.g. answering "mumbai" to question 1 was scored as correct. Each
# question now checks only its own accepted answers. Prompts, messages
# and scoring are unchanged.
x=0

print("Q1:-Who is the president of India?")
enter=input("enter your answer here:")
if enter.lower() in ("ramnath govind",):
    print("woah you surely are smart you are correct!!!!")
    x=x+1
else:
    print("you went wrong at the first question")
    x=x-1

print("Q2:-What is the full form of MCQ?")
enter2=input("enter your answer here:")
if enter2.lower() in ("multiple choice question", "multiple choice questions"):
    print("you are right!!!!!!")
    x=x+1
else:
    print("I told you this is a hard quiz, ur answer is wrong")
    x=x-1

print("Q3:-which city is the india's largest city by population")
enter3=input("enter ur answer here:")
if enter3.lower() in ("mumbai",):
    print("you are right!!!")
    x=x+1
else:
    print("you were wrong you lose 1 mark")
    x=x-1

print("well " +str(ghj)+ " you have completed the quiz and scored: "+str(x)+" marks")
|
normal
|
{
"blob_id": "351421ef6a40e3a4bd4549a1851fbf4bed9ddf30",
"index": 5024,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n \"\"\"Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\"\"\"\n )\nprint('Q1:-Who is the president of India?')\n<mask token>\nif seat in winlist:\n print('woah you surely are smart you are correct!!!!')\n x = x + 1\nelse:\n print('you went wrong at the first question')\n x = x - 1\nprint('Q2:-What is the full form of MCQ?')\n<mask token>\nif seat2 in winlist:\n print('you are right!!!!!!')\n x = x + 1\nelse:\n print('I told you this is a hard quiz, ur answer is wrong')\n x = x - 1\nprint(\"Q3:-which city is the india's largest city by population\")\n<mask token>\nif seat3 in winlist:\n print('you are right!!!')\n x = x + 1\nelse:\n print('you were wrong you lose 1 mark')\n x = x - 1\nprint('well ' + str(ghj) + ' you have completed the quiz and scored: ' +\n str(x) + ' marks')\n",
"step-3": "ghj = input('enter your first name:')\nprint(\n \"\"\"Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\"\"\"\n )\nprint('Q1:-Who is the president of India?')\nwinlist = ('ramnath govind', 'multiple choice question',\n 'multiple choice questions', 'mumbai')\nenter = input('enter your answer here:')\nseat = enter.lower()\nx = 0\nif seat in winlist:\n print('woah you surely are smart you are correct!!!!')\n x = x + 1\nelse:\n print('you went wrong at the first question')\n x = x - 1\nprint('Q2:-What is the full form of MCQ?')\nenter2 = input('enter your answer here:')\nseat2 = enter2.lower()\nif seat2 in winlist:\n print('you are right!!!!!!')\n x = x + 1\nelse:\n print('I told you this is a hard quiz, ur answer is wrong')\n x = x - 1\nprint(\"Q3:-which city is the india's largest city by population\")\nenter3 = input('enter ur answer here:')\nseat3 = enter3.lower()\nif seat3 in winlist:\n print('you are right!!!')\n x = x + 1\nelse:\n print('you were wrong you lose 1 mark')\n x = x - 1\nprint('well ' + str(ghj) + ' you have completed the quiz and scored: ' +\n str(x) + ' marks')\n",
"step-4": "ghj=input(\"enter your first name:\")\r\nprint(\"Welcome to my Quiz:\\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\")\r\nprint(\"Q1:-Who is the president of India?\")\r\nwinlist=(\"ramnath govind\",\"multiple choice question\",\"multiple choice questions\",\"mumbai\")\r\nenter=input(\"enter your answer here:\")\r\nseat=enter.lower()\r\nx=0\r\nif seat in winlist:\r\n print(\"woah you surely are smart you are correct!!!!\")\r\n x=x+1\r\nelse:\r\n print(\"you went wrong at the first question\")\r\n x=x-1\r\nprint(\"Q2:-What is the full form of MCQ?\")\r\nenter2=input(\"enter your answer here:\")\r\nseat2=enter2.lower()\r\nif seat2 in winlist:\r\n print(\"you are right!!!!!!\")\r\n x=x+1\r\nelse:\r\n print(\"I told you this is a hard quiz, ur answer is wrong\")\r\n x=x-1\r\nprint(\"Q3:-which city is the india's largest city by population\")\r\nenter3=input(\"enter ur answer here:\")\r\nseat3=enter3.lower()\r\nif seat3 in winlist:\r\n print(\"you are right!!!\")\r\n x=x+1\r\nelse:\r\n print(\"you were wrong you lose 1 mark\")\r\n x=x-1\r\nprint(\"well \" +str(ghj)+ \" you have completed the quiz and scored: \"+str(x)+\" marks\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 2 08:04:11 2019

@author: yocoy
"""
import serial, time

# Open the Arduino's serial port, then give the board time to reset
# after the connection is established before reading from it.
arduino = serial.Serial('COM7', 9600)
time.sleep(4)

# Collect 100 raw lines of output from the board, then release the port.
readings = [arduino.readline() for _ in range(100)]
arduino.close()

print(readings)
|
normal
|
{
"blob_id": "d514413c303dd174d8f56685158780a1681e1aba",
"index": 7925,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntime.sleep(4)\n<mask token>\nfor i in range(100):\n lectura.append(arduino.readline())\narduino.close()\nprint(lectura)\n",
"step-3": "<mask token>\narduino = serial.Serial('COM7', 9600)\ntime.sleep(4)\nlectura = []\nfor i in range(100):\n lectura.append(arduino.readline())\narduino.close()\nprint(lectura)\n",
"step-4": "<mask token>\nimport serial, time\narduino = serial.Serial('COM7', 9600)\ntime.sleep(4)\nlectura = []\nfor i in range(100):\n lectura.append(arduino.readline())\narduino.close()\nprint(lectura)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 2 08:04:11 2019\n\n@author: yocoy\n\"\"\"\n\nimport serial, time\n\narduino = serial.Serial('COM7', 9600)\ntime.sleep(4)\n\nlectura = []\n\nfor i in range(100):\n lectura.append(arduino.readline())\narduino.close()\n\nprint(lectura)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from enum import unique
from django.db import models
import secrets
import string
# Card-theme options a host can pick when creating a game, as
# (stored value, human-readable label) pairs for Django's `choices`.
CARD_PACK_CHOICES = (
    ('1', 'Traditional Cards'),
    ('2', 'Special Cards'),
    ('3', 'Other Themed Cards')
)

# Marker (token) options used to cover called cards during play.
MARKER_CHOICES = (
    ('1', 'Plastic Dots'),
    ('2', 'Quarters'),
    ('3', 'Beans')
)
def generate_game_code() -> int:
    """ Generates a unique game code.

    Returns
    -------
    int
        - a unique 7 digit numerical code
    """
    while True:
        # BUG FIX: sampling all 7 digits from string.digits could produce a
        # leading '0', which int() silently drops -- yielding a code with
        # fewer than 7 digits. Force the first digit to be non-zero.
        first_digit = secrets.choice('123456789')
        rest = ''.join(secrets.choice(string.digits) for i in range(6))
        generated_game_code = int(first_digit + rest)
        # Retry until the code is not already used by an existing game.
        if Game.objects.filter(game_code=generated_game_code).count() == 0:
            break
    return generated_game_code
def generate_player_id() -> str:
    """Generates a unique player id.

    Returns
    -------
    str
        - a unique 5 character alphanumeric code
    """
    # FIX: the return annotation was `-> string`, which annotates with the
    # imported `string` *module* object rather than the `str` type.
    while True:
        # code will have uppercase letters and numbers
        code_options = string.ascii_uppercase + string.digits
        generated_player_id = ''.join(secrets.choice(code_options) for i in range(5))
        # Retry until the id is not already taken by another player.
        if Player.objects.filter(player_id=generated_player_id).count() == 0:
            break
    return generated_player_id
# Create your models here.
class Game( models.Model):
    """ Model that describes a loteria game

    Fields
    ------
    cards_id : str
        - choice key of the card theme chosen by user during creation of
          the game (see CARD_PACK_CHOICES; defaults to '1', Traditional Cards).

    created_at : dateTime
        - the time that the game was started.

    game_code : int
        - a unique 7 digit code assigned during creation
          needed to join games.

    host : string
        - the session key of the person who started the game
          ensures that users do not have more that 1 running game.

    game_over : bool
        - defaults to True for now but will default to False upon creation.

    marker_id : str
        - choice key of the marker type chosen by user during creation of
          the game (see MARKER_CHOICES; defaults to '1', Plastic Dots).

    Notes
    -----
    - Considering making game_code primary key instead
    """
    # cards_id and marker_id store the choice key, not the label; valid
    # values are constrained by the *_CHOICES tuples above.
    game_code = models.IntegerField(null=False, default=generate_game_code, unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    host = models.CharField(max_length=100, unique=True)
    cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES, default='1')
    marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES, default='1')
    game_over = models.BooleanField(default=True)
class Player(models.Model):
    """ Model that describes a Player in the Game

    Attributes
    ----------

    name : string
        the display name of the player.
    wins : int
        the number of times this player has won.
    losses : int
        the number of times this player has lost.
    player_id : string
        the unique id assigned to a player during a game.
    game_code : int
        the game code of the game joined.
        NOTE(review): previously documented as "null if no game has been
        joined", but the field is declared null=False -- confirm intent.
    host_key : string
        unique key identifying the player's session
        (presumably the session key, mirroring Game.host -- TODO confirm).
    """
    player_id = models.CharField(max_length=15, default=generate_player_id, unique=True)
    name = models.CharField(max_length=100, unique=False)
    game_code = models.IntegerField(null=False, unique=False)
    wins = models.IntegerField(null=False, default=0)
    losses = models.IntegerField(null=False, default=0)
    host_key = models.CharField(max_length=100, unique=True)
|
normal
|
{
"blob_id": "2fd33439d4403ec72f890a1d1b4f35f2b38d033b",
"index": 9268,
"step-1": "<mask token>\n\n\nclass Game(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id,\n unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n",
"step-2": "<mask token>\n\n\ndef generate_game_code() ->int:\n \"\"\" Generates a unique game code.\n \n Returns\n -------\n int\n - a unique 7 digit numerical code\n \"\"\"\n while True:\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in\n range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)\n\n\n<mask token>\n\n\nclass Game(models.Model):\n \"\"\" Model that describes a loteria game\n\n Fields\n ------\n cards_id : int\n - the id of the card theme chosen by user during creation of game.\n\n created_at : dateTime\n - the time that the game was started.\n\n game_code : int\n - a unique 7 digit code assigned during creation \n needed to join games.\n\n host : string\n - the session key of the person who started the game\n ensures that users do not have more that 1 running game.\n\n game_over : bool\n - defaults to True for now but will default to False upon creation.\n\n maker_id : int\n - the id of the marker type chosen by user during creation of game.\n\n Notes\n -----\n - Considering making game_code primary key instead\n \"\"\"\n game_code = models.IntegerField(null=False, default=generate_game_code,\n unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n host = models.CharField(max_length=100, unique=True)\n cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES,\n default='1')\n marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES,\n default='1')\n game_over = models.BooleanField(default=True)\n\n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null 
if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id,\n unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n",
"step-3": "<mask token>\nCARD_PACK_CHOICES = ('1', 'Traditional Cards'), ('2', 'Special Cards'), ('3',\n 'Other Themed Cards')\nMARKER_CHOICES = ('1', 'Plastic Dots'), ('2', 'Quarters'), ('3', 'Beans')\n\n\ndef generate_game_code() ->int:\n \"\"\" Generates a unique game code.\n \n Returns\n -------\n int\n - a unique 7 digit numerical code\n \"\"\"\n while True:\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in\n range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)\n\n\ndef generate_player_id() ->string:\n \"\"\"Generates a unique player id.\n \n Returns\n -------\n string\n - a unique 5 digit alphaneumeric code\n \"\"\"\n while True:\n code_options = string.ascii_uppercase + string.digits\n generated_player_id = ''.join(secrets.choice(code_options) for i in\n range(5))\n if Player.objects.filter(player_id=generated_player_id).count() == 0:\n break\n return generated_player_id\n\n\nclass Game(models.Model):\n \"\"\" Model that describes a loteria game\n\n Fields\n ------\n cards_id : int\n - the id of the card theme chosen by user during creation of game.\n\n created_at : dateTime\n - the time that the game was started.\n\n game_code : int\n - a unique 7 digit code assigned during creation \n needed to join games.\n\n host : string\n - the session key of the person who started the game\n ensures that users do not have more that 1 running game.\n\n game_over : bool\n - defaults to True for now but will default to False upon creation.\n\n maker_id : int\n - the id of the marker type chosen by user during creation of game.\n\n Notes\n -----\n - Considering making game_code primary key instead\n \"\"\"\n game_code = models.IntegerField(null=False, default=generate_game_code,\n unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n host = models.CharField(max_length=100, unique=True)\n cards_id = models.CharField(max_length=10, 
choices=CARD_PACK_CHOICES,\n default='1')\n marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES,\n default='1')\n game_over = models.BooleanField(default=True)\n\n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id,\n unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n",
"step-4": "from enum import unique\nfrom django.db import models\nimport secrets\nimport string\nCARD_PACK_CHOICES = ('1', 'Traditional Cards'), ('2', 'Special Cards'), ('3',\n 'Other Themed Cards')\nMARKER_CHOICES = ('1', 'Plastic Dots'), ('2', 'Quarters'), ('3', 'Beans')\n\n\ndef generate_game_code() ->int:\n \"\"\" Generates a unique game code.\n \n Returns\n -------\n int\n - a unique 7 digit numerical code\n \"\"\"\n while True:\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in\n range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)\n\n\ndef generate_player_id() ->string:\n \"\"\"Generates a unique player id.\n \n Returns\n -------\n string\n - a unique 5 digit alphaneumeric code\n \"\"\"\n while True:\n code_options = string.ascii_uppercase + string.digits\n generated_player_id = ''.join(secrets.choice(code_options) for i in\n range(5))\n if Player.objects.filter(player_id=generated_player_id).count() == 0:\n break\n return generated_player_id\n\n\nclass Game(models.Model):\n \"\"\" Model that describes a loteria game\n\n Fields\n ------\n cards_id : int\n - the id of the card theme chosen by user during creation of game.\n\n created_at : dateTime\n - the time that the game was started.\n\n game_code : int\n - a unique 7 digit code assigned during creation \n needed to join games.\n\n host : string\n - the session key of the person who started the game\n ensures that users do not have more that 1 running game.\n\n game_over : bool\n - defaults to True for now but will default to False upon creation.\n\n maker_id : int\n - the id of the marker type chosen by user during creation of game.\n\n Notes\n -----\n - Considering making game_code primary key instead\n \"\"\"\n game_code = models.IntegerField(null=False, default=generate_game_code,\n unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n host = 
models.CharField(max_length=100, unique=True)\n cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES,\n default='1')\n marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES,\n default='1')\n game_over = models.BooleanField(default=True)\n\n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id,\n unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n",
"step-5": "from enum import unique\nfrom django.db import models\n\nimport secrets\nimport string\n\nCARD_PACK_CHOICES = (\n ('1', 'Traditional Cards'),\n ('2', 'Special Cards'),\n ('3', 'Other Themed Cards')\n)\n\nMARKER_CHOICES = (\n ('1', 'Plastic Dots'),\n ('2', 'Quarters'),\n ('3', 'Beans')\n)\n\ndef generate_game_code() -> int:\n \"\"\" Generates a unique game code.\n \n Returns\n -------\n int\n - a unique 7 digit numerical code\n \"\"\"\n while True:\n # code will only contain digits\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)\n\ndef generate_player_id() -> string:\n \"\"\"Generates a unique player id.\n \n Returns\n -------\n string\n - a unique 5 digit alphaneumeric code\n \"\"\"\n while True:\n # code will have uppercase letters and numbers\n code_options = string.ascii_uppercase + string.digits\n generated_player_id = ''.join(secrets.choice(code_options) for i in range(5))\n if Player.objects.filter(player_id=generated_player_id).count() == 0:\n break\n return generated_player_id\n\n# Create your models here.\nclass Game( models.Model):\n \"\"\" Model that describes a loteria game\n\n Fields\n ------\n cards_id : int\n - the id of the card theme chosen by user during creation of game.\n\n created_at : dateTime\n - the time that the game was started.\n\n game_code : int\n - a unique 7 digit code assigned during creation \n needed to join games.\n\n host : string\n - the session key of the person who started the game\n ensures that users do not have more that 1 running game.\n\n game_over : bool\n - defaults to True for now but will default to False upon creation.\n\n maker_id : int\n - the id of the marker type chosen by user during creation of game.\n\n Notes\n -----\n - Considering making game_code primary key instead\n \"\"\"\n # default 0 will just be regular loteria cards\n 
# TODO cards_id and marker_id should be choices not harded coded values\n game_code = models.IntegerField(null=False, default=generate_game_code, unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n host = models.CharField(max_length=100, unique=True)\n cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES, default='1')\n marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES, default='1')\n game_over = models.BooleanField(default=True)\n \n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id, unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n\n\n ",
"step-ids": [
4,
7,
9,
10,
11
]
}
|
[
4,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def cnt():
s1 = input('enter a string :').strip()
count = 0
countu = 0
for i in s1:
if i.islower():
count += 1
elif i.isupper():
countu += 1
else:
pass
print('THE NUMBER OF UPPER CASES ARE :', countu)
print('THE NUMBER OF LOWER CASSES ARE: ', count)
cnt()
<|reserved_special_token_1|>
#CALCULATE NUMBER OF UPPER AND LOWER CASES
def cnt():
s1=input("enter a string :").strip()
count=0
countu=0
for i in s1:
if(i.islower()):
count+=1
elif(i.isupper()):
countu+=1
else:
pass
print("THE NUMBER OF UPPER CASES ARE :",countu)
print("THE NUMBER OF LOWER CASSES ARE: ",count)
cnt()
|
flexible
|
{
"blob_id": "6cfda09f360aaa560011b91db8316e5e3889eea1",
"index": 2017,
"step-1": "<mask token>\n",
"step-2": "def cnt():\n s1 = input('enter a string :').strip()\n count = 0\n countu = 0\n for i in s1:\n if i.islower():\n count += 1\n elif i.isupper():\n countu += 1\n else:\n pass\n print('THE NUMBER OF UPPER CASES ARE :', countu)\n print('THE NUMBER OF LOWER CASSES ARE: ', count)\n cnt()\n",
"step-3": "#CALCULATE NUMBER OF UPPER AND LOWER CASES\r\ndef cnt():\r\n \r\n s1=input(\"enter a string :\").strip()\r\n count=0\r\n countu=0\r\n for i in s1:\r\n if(i.islower()):\r\n count+=1\r\n \r\n elif(i.isupper()):\r\n countu+=1\r\n \r\n else:\r\n pass\r\n print(\"THE NUMBER OF UPPER CASES ARE :\",countu)\r\n print(\"THE NUMBER OF LOWER CASSES ARE: \",count)\r\n cnt()\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import unittest
import A1
import part_manager
import security
class test_A1(unittest.TestCase):
# ----------------------------------- set up the mock data for test cases -----------------------------------
def setUp(self):
self.security1 = security.Security("XXX-1234-ABCD-1234", None)
self.security2 = security.Security(None, "kkklas8882kk23nllfjj88290")
self.security3 = security.Security("XXX-1234-ABCD-1234", "kkklas8882kk23nllfjj88290")
self.part_check1 = part_manager.Part_Manager("1233", "2")
self.part_check2 = part_manager.Part_Manager(None, "5")
self.part_check3 = part_manager.Part_Manager("2222", None)
self.delivery1 = part_manager.DeliveryAddress("Mr. Jadeja", "South Park St", "Halifax", "NS", "B3J2K9")
self.delivery2 = part_manager.DeliveryAddress(None, "South Park St", "Halifax", "NS", "B3J2K9")
self.delivery3 = part_manager.DeliveryAddress("Mr. Jadeja", None, "Halifax", "NS", "B3J2K9")
self.delivery4 = part_manager.DeliveryAddress("Mr. Jadeja", "South Park St", None, "NS", "B3J2K9")
self.delivery5 = part_manager.DeliveryAddress("Mr. Jadeja", "South Park St", "Halifax", None, "B3J2K9")
self.delivery6 = part_manager.DeliveryAddress("Mr. Jadeja", "South Park St", "Halifax", "NS", None)
self.auth1 = security.Security("FAKEDEALER", "FAKEACCEESKEY")
self.auth2 = security.Security("XXX-1111-ABCD-1111", "abcd123wxyz456qwerty78901")
self.auth3 = security.Security("XXX-2222-ABCD-2222", "kkklas8882kk23nllfjj88292")
self.part_status1 = part_manager.Part_Manager(["1234", "1111", "2222", "3333", "4444", "fake_part_number"],
["1","2","3","4","5","6"])
# ----------------------------------- Class: Security -----------------------------------
# -----------------------------------------------------------------------------------------
# ------------------------------ Method: validate_dealer -----------------------------
def test_dealerCheck(self):
self.assertEqual(self.security1.validate_dealer(), "Invalid Input XML Response Error: in Dealer Access Key")
self.assertEqual(self.security2.validate_dealer(), "Invalid Input XML Response Error: in Dealer Id")
self.assertEqual(self.security3.validate_dealer(), "Dealer details validated")
# ------------------------------ Method: isDealerAuthorized ---------------------------
def test_dealer_auth(self):
self.assertEqual(self.auth1.isDealerAuthorized(), "dealer not authorized.")
self.assertEqual(self.auth2.isDealerAuthorized(), "dealer not authorized.")
self.assertEqual(self.auth3.isDealerAuthorized(), "dealer authenticated")
# ----------------------------------- Class: part_manager --------------------------------
# ------------------------------------------------------------------------------------------
# ------------------------------ Method: validate_parts -------------------------------
def test_partsCheck(self):
self.assertEqual(self.part_check1.validate_parts(), "Part Number and Quantity are good.")
self.assertEqual(self.part_check2.validate_parts(), "Invalid Input XML Response: Error in Part number")
self.assertEqual(self.part_check3.validate_parts(), "Invalid Input XML Response: Error in Quantity")
# ------------------------------ Method: validate_delivery ----------------------------
def test_delivery(self):
self.assertEqual(self.delivery1.validate_delivery(), "Delivery Details are good")
self.assertEqual(self.delivery2.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(self.delivery3.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(self.delivery4.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(self.delivery5.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(self.delivery6.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
# ------------------------------ Method: SubmitPartForManufactureAndDelivery -----------
def test_part_status_check(self):
self.assertEqual(self.part_status1.SubmitPartForManufactureAndDelivery(),
['success', 'out of stock', 'no longer manufactured', 'invalid part', 'success', 'Invalid Part'])
# ----------------------------------- Class: A1 -------------------------------------------
# -------------------------------------------------------------------------------------------
# ------------------------------ Method: main_function ---------------------------------
def test_main_function(self):
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],
['1234', '5678'], ['2', '25']), "Dealer is authorized, check the response in output.xml")
self.assertEqual(A1.main_function([None, 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],
['2', '25']), "Invalid Input XML Response Error: in Dealer Id")
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', None], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],
['1234', '5678'], ['2', '25']), "Invalid Input XML Response Error: in Dealer Access Key")
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], [None, '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],
['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', None, 'Halifax', 'NS', 'B2T1A4'],
['1234', '5678'], ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', None, 'NS', 'B2T1A4'],
['1234', '5678'], ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', None, 'B2T1A4'],
['1234', '5678'], ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', None],
['1234', '5678'], ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],
["0000", '5678'], ['2', '25']), "Dealer is authorized, check the response in output.xml")
self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],
['1234', '5678'], ['0', '25']), "Invalid Input XML Response: Error in Quantity")
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "2ba5cb1265090b42b9a4838b792a3e81b209ba1a",
"index": 3822,
"step-1": "<mask token>\n\n\nclass test_A1(unittest.TestCase):\n\n def setUp(self):\n self.security1 = security.Security('XXX-1234-ABCD-1234', None)\n self.security2 = security.Security(None, 'kkklas8882kk23nllfjj88290')\n self.security3 = security.Security('XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290')\n self.part_check1 = part_manager.Part_Manager('1233', '2')\n self.part_check2 = part_manager.Part_Manager(None, '5')\n self.part_check3 = part_manager.Part_Manager('2222', None)\n self.delivery1 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', 'Halifax', 'NS', 'B3J2K9')\n self.delivery2 = part_manager.DeliveryAddress(None, 'South Park St',\n 'Halifax', 'NS', 'B3J2K9')\n self.delivery3 = part_manager.DeliveryAddress('Mr. Jadeja', None,\n 'Halifax', 'NS', 'B3J2K9')\n self.delivery4 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', None, 'NS', 'B3J2K9')\n self.delivery5 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', 'Halifax', None, 'B3J2K9')\n self.delivery6 = part_manager.DeliveryAddress('Mr. 
Jadeja',\n 'South Park St', 'Halifax', 'NS', None)\n self.auth1 = security.Security('FAKEDEALER', 'FAKEACCEESKEY')\n self.auth2 = security.Security('XXX-1111-ABCD-1111',\n 'abcd123wxyz456qwerty78901')\n self.auth3 = security.Security('XXX-2222-ABCD-2222',\n 'kkklas8882kk23nllfjj88292')\n self.part_status1 = part_manager.Part_Manager(['1234', '1111',\n '2222', '3333', '4444', 'fake_part_number'], ['1', '2', '3',\n '4', '5', '6'])\n\n def test_dealerCheck(self):\n self.assertEqual(self.security1.validate_dealer(),\n 'Invalid Input XML Response Error: in Dealer Access Key')\n self.assertEqual(self.security2.validate_dealer(),\n 'Invalid Input XML Response Error: in Dealer Id')\n self.assertEqual(self.security3.validate_dealer(),\n 'Dealer details validated')\n\n def test_dealer_auth(self):\n self.assertEqual(self.auth1.isDealerAuthorized(),\n 'dealer not authorized.')\n self.assertEqual(self.auth2.isDealerAuthorized(),\n 'dealer not authorized.')\n self.assertEqual(self.auth3.isDealerAuthorized(),\n 'dealer authenticated')\n <mask token>\n\n def test_delivery(self):\n self.assertEqual(self.delivery1.validate_delivery(),\n 'Delivery Details are good')\n self.assertEqual(self.delivery2.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery3.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery4.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery5.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery6.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n\n def test_part_status_check(self):\n self.assertEqual(self.part_status1.\n SubmitPartForManufactureAndDelivery(), ['success',\n 'out of stock', 'no longer manufactured', 'invalid part',\n 'success', 'Invalid Part'])\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass test_A1(unittest.TestCase):\n\n def setUp(self):\n self.security1 = security.Security('XXX-1234-ABCD-1234', None)\n self.security2 = security.Security(None, 'kkklas8882kk23nllfjj88290')\n self.security3 = security.Security('XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290')\n self.part_check1 = part_manager.Part_Manager('1233', '2')\n self.part_check2 = part_manager.Part_Manager(None, '5')\n self.part_check3 = part_manager.Part_Manager('2222', None)\n self.delivery1 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', 'Halifax', 'NS', 'B3J2K9')\n self.delivery2 = part_manager.DeliveryAddress(None, 'South Park St',\n 'Halifax', 'NS', 'B3J2K9')\n self.delivery3 = part_manager.DeliveryAddress('Mr. Jadeja', None,\n 'Halifax', 'NS', 'B3J2K9')\n self.delivery4 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', None, 'NS', 'B3J2K9')\n self.delivery5 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', 'Halifax', None, 'B3J2K9')\n self.delivery6 = part_manager.DeliveryAddress('Mr. 
Jadeja',\n 'South Park St', 'Halifax', 'NS', None)\n self.auth1 = security.Security('FAKEDEALER', 'FAKEACCEESKEY')\n self.auth2 = security.Security('XXX-1111-ABCD-1111',\n 'abcd123wxyz456qwerty78901')\n self.auth3 = security.Security('XXX-2222-ABCD-2222',\n 'kkklas8882kk23nllfjj88292')\n self.part_status1 = part_manager.Part_Manager(['1234', '1111',\n '2222', '3333', '4444', 'fake_part_number'], ['1', '2', '3',\n '4', '5', '6'])\n\n def test_dealerCheck(self):\n self.assertEqual(self.security1.validate_dealer(),\n 'Invalid Input XML Response Error: in Dealer Access Key')\n self.assertEqual(self.security2.validate_dealer(),\n 'Invalid Input XML Response Error: in Dealer Id')\n self.assertEqual(self.security3.validate_dealer(),\n 'Dealer details validated')\n\n def test_dealer_auth(self):\n self.assertEqual(self.auth1.isDealerAuthorized(),\n 'dealer not authorized.')\n self.assertEqual(self.auth2.isDealerAuthorized(),\n 'dealer not authorized.')\n self.assertEqual(self.auth3.isDealerAuthorized(),\n 'dealer authenticated')\n\n def test_partsCheck(self):\n self.assertEqual(self.part_check1.validate_parts(),\n 'Part Number and Quantity are good.')\n self.assertEqual(self.part_check2.validate_parts(),\n 'Invalid Input XML Response: Error in Part number')\n self.assertEqual(self.part_check3.validate_parts(),\n 'Invalid Input XML Response: Error in Quantity')\n\n def test_delivery(self):\n self.assertEqual(self.delivery1.validate_delivery(),\n 'Delivery Details are good')\n self.assertEqual(self.delivery2.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery3.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery4.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery5.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n 
self.assertEqual(self.delivery6.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n\n def test_part_status_check(self):\n self.assertEqual(self.part_status1.\n SubmitPartForManufactureAndDelivery(), ['success',\n 'out of stock', 'no longer manufactured', 'invalid part',\n 'success', 'Invalid Part'])\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass test_A1(unittest.TestCase):\n\n def setUp(self):\n self.security1 = security.Security('XXX-1234-ABCD-1234', None)\n self.security2 = security.Security(None, 'kkklas8882kk23nllfjj88290')\n self.security3 = security.Security('XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290')\n self.part_check1 = part_manager.Part_Manager('1233', '2')\n self.part_check2 = part_manager.Part_Manager(None, '5')\n self.part_check3 = part_manager.Part_Manager('2222', None)\n self.delivery1 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', 'Halifax', 'NS', 'B3J2K9')\n self.delivery2 = part_manager.DeliveryAddress(None, 'South Park St',\n 'Halifax', 'NS', 'B3J2K9')\n self.delivery3 = part_manager.DeliveryAddress('Mr. Jadeja', None,\n 'Halifax', 'NS', 'B3J2K9')\n self.delivery4 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', None, 'NS', 'B3J2K9')\n self.delivery5 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', 'Halifax', None, 'B3J2K9')\n self.delivery6 = part_manager.DeliveryAddress('Mr. 
Jadeja',\n 'South Park St', 'Halifax', 'NS', None)\n self.auth1 = security.Security('FAKEDEALER', 'FAKEACCEESKEY')\n self.auth2 = security.Security('XXX-1111-ABCD-1111',\n 'abcd123wxyz456qwerty78901')\n self.auth3 = security.Security('XXX-2222-ABCD-2222',\n 'kkklas8882kk23nllfjj88292')\n self.part_status1 = part_manager.Part_Manager(['1234', '1111',\n '2222', '3333', '4444', 'fake_part_number'], ['1', '2', '3',\n '4', '5', '6'])\n\n def test_dealerCheck(self):\n self.assertEqual(self.security1.validate_dealer(),\n 'Invalid Input XML Response Error: in Dealer Access Key')\n self.assertEqual(self.security2.validate_dealer(),\n 'Invalid Input XML Response Error: in Dealer Id')\n self.assertEqual(self.security3.validate_dealer(),\n 'Dealer details validated')\n\n def test_dealer_auth(self):\n self.assertEqual(self.auth1.isDealerAuthorized(),\n 'dealer not authorized.')\n self.assertEqual(self.auth2.isDealerAuthorized(),\n 'dealer not authorized.')\n self.assertEqual(self.auth3.isDealerAuthorized(),\n 'dealer authenticated')\n\n def test_partsCheck(self):\n self.assertEqual(self.part_check1.validate_parts(),\n 'Part Number and Quantity are good.')\n self.assertEqual(self.part_check2.validate_parts(),\n 'Invalid Input XML Response: Error in Part number')\n self.assertEqual(self.part_check3.validate_parts(),\n 'Invalid Input XML Response: Error in Quantity')\n\n def test_delivery(self):\n self.assertEqual(self.delivery1.validate_delivery(),\n 'Delivery Details are good')\n self.assertEqual(self.delivery2.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery3.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery4.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery5.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n 
self.assertEqual(self.delivery6.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n\n def test_part_status_check(self):\n self.assertEqual(self.part_status1.\n SubmitPartForManufactureAndDelivery(), ['success',\n 'out of stock', 'no longer manufactured', 'invalid part',\n 'success', 'Invalid Part'])\n\n def test_main_function(self):\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],\n ['2', '25']),\n 'Dealer is authorized, check the response in output.xml')\n self.assertEqual(A1.main_function([None,\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],\n ['2', '25']), 'Invalid Input XML Response Error: in Dealer Id')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', None], [\n 'Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],\n ['1234', '5678'], ['2', '25']),\n 'Invalid Input XML Response Error: in Dealer Access Key')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], [None, '35 Streetname', 'Halifax',\n 'NS', 'B2T1A4'], ['1234', '5678'], ['2', '25']),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', None,\n 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'], ['2', '25']),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', None, 'NS', 'B2T1A4'], ['1234', '5678'], ['2',\n '25']), 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. 
Jane Smith',\n '35 Streetname', 'Halifax', None, 'B2T1A4'], ['1234', '5678'],\n ['2', '25']),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', None], ['1234', '5678'], ['2',\n '25']), 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['0000', '5678'],\n ['2', '25']),\n 'Dealer is authorized, check the response in output.xml')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],\n ['0', '25']), 'Invalid Input XML Response: Error in Quantity')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass test_A1(unittest.TestCase):\n\n def setUp(self):\n self.security1 = security.Security('XXX-1234-ABCD-1234', None)\n self.security2 = security.Security(None, 'kkklas8882kk23nllfjj88290')\n self.security3 = security.Security('XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290')\n self.part_check1 = part_manager.Part_Manager('1233', '2')\n self.part_check2 = part_manager.Part_Manager(None, '5')\n self.part_check3 = part_manager.Part_Manager('2222', None)\n self.delivery1 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', 'Halifax', 'NS', 'B3J2K9')\n self.delivery2 = part_manager.DeliveryAddress(None, 'South Park St',\n 'Halifax', 'NS', 'B3J2K9')\n self.delivery3 = part_manager.DeliveryAddress('Mr. Jadeja', None,\n 'Halifax', 'NS', 'B3J2K9')\n self.delivery4 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', None, 'NS', 'B3J2K9')\n self.delivery5 = part_manager.DeliveryAddress('Mr. Jadeja',\n 'South Park St', 'Halifax', None, 'B3J2K9')\n self.delivery6 = part_manager.DeliveryAddress('Mr. 
Jadeja',\n 'South Park St', 'Halifax', 'NS', None)\n self.auth1 = security.Security('FAKEDEALER', 'FAKEACCEESKEY')\n self.auth2 = security.Security('XXX-1111-ABCD-1111',\n 'abcd123wxyz456qwerty78901')\n self.auth3 = security.Security('XXX-2222-ABCD-2222',\n 'kkklas8882kk23nllfjj88292')\n self.part_status1 = part_manager.Part_Manager(['1234', '1111',\n '2222', '3333', '4444', 'fake_part_number'], ['1', '2', '3',\n '4', '5', '6'])\n\n def test_dealerCheck(self):\n self.assertEqual(self.security1.validate_dealer(),\n 'Invalid Input XML Response Error: in Dealer Access Key')\n self.assertEqual(self.security2.validate_dealer(),\n 'Invalid Input XML Response Error: in Dealer Id')\n self.assertEqual(self.security3.validate_dealer(),\n 'Dealer details validated')\n\n def test_dealer_auth(self):\n self.assertEqual(self.auth1.isDealerAuthorized(),\n 'dealer not authorized.')\n self.assertEqual(self.auth2.isDealerAuthorized(),\n 'dealer not authorized.')\n self.assertEqual(self.auth3.isDealerAuthorized(),\n 'dealer authenticated')\n\n def test_partsCheck(self):\n self.assertEqual(self.part_check1.validate_parts(),\n 'Part Number and Quantity are good.')\n self.assertEqual(self.part_check2.validate_parts(),\n 'Invalid Input XML Response: Error in Part number')\n self.assertEqual(self.part_check3.validate_parts(),\n 'Invalid Input XML Response: Error in Quantity')\n\n def test_delivery(self):\n self.assertEqual(self.delivery1.validate_delivery(),\n 'Delivery Details are good')\n self.assertEqual(self.delivery2.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery3.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery4.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(self.delivery5.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n 
self.assertEqual(self.delivery6.validate_delivery(),\n 'Invalid Input XML Response: Error in Delivery Details')\n\n def test_part_status_check(self):\n self.assertEqual(self.part_status1.\n SubmitPartForManufactureAndDelivery(), ['success',\n 'out of stock', 'no longer manufactured', 'invalid part',\n 'success', 'Invalid Part'])\n\n def test_main_function(self):\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],\n ['2', '25']),\n 'Dealer is authorized, check the response in output.xml')\n self.assertEqual(A1.main_function([None,\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],\n ['2', '25']), 'Invalid Input XML Response Error: in Dealer Id')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', None], [\n 'Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],\n ['1234', '5678'], ['2', '25']),\n 'Invalid Input XML Response Error: in Dealer Access Key')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], [None, '35 Streetname', 'Halifax',\n 'NS', 'B2T1A4'], ['1234', '5678'], ['2', '25']),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', None,\n 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'], ['2', '25']),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', None, 'NS', 'B2T1A4'], ['1234', '5678'], ['2',\n '25']), 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. 
Jane Smith',\n '35 Streetname', 'Halifax', None, 'B2T1A4'], ['1234', '5678'],\n ['2', '25']),\n 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', None], ['1234', '5678'], ['2',\n '25']), 'Invalid Input XML Response: Error in Delivery Details')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['0000', '5678'],\n ['2', '25']),\n 'Dealer is authorized, check the response in output.xml')\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234',\n 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith',\n '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],\n ['0', '25']), 'Invalid Input XML Response: Error in Quantity')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport A1\nimport part_manager\nimport security\n\n\nclass test_A1(unittest.TestCase):\n \n# ----------------------------------- set up the mock data for test cases ----------------------------------- \n def setUp(self):\n self.security1 = security.Security(\"XXX-1234-ABCD-1234\", None)\n self.security2 = security.Security(None, \"kkklas8882kk23nllfjj88290\")\n self.security3 = security.Security(\"XXX-1234-ABCD-1234\", \"kkklas8882kk23nllfjj88290\")\n \n self.part_check1 = part_manager.Part_Manager(\"1233\", \"2\")\n self.part_check2 = part_manager.Part_Manager(None, \"5\")\n self.part_check3 = part_manager.Part_Manager(\"2222\", None)\n \n self.delivery1 = part_manager.DeliveryAddress(\"Mr. Jadeja\", \"South Park St\", \"Halifax\", \"NS\", \"B3J2K9\")\n self.delivery2 = part_manager.DeliveryAddress(None, \"South Park St\", \"Halifax\", \"NS\", \"B3J2K9\")\n self.delivery3 = part_manager.DeliveryAddress(\"Mr. Jadeja\", None, \"Halifax\", \"NS\", \"B3J2K9\")\n self.delivery4 = part_manager.DeliveryAddress(\"Mr. Jadeja\", \"South Park St\", None, \"NS\", \"B3J2K9\")\n self.delivery5 = part_manager.DeliveryAddress(\"Mr. Jadeja\", \"South Park St\", \"Halifax\", None, \"B3J2K9\")\n self.delivery6 = part_manager.DeliveryAddress(\"Mr. 
Jadeja\", \"South Park St\", \"Halifax\", \"NS\", None)\n \n self.auth1 = security.Security(\"FAKEDEALER\", \"FAKEACCEESKEY\")\n self.auth2 = security.Security(\"XXX-1111-ABCD-1111\", \"abcd123wxyz456qwerty78901\")\n self.auth3 = security.Security(\"XXX-2222-ABCD-2222\", \"kkklas8882kk23nllfjj88292\") \n\n self.part_status1 = part_manager.Part_Manager([\"1234\", \"1111\", \"2222\", \"3333\", \"4444\", \"fake_part_number\"], \n [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]) \n\n \n# ----------------------------------- Class: Security ----------------------------------- \n# -----------------------------------------------------------------------------------------\n \n # ------------------------------ Method: validate_dealer ----------------------------- \n def test_dealerCheck(self):\n self.assertEqual(self.security1.validate_dealer(), \"Invalid Input XML Response Error: in Dealer Access Key\")\n self.assertEqual(self.security2.validate_dealer(), \"Invalid Input XML Response Error: in Dealer Id\")\n self.assertEqual(self.security3.validate_dealer(), \"Dealer details validated\")\n \n \n # ------------------------------ Method: isDealerAuthorized --------------------------- \n def test_dealer_auth(self):\n self.assertEqual(self.auth1.isDealerAuthorized(), \"dealer not authorized.\")\n self.assertEqual(self.auth2.isDealerAuthorized(), \"dealer not authorized.\")\n self.assertEqual(self.auth3.isDealerAuthorized(), \"dealer authenticated\")\n \n \n# ----------------------------------- Class: part_manager --------------------------------\n# ------------------------------------------------------------------------------------------\n \n # ------------------------------ Method: validate_parts ------------------------------- \n def test_partsCheck(self):\n self.assertEqual(self.part_check1.validate_parts(), \"Part Number and Quantity are good.\")\n self.assertEqual(self.part_check2.validate_parts(), \"Invalid Input XML Response: Error in Part number\")\n 
self.assertEqual(self.part_check3.validate_parts(), \"Invalid Input XML Response: Error in Quantity\")\n \n # ------------------------------ Method: validate_delivery ----------------------------\n def test_delivery(self):\n self.assertEqual(self.delivery1.validate_delivery(), \"Delivery Details are good\")\n self.assertEqual(self.delivery2.validate_delivery(), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(self.delivery3.validate_delivery(), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(self.delivery4.validate_delivery(), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(self.delivery5.validate_delivery(), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(self.delivery6.validate_delivery(), \"Invalid Input XML Response: Error in Delivery Details\")\n \n # ------------------------------ Method: SubmitPartForManufactureAndDelivery -----------\n def test_part_status_check(self):\n self.assertEqual(self.part_status1.SubmitPartForManufactureAndDelivery(), \n ['success', 'out of stock', 'no longer manufactured', 'invalid part', 'success', 'Invalid Part'])\n \n\n# ----------------------------------- Class: A1 -------------------------------------------\n# -------------------------------------------------------------------------------------------\n \n # ------------------------------ Method: main_function ---------------------------------\n def test_main_function(self):\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],\n ['1234', '5678'], ['2', '25']), \"Dealer is authorized, check the response in output.xml\")\n self.assertEqual(A1.main_function([None, 'kkklas8882kk23nllfjj88290'], ['Mrs. 
Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],\n ['2', '25']), \"Invalid Input XML Response Error: in Dealer Id\")\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', None], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],\n ['1234', '5678'], ['2', '25']), \"Invalid Input XML Response Error: in Dealer Access Key\")\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], [None, '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'], \n ['2', '25']), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', None, 'Halifax', 'NS', 'B2T1A4'],\n ['1234', '5678'], ['2', '25']), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', None, 'NS', 'B2T1A4'],\n ['1234', '5678'], ['2', '25']), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', None, 'B2T1A4'], \n ['1234', '5678'], ['2', '25']), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', None], \n ['1234', '5678'], ['2', '25']), \"Invalid Input XML Response: Error in Delivery Details\")\n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],\n [\"0000\", '5678'], ['2', '25']), \"Dealer is authorized, check the response in output.xml\") \n self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. 
Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],\n ['1234', '5678'], ['0', '25']), \"Invalid Input XML Response: Error in Quantity\")\n\n\n \nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
"""Exercise 9c"""
import time
import numpy as np
import matplotlib.pyplot as plt
from plot_results import plot_2d
from run_simulation import run_simulation
from simulation_parameters import SimulationParameters
def exercise_9c(world, timestep, reset):
    """Exercise 9c: run one swimming simulation for a fixed amplitude gradient.

    Runs a single simulation with the head/tail amplitude pair below, saves
    the log under ./logs/9c/, then plots the velocity/energy maps.

    Args:
        world: Webots world handle, passed through to run_simulation.
        timestep: simulation timestep in seconds.
        reset: object exposing reset() to restore the world between runs.
    """
    # Amplitude gradient used for this single run (head and tail oscillation
    # amplitudes, in radians).
    r_head = 0.44
    r_tail = 0.23
    parameter_set = [
        SimulationParameters(
            simulation_duration=15,
            drive=4.0,
            amplitudes=None,
            phase_lag=None,
            turn=None,
            amplitude_gradient=[r_head, r_tail],
            backward=None,
            frequency=1,
        )
    ]
    # Run each parameter set and log the result to its own .npz file.
    for simulation_i, parameters in enumerate(parameter_set):
        reset.reset()
        run_simulation(
            world,
            parameters,
            timestep,
            # Number of integration steps for the requested duration.
            int(1000 * parameters.simulation_duration / timestep),
            logs="./logs/9c/simulation_{}.npz".format(simulation_i),
        )
    plot_9c(parameter_set)
def main():
    """Plot the 9c grid-search results over head/tail amplitude gradients.

    Rebuilds the 10x10 (Rhead, Rtail) parameter grid that produced the
    previously saved simulation logs and hands it to plot_9c, which loads
    the matching ./logs/9c/simulation_<i>.npz files.
    """
    parameter_set = [
        SimulationParameters(
            simulation_duration=15,
            drive=4.0,
            amplitudes=None,
            phase_lag=None,
            turn=None,
            amplitude_gradient=[Rhead, Rtail],
            backward=None,
            frequency=1,
        )
        # Head amplitude sweeps up while tail amplitude sweeps down, so the
        # grid covers both head-dominant and tail-dominant gaits.
        for Rhead in np.linspace(0.2, 0.5, 10)
        for Rtail in np.linspace(0.5, 0.2, 10)
    ]
    plot_9c(parameter_set)
def plot_9c(parameter_set):
    """Plot velocity, energy and velocity/energy ratio maps for exercise 9c.

    Loads one saved .npz log per parameter set from ./logs/9c/ and builds
    three 2D maps over the (head, tail) amplitude-gradient grid.

    Args:
        parameter_set: sequence of SimulationParameters; its length must
            match the number of simulation_<i>.npz files on disk.
    """
    # Each row: [head amplitude, tail amplitude, metric value].
    results_vel = np.zeros([len(parameter_set), 3])
    results_en = np.zeros([len(parameter_set), 3])
    ratio_vel_en = np.zeros([len(parameter_set), 3])

    t = time.time()

    # NOTE(review): hard-coded absolute path — adjust to the local checkout.
    path = 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'
    print(path)
    for i in range(len(parameter_set)):
        with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz', allow_pickle=True) as data:
            # Head-link position over time (assumes link 0 is the head —
            # TODO confirm against the logger).
            position = data["links"][:, 0, :]
            n_steps = len(position)
            timestep = float(data["timestep"])

            results_vel[i][0] = data["amplitude_gradient"][0]
            results_vel[i][1] = data["amplitude_gradient"][1]
            results_en[i][:2] = results_vel[i][:2]
            ratio_vel_en[i][:2] = results_vel[i][:2]

            # Velocity: straight-line displacement speed, skipping the first
            # 4 s of transient.
            begin_step = int(4 / timestep)
            vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2
            results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps - begin_step) * timestep)

            # Energy: log10 of the mean instantaneous joint power
            # (joint velocity * joint torque summed over joints).
            joint_vel = data["joints"][begin_step:, :, 1]
            joint_tor = data["joints"][begin_step:, :, 3]
            energy = joint_vel * joint_tor
            results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))

            # Efficiency proxy: velocity divided by the log-energy above.
            ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]

    print('Time elapsed for the velocity plot' + str(time.time() - t))

    plt.figure("Velocity")
    plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Velocity [m/s]'])
    plt.figure("Energy")
    plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]', '$log_{10}(Energy)$[J]'])
    plt.figure("Ratio")
    # Raw string: '\cdot' contains invalid escape sequences otherwise
    # (SyntaxWarning on modern CPython); the resulting string is unchanged.
    plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]', r'Ratio V/E $[s\cdot kg^{-1}\cdot m^{-1}]$'])

    plt.show()
if __name__ == '__main__':
    # Regenerate the 9c plots from previously saved simulation logs.
    main()
|
normal
|
{
"blob_id": "a0284eba1a0e6c498f240068c586e7f8b79cd86c",
"index": 5782,
"step-1": "<mask token>\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel * joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot 
kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel 
* joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel 
* joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plot_results import plot_2d\nfrom run_simulation import run_simulation\nfrom simulation_parameters import SimulationParameters\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 
2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel * joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"Exercise 9c\"\"\"\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plot_results import plot_2d\nfrom run_simulation import run_simulation\nfrom simulation_parameters import SimulationParameters\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n\n n_joints = 10\n \n Rhead = 0.44\n Rtail = 0.23\n\n\n parameter_set = [\n SimulationParameters(\n simulation_duration=15,\n drive=4.0,\n amplitudes=None,\n phase_lag=None,\n turn=None,\n amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],\n backward = None,\n frequency = 1,\n # ...\n )\n \n #for Rhead in np.linspace(0.2,0.5,10)\n #for Rtail in np.linspace(0.5,0.2,10)\n # for amplitudes in ...\n # for ...\n ]\n\n \n # Grid search\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(\n world,\n parameters,\n timestep,\n int(1000*parameters.simulation_duration/timestep),\n logs=\"./logs/9c/simulation_{}.npz\".format(simulation_i)\n )\n\n \n\n plot_9c(parameter_set)\n \n\n \n\ndef main():\n\n\n n_joints = 10\n\n #Rhead = 0.44\n #Rtail = 0.27 \n \n parameter_set = [\n SimulationParameters(\n simulation_duration=15,\n drive=4.0,\n amplitudes=None,\n phase_lag=None,\n turn=None,\n amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],\n backward = None,\n frequency = 1,\n # ...\n )\n \n for Rhead in np.linspace(0.2,0.5,10)\n for Rtail in np.linspace(0.5,0.2,10)\n # for amplitudes in ...\n # for ...\n ]\n\n plot_9c(parameter_set)\n\n \ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set),3])\n results_en = np.zeros([len(parameter_set),3])\n ratio_vel_en = np.zeros([len(parameter_set),3])\n \n \n sal_pos_t = []\n sal_pos_t_bad = []\n\n \n t = time.time()\n\n #path = os.path.dirname(__file__)\n path = 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path+'/logs/9c/simulation_'+str(i)+'.npz',allow_pickle=True) as data:\n 
\n #? initialisation for the computation\n position = data[\"links\"][:, 0, :]\n n_steps = len(position)\n \n timestep = float(data[\"timestep\"])\n\n results_vel[i][0] = data[\"amplitude_gradient\"][0]\n results_vel[i][1] = data[\"amplitude_gradient\"][1] \n\n results_en[i][:2] = results_vel[i][:2] \n ratio_vel_en[i][:2] = results_vel[i][:2]\n\n \n\n #! Velocity\n\n begin_step = (int)(4/timestep)\n\n vel = (position[n_steps-1,:] - position[begin_step,:])**2\n results_vel[i][2] = np.sqrt(np.sum(vel))/((n_steps-begin_step)*timestep)\n\n #! Energy\n\n joint_vel = data[\"joints\"][begin_step:,:,1]\n joint_tor = data[\"joints\"][begin_step:,:,3]\n\n energy = joint_vel * joint_tor\n \n results_en[i][2] = np.log10(np.mean(np.sum(energy,1)))\n \n #! Ratio \n\n ratio_vel_en[i][2] = results_vel[i][2]/results_en[i][2]\n \n \n print ('Time elapsed for the velocity plot' + str(time.time()-t))\n\n\n\n plt.figure(\"Velocity\")\n plot_2d(results_vel,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Velocity [m/s]'])\n plt.figure(\"Energy\")\n plot_2d(results_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', '$log_{10}(Energy)$[J]'])\n plt.figure(\"Ratio\")\n plot_2d(ratio_vel_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Ratio V/E $[s\\cdot kg^{-1}\\cdot m^{-1}]$'])\n \n t = time.time()\n \n plt.show() \n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train(token2id, train_data, lr, batch_size, epochs, model):
dataset = DataGenerator(token2id, train_data)
dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=
my_collate)
model = to_device(model)
model_optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)
criterion = nn.BCELoss()
for epoch in range(1, epochs):
print('Epoch {}'.format(epoch))
print('*' * 80)
running_loss = 0
for i, data in enumerate(dataloader):
data = to_device(data)
x, x_len, y, _ = data
predict = model(x, x_len)
loss = criterion(predict.squeeze(1), y)
model_optimizer.zero_grad()
loss.backward()
model_optimizer.step()
running_loss += loss.item()
if i % 10 == 0 and i != 0:
print('Average batch loss: {}'.format(running_loss / 10))
running_loss = 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train(token2id, train_data, lr, batch_size, epochs, model):
dataset = DataGenerator(token2id, train_data)
dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=
my_collate)
model = to_device(model)
model_optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)
criterion = nn.BCELoss()
for epoch in range(1, epochs):
print('Epoch {}'.format(epoch))
print('*' * 80)
running_loss = 0
for i, data in enumerate(dataloader):
data = to_device(data)
x, x_len, y, _ = data
predict = model(x, x_len)
loss = criterion(predict.squeeze(1), y)
model_optimizer.zero_grad()
loss.backward()
model_optimizer.step()
running_loss += loss.item()
if i % 10 == 0 and i != 0:
print('Average batch loss: {}'.format(running_loss / 10))
running_loss = 0
if __name__ == '__mian__':
pass
<|reserved_special_token_1|>
from utils import to_device
from utils import build_dictionary, my_collate
from DataGenerator import DataGenerator
from torch.utils.data import DataLoader
from torch import optim
import torch.nn as nn
from ADSentimentModel import ADSentimentModel
import torch
def train(token2id, train_data, lr, batch_size, epochs, model):
    """Train the discriminator of `model` on `train_data` with BCE loss.

    Args:
        token2id: mapping from token to integer id used by DataGenerator.
        train_data: raw training samples handed to DataGenerator.
        lr: learning rate for the Adam optimizer.
        batch_size: mini-batch size.
        epochs: upper bound of the 1-based epoch counter (runs epochs - 1 epochs).
        model: model exposing a `discriminator` submodule; invoked as model(x, x_len).
    """
    generator = DataGenerator(token2id, train_data)
    loader = DataLoader(generator, batch_size=batch_size, collate_fn=my_collate)
    model = to_device(model)
    optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)
    bce = nn.BCELoss()
    for epoch in range(1, epochs):
        print('Epoch {}'.format(epoch))
        print('*' * 80)
        accumulated = 0
        for step, batch in enumerate(loader):
            batch = to_device(batch)
            inputs, lengths, targets, _ = batch
            outputs = model(inputs, lengths)
            batch_loss = bce(outputs.squeeze(1), targets)
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            accumulated += batch_loss.item()
            # Print the mean loss over each window of 10 batches.
            if step % 10 == 0 and step != 0:
                print('Average batch loss: {}'.format(accumulated / 10))
                accumulated = 0
if __name__ == '__main__':
    # Bug fix: the guard previously compared against the misspelled
    # '__mian__', so it could never be true when run as a script.
    pass
<|reserved_special_token_1|>
from utils import to_device
from utils import build_dictionary,my_collate
from DataGenerator import DataGenerator
from torch.utils.data import DataLoader
from torch import optim
import torch.nn as nn
from ADSentimentModel import ADSentimentModel
import torch
def train(token2id, train_data, lr, batch_size, epochs, model):
    """Fit the model's discriminator with binary cross-entropy.

    Batches are produced by DataGenerator/my_collate, moved to the active
    device via to_device, and optimized with Adam at learning rate `lr`.
    The epoch counter starts at 1, so epochs - 1 passes are performed; a
    running-average loss is printed every 10 batches.
    """
    loader = DataLoader(
        DataGenerator(token2id, train_data),
        batch_size=batch_size,
        collate_fn=my_collate,
    )
    model = to_device(model)
    adam = optim.Adam(model.discriminator.parameters(), lr=lr)
    loss_fn = nn.BCELoss()
    for epoch in range(1, epochs):
        print('Epoch {}'.format(epoch))
        print('*' * 80)
        window_loss = 0
        for batch_idx, batch in enumerate(loader):
            x, x_len, y, _ = to_device(batch)
            loss = loss_fn(model(x, x_len).squeeze(1), y)
            adam.zero_grad()
            loss.backward()
            adam.step()
            window_loss += loss.item()
            if batch_idx % 10 == 0 and batch_idx != 0:
                print('Average batch loss: {}'.format(window_loss / 10))
                window_loss = 0
if __name__ == "__main__":
    # Bug fix: the guard previously compared against the misspelled
    # "__mian__", so it could never be true when run as a script.
    pass
|
flexible
|
{
"blob_id": "d0364b7cad29c639af9df5c78e810144ffd6ce2e",
"index": 2415,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef train(token2id, train_data, lr, batch_size, epochs, model):\n dataset = DataGenerator(token2id, train_data)\n dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=\n my_collate)\n model = to_device(model)\n model_optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)\n criterion = nn.BCELoss()\n for epoch in range(1, epochs):\n print('Epoch {}'.format(epoch))\n print('*' * 80)\n running_loss = 0\n for i, data in enumerate(dataloader):\n data = to_device(data)\n x, x_len, y, _ = data\n predict = model(x, x_len)\n loss = criterion(predict.squeeze(1), y)\n model_optimizer.zero_grad()\n loss.backward()\n model_optimizer.step()\n running_loss += loss.item()\n if i % 10 == 0 and i != 0:\n print('Average batch loss: {}'.format(running_loss / 10))\n running_loss = 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef train(token2id, train_data, lr, batch_size, epochs, model):\n dataset = DataGenerator(token2id, train_data)\n dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=\n my_collate)\n model = to_device(model)\n model_optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)\n criterion = nn.BCELoss()\n for epoch in range(1, epochs):\n print('Epoch {}'.format(epoch))\n print('*' * 80)\n running_loss = 0\n for i, data in enumerate(dataloader):\n data = to_device(data)\n x, x_len, y, _ = data\n predict = model(x, x_len)\n loss = criterion(predict.squeeze(1), y)\n model_optimizer.zero_grad()\n loss.backward()\n model_optimizer.step()\n running_loss += loss.item()\n if i % 10 == 0 and i != 0:\n print('Average batch loss: {}'.format(running_loss / 10))\n running_loss = 0\n\n\nif __name__ == '__mian__':\n pass\n",
"step-4": "from utils import to_device\nfrom utils import build_dictionary, my_collate\nfrom DataGenerator import DataGenerator\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nimport torch.nn as nn\nfrom ADSentimentModel import ADSentimentModel\nimport torch\n\n\ndef train(token2id, train_data, lr, batch_size, epochs, model):\n dataset = DataGenerator(token2id, train_data)\n dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=\n my_collate)\n model = to_device(model)\n model_optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)\n criterion = nn.BCELoss()\n for epoch in range(1, epochs):\n print('Epoch {}'.format(epoch))\n print('*' * 80)\n running_loss = 0\n for i, data in enumerate(dataloader):\n data = to_device(data)\n x, x_len, y, _ = data\n predict = model(x, x_len)\n loss = criterion(predict.squeeze(1), y)\n model_optimizer.zero_grad()\n loss.backward()\n model_optimizer.step()\n running_loss += loss.item()\n if i % 10 == 0 and i != 0:\n print('Average batch loss: {}'.format(running_loss / 10))\n running_loss = 0\n\n\nif __name__ == '__mian__':\n pass\n",
"step-5": "from utils import to_device\nfrom utils import build_dictionary,my_collate\nfrom DataGenerator import DataGenerator\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nimport torch.nn as nn\nfrom ADSentimentModel import ADSentimentModel\nimport torch\n\ndef train(token2id, train_data, lr, batch_size, epochs,model):\n\n dataset = DataGenerator(token2id, train_data)\n dataloader = DataLoader(dataset,batch_size=batch_size,collate_fn=my_collate)\n model = to_device(model)\n\n model_optimizer = optim.Adam(model.discriminator.parameters(),lr=lr)\n criterion = nn.BCELoss()\n\n for epoch in range(1,epochs):\n print(\"Epoch {}\".format(epoch))\n print(\"*\"*80)\n\n running_loss = 0\n for i,data in enumerate(dataloader):\n data = to_device(data)\n x,x_len,y,_ = data\n predict = model(x,x_len)\n loss = criterion(predict.squeeze(1),y)\n\n model_optimizer.zero_grad()\n loss.backward()\n model_optimizer.step()\n\n running_loss += loss.item()\n\n if i%10 == 0 and i != 0 :\n print(\"Average batch loss: {}\".format(running_loss/10))\n running_loss = 0\n\nif __name__ == \"__mian__\":\n pass\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding: utf-8
# In[5]:
import os
import numpy as np
import pandas as pd
from PIL import Image
import argparse
import time
import shutil
from sklearn.metrics import accuracy_score, mean_squared_error
import torch
import torch.optim
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.models as models
import matplotlib.image as mpimg
class ProtestDataset(Dataset):
    """Training/evaluation dataset of protest images with their labels."""

    def __init__(self, txt_file, img_dir, transform=None):
        """
        Args:
            txt_file: path to the tab-separated annotation file
            img_dir: directory containing the images
            transform: optional transform applied to each sample's image
        """
        # '-' marks a missing annotation in the file; treat it as 0.
        self.label_frame = pd.read_csv(txt_file, delimiter="\t").replace('-', 0)
        self.img_dir = img_dir
        self.transform = transform

    def __len__(self):
        return len(self.label_frame)

    def __getitem__(self, idx):
        record = self.label_frame.iloc[idx]
        image = pil_loader(os.path.join(self.img_dir, record.iloc[0]))
        # Column layout: 0 = filename, 1 = protest flag, 2 = violence
        # score, 3.. = visual attributes.
        label = {
            'protest': record.iloc[1:2].values.astype('float'),
            'violence': record.iloc[2:3].values.astype('float'),
            'visattr': record.iloc[3:].values.astype('float'),
        }
        sample = {"image": image, "label": label}
        if self.transform:
            sample["image"] = self.transform(sample["image"])
        return sample
class ProtestDatasetEval(Dataset):
    """Inference-only dataset: scores every image in a directory,
    so no annotation file is needed."""

    def __init__(self, img_dir):
        """
        Args:
            img_dir: directory containing the images to score
        """
        self.img_dir = img_dir
        self.transform = transforms.Compose([
            transforms.Resize(125),
            transforms.CenterCrop(100),
            transforms.Grayscale(num_output_channels=1),  # testtest
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        self.img_list = sorted(os.listdir(img_dir))

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        path = os.path.join(self.img_dir, self.img_list[idx])
        # keep the path so callers can tell which image produced a score
        return {"imgpath": path, "image": self.transform(pil_loader(path))}
class FinalLayer(nn.Module):
    """Replacement head for resnet50: maps 2048 features to 12 sigmoid
    outputs (protest, violence, and ten visual attributes)."""

    def __init__(self):
        super(FinalLayer, self).__init__()
        self.fc = nn.Linear(2048, 12)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # linear projection followed by an element-wise sigmoid
        return self.sigmoid(self.fc(x))
def pil_loader(path):
    """Open the image at *path* with PIL and return it converted to RGB."""
    with open(path, 'rb') as fh:
        return Image.open(fh).convert('RGB')
def modified_resnet():
    """Return an ImageNet-pretrained resnet50 whose final fully connected
    layer is replaced by FinalLayer (12 sigmoid outputs)."""
    net = models.resnet50(pretrained=True)
    net.fc = FinalLayer()
    return net
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        # guard against division by zero when n == 0 on the first update
        if self.count:
            self.avg = self.sum / self.count
class Lighting(object):
    """
    Lighting noise (AlexNet-style, PCA-based).
    https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py
    """

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, img):
        # alphastd == 0 disables the augmentation entirely
        if self.alphastd == 0:
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # per-channel perturbation: eigvec . (alpha * eigval)
        rgb_shift = (self.eigvec.type_as(img).clone()
                     .mul(alpha.view(1, 3).expand(3, 3))
                     .mul(self.eigval.view(1, 3).expand(3, 3))
                     .sum(1).squeeze())
        return img.add(rgb_shift.view(3, 1, 1).expand_as(img))
# for indexing output of the model
# Column layout of the 12-unit model output:
#   0      -> protest probability
#   1      -> violence score
#   2..11  -> ten binary visual attributes
protest_idx = Variable(torch.LongTensor([0]))
violence_idx = Variable(torch.LongTensor([1]))
visattr_idx = Variable(torch.LongTensor(range(2,12)))
# Best validation loss seen so far; updated inside main().
best_loss = float("inf")
def calculate_loss(output, target, criterions, weights=(1, 10, 5)):
    """Compute the weighted multi-task losses and per-batch metrics.

    Args:
        output: model output tensor of shape (batch, 12) — column 0 is
            the protest probability, column 1 the violence score, and
            columns 2-11 the ten visual attributes (see the module-level
            index tensors).
        target: dict with 'protest', 'violence' and 'visattr' tensors.
        criterions: [protest criterion, violence criterion, visattr criterion].
        weights: per-task loss weights. (Changed from a mutable list
            default to an equivalent tuple to avoid the shared
            mutable-default-argument pitfall; indexing behavior is
            identical.)

    Returns:
        (losses, scores, N_protest): the list of weighted per-task
        losses, a dict of batch metrics ('protest_acc', 'violence_mse',
        'visattr_acc'), and the number of protest images in the batch.
    """
    # number of protest images in this batch
    N_protest = int(target['protest'].data.sum())

    if N_protest == 0:
        # No protest image in the batch: only the protest head is
        # trained; violence / visual-attribute metrics are reported as 0.
        outputs = [None]
        # protest output
        outputs[0] = output.index_select(1, protest_idx)
        targets = [None]
        # protest target
        targets[0] = target['protest'].float()
        losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(1)]
        scores = {}
        scores['protest_acc'] = accuracy_score((outputs[0]).data.round(), targets[0].data)
        scores['violence_mse'] = 0
        scores['visattr_acc'] = 0
        return losses, scores, N_protest

    # mask used for filling 0 for non-protest images
    not_protest_mask = (1 - target['protest']).byte()

    outputs = [None] * 4
    # protest output
    outputs[0] = output.index_select(1, protest_idx)
    # violence output (zeroed in place for non-protest images)
    outputs[1] = output.index_select(1, violence_idx)
    outputs[1].masked_fill_(not_protest_mask, 0)
    # visual attribute output (zeroed in place for non-protest images)
    outputs[2] = output.index_select(1, visattr_idx)
    outputs[2].masked_fill_(not_protest_mask.repeat(1, 10), 0)

    targets = [None] * 4
    targets[0] = target['protest'].float()
    targets[1] = target['violence'].float()
    targets[2] = target['visattr'].float()

    scores = {}
    # protest accuracy for this batch
    scores['protest_acc'] = accuracy_score(outputs[0].data.round(), targets[0].data)
    # violence MSE for this batch, averaged over protest images only
    scores['violence_mse'] = ((outputs[1].data - targets[1].data).pow(2)).sum() / float(N_protest)
    # mean visual-attribute accuracy, counted over protest images only
    comparison = (outputs[2].data.round() == targets[2].data)
    comparison.masked_fill_(not_protest_mask.repeat(1, 10).data, 0)
    n_right = comparison.float().sum()
    mean_acc = n_right / float(N_protest * 10)
    scores['visattr_acc'] = mean_acc

    # return the weighted per-task losses
    losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(len(criterions))]
    return losses, scores, N_protest
def train(train_loader, model, criterions, optimizer, epoch):
    """Run one training epoch over *train_loader*.

    Updates the model in place via *optimizer* and prints running
    statistics every ``args.print_freq`` batches.

    Returns:
        loss_history: list of per-batch total-loss tensors for this epoch.
    """
    model.train()
    # running statistics for this epoch
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_protest = AverageMeter()
    loss_v = AverageMeter()
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()

    end = time.time()
    loss_history = []
    for i, sample in enumerate(train_loader):
        # measure data loading batch_time
        input, target = sample['image'], sample['label']
        data_time.update(time.time() - end)

        if args.cuda:
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()

        target_var = {}
        for k,v in target.items():
            target_var[k] = Variable(v)

        input_var = Variable(input)
        output = model(input_var)

        losses, scores, N_protest = calculate_loss(output, target_var, criterions)
        optimizer.zero_grad()
        # total loss is the sum of the weighted per-task losses
        loss = 0
        for l in losses:
            loss += l
        # back prop
        loss.backward()
        optimizer.step()

        if N_protest:
            # losses[0] is the protest loss; the rest are violence/visattr
            loss_protest.update(losses[0].data, input.size(0))
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # when there is no protest image in the batch
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)

        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}] '
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '
                  'Data {data_time.val:.2f} ({data_time.avg:.2f}) '
                  'Loss {loss_val:.3f} ({loss_avg:.3f}) '
                  'Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '
                  'Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '
                  'Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                  .format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time,
                      loss_val=loss_protest.val + loss_v.val,
                      loss_avg = loss_protest.avg + loss_v.avg,
                      protest_acc = protest_acc, violence_mse = violence_mse,
                      visattr_acc = visattr_acc))
    return loss_history
def validate(val_loader, model, criterions, epoch):
    """Run one validation pass over *val_loader*.

    Returns:
        (avg_loss, loss_history): the combined average loss
        (protest + violence/visattr) and the list of per-batch loss
        tensors.

    NOTE(review): no torch.no_grad()/volatile guard is used here, so
    gradients are still tracked during validation — confirm whether
    this is intended for the targeted torch version.
    """
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()  # NOTE(review): created but never updated
    loss_protest = AverageMeter()
    loss_v = AverageMeter()
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()

    end = time.time()
    loss_history = []
    for i, sample in enumerate(val_loader):
        # measure data loading batch_time
        input, target = sample['image'], sample['label']

        if args.cuda:
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()
        input_var = Variable(input)

        target_var = {}
        for k,v in target.items():
            target_var[k] = Variable(v)

        output = model(input_var)

        losses, scores, N_protest = calculate_loss(output, target_var, criterions)
        # total loss is the sum of the weighted per-task losses
        loss = 0
        for l in losses:
            loss += l

        if N_protest:
            loss_protest.update(losses[0].data, input.size(0))
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # when no protest images
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)

        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '
                  'Loss {loss_val:.3f} ({loss_avg:.3f}) '
                  'Protest Acc {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '
                  'Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '
                  'Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                  .format(
                      epoch, i, len(val_loader), batch_time=batch_time,
                      loss_val =loss_protest.val + loss_v.val,
                      loss_avg = loss_protest.avg + loss_v.avg,
                      protest_acc = protest_acc,
                      violence_mse = violence_mse, visattr_acc = visattr_acc))

    print(' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} '
          'Violence MSE {violence_mse.avg:.5f} '
          'Vis Attr Acc {visattr_acc.avg:.3f} '
          .format(loss_avg = loss_protest.avg + loss_v.avg,
                  protest_acc = protest_acc,
                  violence_mse = violence_mse, visattr_acc = visattr_acc))
    return loss_protest.avg + loss_v.avg, loss_history
def adjust_learning_rate(optimizer, epoch):
    """Decay the learning rate: lr = args.lr * 0.4 ** (epoch // 4).

    NOTE(review): the previous docstring claimed "decayed by 0.5 every
    5 epochs", which contradicted the code; the actual schedule
    multiplies the initial LR by 0.4 every 4 epochs.
    """
    lr = args.lr * (0.4 ** (epoch // 4))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; when *is_best* is truthy, also
    mirror the checkpoint to 'model_best.pth.tar'."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def main():
    """Build the data pipeline and model, optionally resume from a
    checkpoint, then train/validate for args.epochs epochs,
    checkpointing after every epoch."""
    global best_loss
    loss_history_train = []
    loss_history_val = []
    data_dir = args.data_dir
    img_dir_train = os.path.join(data_dir, "train")
    img_dir_val = os.path.join(data_dir, "test")
    txt_file_train = os.path.join(data_dir, "annot_train.txt")
    txt_file_val = os.path.join(data_dir, "annot_test.txt")

    # load pretrained resnet50 with a modified last fully connected layer
    model = modified_resnet()

    # we need three different criterion for training:
    # BCE for the protest flag, MSE for violence, BCE for visual attributes
    criterion_protest = nn.BCELoss()
    criterion_violence = nn.MSELoss()
    criterion_visattr = nn.BCELoss()
    criterions = [criterion_protest, criterion_violence, criterion_visattr]

    if args.cuda and not torch.cuda.is_available():
        raise Exception("No GPU Found")
    if args.cuda:
        model = model.cuda()
        criterions = [criterion.cuda() for criterion in criterions]

    # we are not training the frozen layers
    parameters = filter(lambda p: p.requires_grad, model.parameters())

    optimizer = torch.optim.SGD(
                    parameters, args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay
                    )

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            # NOTE(review): start_epoch was already assigned two lines
            # above; this second assignment is redundant.
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            loss_history_train = checkpoint['loss_history_train']
            loss_history_val = checkpoint['loss_history_val']
            # --change_lr overrides the checkpointed optimizer state
            # with a fresh learning rate
            if args.change_lr:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            else:
                optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # PCA eigenvalues/eigenvectors fed to the Lighting augmentation
    eigval = torch.Tensor([0.2175, 0.0188, 0.0045])
    eigvec = torch.Tensor([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
    # training pipeline uses heavy augmentation; validation does not
    train_dataset = ProtestDataset(
                        txt_file = txt_file_train,
                        img_dir = img_dir_train,
                        transform = transforms.Compose([
                                transforms.RandomResizedCrop(100),
                                transforms.RandomRotation(30),
                                transforms.RandomHorizontalFlip(),
                                transforms.ColorJitter(
                                    brightness = 0.4,
                                    contrast = 0.7,
                                    saturation = 0.4,
                                    ),
                                transforms.ToTensor(),
                                Lighting(0.1, eigval, eigvec),
                                normalize,
                        ]))
    val_dataset = ProtestDataset(
                    txt_file = txt_file_val,
                    img_dir = img_dir_val,
                    transform = transforms.Compose([
                        transforms.Resize(125),
                        transforms.CenterCrop(100),
                        transforms.ToTensor(),
                        normalize,
                    ]))
    train_loader = DataLoader(
                    train_dataset,
                    num_workers = args.workers,
                    batch_size = args.batch_size,
                    shuffle = True
                    )
    val_loader = DataLoader(
                    val_dataset,
                    num_workers = args.workers,
                    batch_size = args.batch_size)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        loss_history_train_this = train(train_loader, model, criterions,
                                        optimizer, epoch)
        loss_val, loss_history_val_this = validate(val_loader, model,
                                                   criterions, epoch)
        loss_history_train.append(loss_history_train_this)
        loss_history_val.append(loss_history_val_this)

        is_best = loss_val < best_loss
        if is_best:
            print('best model!!')
        best_loss = min(loss_val, best_loss)
        # checkpoint every epoch; 'is_best' additionally mirrors it to
        # model_best.pth.tar
        save_checkpoint({
            'epoch' : epoch + 1,
            'state_dict' : model.state_dict(),
            'best_loss' : best_loss,
            'optimizer' : optimizer.state_dict(),
            'loss_history_train': loss_history_train,
            'loss_history_val': loss_history_val
        }, is_best)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir",
type=str,
default = "",
help = "directory path to dataset",
)
parser.add_argument("--cuda",
action = "store_true",
help = "use cuda?",
)
parser.add_argument("--workers",
type = int,
default = 0,
help = "number of workers",
)
parser.add_argument("--batch_size",
type = int,
default = 8,
help = "batch size",
)
parser.add_argument("--epochs",
type = int,
default = 10,
help = "number of epochs",
)
parser.add_argument("--weight_decay",
type = float,
default = 1e-4,
help = "weight decay",
)
parser.add_argument("--lr",
type = float,
default = 0.01,
help = "learning rate",
)
parser.add_argument("--momentum",
type = float,
default = 0.9,
help = "momentum",
)
parser.add_argument("--print_freq",
type = int,
default = 10,
help = "print frequency",
)
parser.add_argument('--resume',
default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--change_lr',
action = "store_true",
help = "Use this if you want to \
change learning rate when resuming")
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
args, unknown = parser.parse_known_args()
if args.cuda:
protest_idx = protest_idx.cuda()
violence_idx = violence_idx.cuda()
visattr_idx = visattr_idx.cuda()
main()
|
normal
|
{
"blob_id": "f3a3746c48617754aad5ae8d0d7a0b8908c34562",
"index": 7852,
"step-1": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n <mask token>\n\n def __len__(self):\n return len(self.label_frame)\n <mask token>\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = 
eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n <mask token>\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n 
self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n\n def __init__(self, txt_file, img_dir, transform=None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter='\\t').replace('-', 0\n )\n self.img_dir = img_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, 
self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n \"\"\"\n dataset for training and evaluation\n \"\"\"\n\n def __init__(self, txt_file, img_dir, transform=None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter='\\t').replace('-', 0\n )\n self.img_dir = img_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def 
__init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n\n\ndef train(train_loader, model, criterions, optimizer, epoch):\n \"\"\"training the model\"\"\"\n model.train()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n end = time.time()\n loss_history = []\n for i, sample in enumerate(train_loader):\n input, target = sample['image'], sample['label']\n data_time.update(time.time() - end)\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n target_var = {}\n for k, v in target.items():\n target_var[k] = Variable(v)\n input_var = Variable(input)\n output = model(input_var)\n losses, scores, N_protest = 
calculate_loss(output, target_var,\n criterions)\n optimizer.zero_grad()\n loss = 0\n for l in losses:\n loss += l\n loss.backward()\n optimizer.step()\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n batch_time.update(time.time() - end)\n end = time.time()\n if i % args.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}] Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f}) Loss {loss_val:.3f} ({loss_avg:.3f}) Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss_val=loss_protest.val + loss_v.val,\n loss_avg=loss_protest.avg + loss_v.avg, protest_acc=\n protest_acc, violence_mse=violence_mse, visattr_acc=\n visattr_acc))\n return loss_history\n\n\ndef validate(val_loader, model, criterions, epoch):\n \"\"\"Validating\"\"\"\n model.eval()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n end = time.time()\n loss_history = []\n for i, sample in enumerate(val_loader):\n input, target = sample['image'], sample['label']\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n input_var = Variable(input)\n target_var = {}\n for k, v in target.items():\n target_var[k] = Variable(v)\n output = model(input_var)\n losses, scores, N_protest = calculate_loss(output, target_var,\n criterions)\n loss = 0\n 
for l in losses:\n loss += l\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n batch_time.update(time.time() - end)\n end = time.time()\n if i % args.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}]\\tTime {batch_time.val:.2f} ({batch_time.avg:.2f}) Loss {loss_val:.3f} ({loss_avg:.3f}) Protest Acc {protest_acc.val:.3f} ({protest_acc.avg:.3f}) Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(epoch, i, len(val_loader), batch_time=batch_time,\n loss_val=loss_protest.val + loss_v.val, loss_avg=\n loss_protest.avg + loss_v.avg, protest_acc=protest_acc,\n violence_mse=violence_mse, visattr_acc=visattr_acc))\n print(\n ' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} Violence MSE {violence_mse.avg:.5f} Vis Attr Acc {visattr_acc.avg:.3f} '\n .format(loss_avg=loss_protest.avg + loss_v.avg, protest_acc=\n protest_acc, violence_mse=violence_mse, visattr_acc=visattr_acc))\n return loss_protest.avg + loss_v.avg, loss_history\n\n\n<mask token>\n",
"step-5": "\n# coding: utf-8\n\n# In[5]:\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport argparse\nimport time\nimport shutil\nfrom sklearn.metrics import accuracy_score, mean_squared_error\n\nimport torch\nimport torch.optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.models as models\nimport matplotlib.image as mpimg\n\nclass ProtestDataset(Dataset):\n \"\"\"\n dataset for training and evaluation\n \"\"\"\n def __init__(self, txt_file, img_dir, transform = None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter=\"\\t\").replace('-', 0)\n self.img_dir = img_dir\n self.transform = transform\n def __len__(self):\n return len(self.label_frame)\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir,\n self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n \n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest':protest, 'violence':violence, 'visattr':visattr}\n\n sample = {\"image\":image, \"label\":label}\n if self.transform:\n sample[\"image\"] = self.transform(sample[\"image\"])\n return sample\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([\n transforms.Resize(125),\n transforms.CenterCrop(100),\n transforms.Grayscale(num_output_channels=1), #testtest\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 
0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n self.img_list = sorted(os.listdir(img_dir))\n def __len__(self):\n return len(self.img_list)\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir,\n self.img_list[idx])\n image = pil_loader(imgpath)\n # we need this variable to check if the image is protest or not)\n sample = {\"imgpath\":imgpath, \"image\":image}\n sample[\"image\"] = self.transform(sample[\"image\"])\n return sample\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\ndef pil_loader(path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\ndef modified_resnet():\n # load pretrained resnet with a modified last fully connected layer\n model = models.resnet50(pretrained = True)\n model.fc = FinalLayer()\n return model\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n \n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone() .mul(alpha.view(1, 3).expand(3, 3)) .mul(self.eigval.view(1, 3).expand(3, 3)) .sum(1).squeeze()\n\n 
return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n# for indexing output of the model\nprotest_idx = Variable(torch.LongTensor([0]))\nviolence_idx = Variable(torch.LongTensor([1]))\nvisattr_idx = Variable(torch.LongTensor(range(2,12)))\nbest_loss = float(\"inf\")\n\ndef calculate_loss(output, target, criterions, weights = [1, 10, 5]):\n \"\"\"Calculate loss\"\"\"\n # number of protest images\n N_protest = int(target['protest'].data.sum())\n batch_size = len(target['protest'])\n\n if N_protest == 0:\n # if no protest image in target\n outputs = [None]\n # protest output\n outputs[0] = output.index_select(1, protest_idx)\n targets = [None]\n # protest target\n targets[0] = target['protest'].float()\n losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(1)]\n scores = {}\n scores['protest_acc'] = accuracy_score((outputs[0]).data.round(), targets[0].data)\n scores['violence_mse'] = 0\n scores['visattr_acc'] = 0\n return losses, scores, N_protest\n\n # used for filling 0 for non-protest images\n not_protest_mask = (1 - target['protest']).byte()\n\n outputs = [None] * 4\n # protest output\n outputs[0] = output.index_select(1, protest_idx)\n # violence output\n outputs[1] = output.index_select(1, violence_idx)\n outputs[1].masked_fill_(not_protest_mask, 0)\n # visual attribute output\n outputs[2] = output.index_select(1, visattr_idx)\n outputs[2].masked_fill_(not_protest_mask.repeat(1, 10),0)\n\n\n targets = [None] * 4\n\n targets[0] = target['protest'].float()\n targets[1] = target['violence'].float()\n targets[2] = target['visattr'].float()\n\n scores = {}\n # protest accuracy for this batch\n scores['protest_acc'] = accuracy_score(outputs[0].data.round(), targets[0].data)\n # violence MSE for this batch\n scores['violence_mse'] = ((outputs[1].data - targets[1].data).pow(2)).sum() / float(N_protest)\n # mean accuracy for visual attribute for this batch\n comparison = (outputs[2].data.round() == targets[2].data)\n 
comparison.masked_fill_(not_protest_mask.repeat(1, 10).data,0)\n n_right = comparison.float().sum()\n mean_acc = n_right / float(N_protest*10)\n scores['visattr_acc'] = mean_acc\n\n # return weighted loss\n losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(len(criterions))]\n\n return losses, scores, N_protest\n\n\n\ndef train(train_loader, model, criterions, optimizer, epoch):\n \"\"\"training the model\"\"\"\n\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n\n end = time.time()\n loss_history = []\n for i, sample in enumerate(train_loader):\n # measure data loading batch_time\n input, target = sample['image'], sample['label']\n data_time.update(time.time() - end)\n\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n target_var = {}\n for k,v in target.items():\n target_var[k] = Variable(v)\n\n input_var = Variable(input)\n output = model(input_var)\n\n losses, scores, N_protest = calculate_loss(output, target_var, criterions)\n\n optimizer.zero_grad()\n loss = 0\n for l in losses:\n loss += l\n # back prop\n loss.backward()\n optimizer.step()\n \n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n # when there is no protest image in the batch\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}] '\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '\n 'Data {data_time.val:.2f} ({data_time.avg:.2f}) '\n 'Loss 
{loss_val:.3f} ({loss_avg:.3f}) '\n 'Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '\n 'Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '\n 'Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time,\n loss_val=loss_protest.val + loss_v.val,\n loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc, violence_mse = violence_mse,\n visattr_acc = visattr_acc))\n\n return loss_history\n\ndef validate(val_loader, model, criterions, epoch):\n \"\"\"Validating\"\"\"\n model.eval()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n\n end = time.time()\n loss_history = []\n for i, sample in enumerate(val_loader):\n # measure data loading batch_time\n input, target = sample['image'], sample['label']\n\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n input_var = Variable(input)\n\n target_var = {}\n for k,v in target.items():\n target_var[k] = Variable(v)\n\n output = model(input_var)\n\n losses, scores, N_protest = calculate_loss(output, target_var, criterions)\n loss = 0\n for l in losses:\n loss += l\n\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n # when no protest images\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '\n 'Loss {loss_val:.3f} ({loss_avg:.3f}) '\n 'Protest Acc 
{protest_acc.val:.3f} ({protest_acc.avg:.3f}) '\n 'Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '\n 'Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(\n epoch, i, len(val_loader), batch_time=batch_time,\n loss_val =loss_protest.val + loss_v.val,\n loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc,\n violence_mse = violence_mse, visattr_acc = visattr_acc))\n\n print(' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} '\n 'Violence MSE {violence_mse.avg:.5f} '\n 'Vis Attr Acc {visattr_acc.avg:.3f} '\n .format(loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc,\n violence_mse = violence_mse, visattr_acc = visattr_acc))\n return loss_protest.avg + loss_v.avg, loss_history\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 0.5 every 5 epochs\"\"\"\n lr = args.lr * (0.4 ** (epoch // 4))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n \"\"\"Save checkpoints\"\"\"\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\n\ndef main():\n global best_loss\n loss_history_train = []\n loss_history_val = []\n data_dir = args.data_dir\n img_dir_train = os.path.join(data_dir, \"train\")\n img_dir_val = os.path.join(data_dir, \"test\")\n txt_file_train = os.path.join(data_dir, \"annot_train.txt\")\n txt_file_val = os.path.join(data_dir, \"annot_test.txt\")\n\n # load pretrained resnet50 with a modified last fully connected layer\n model = modified_resnet()\n\n # we need three different criterion for training\n criterion_protest = nn.BCELoss()\n criterion_violence = nn.MSELoss()\n criterion_visattr = nn.BCELoss()\n criterions = [criterion_protest, criterion_violence, criterion_visattr]\n\n if args.cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU Found\")\n if args.cuda:\n model = model.cuda()\n 
criterions = [criterion.cuda() for criterion in criterions]\n # we are not training the frozen layers\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n\n optimizer = torch.optim.SGD(\n parameters, args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay\n )\n\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_loss = checkpoint['best_loss']\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n loss_history_train = checkpoint['loss_history_train']\n loss_history_val = checkpoint['loss_history_val']\n if args.change_lr:\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.lr\n else:\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n eigval = torch.Tensor([0.2175, 0.0188, 0.0045])\n eigvec = torch.Tensor([[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n\n train_dataset = ProtestDataset(\n txt_file = txt_file_train,\n img_dir = img_dir_train,\n transform = transforms.Compose([\n transforms.RandomResizedCrop(100),\n transforms.RandomRotation(30),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(\n brightness = 0.4,\n contrast = 0.7,\n saturation = 0.4,\n ),\n transforms.ToTensor(),\n Lighting(0.1, eigval, eigvec),\n normalize,\n ]))\n val_dataset = ProtestDataset(\n txt_file = txt_file_val,\n img_dir = img_dir_val,\n transform = transforms.Compose([\n transforms.Resize(125),\n transforms.CenterCrop(100),\n transforms.ToTensor(),\n normalize,\n ]))\n train_loader = DataLoader(\n train_dataset,\n num_workers = args.workers,\n 
batch_size = args.batch_size,\n shuffle = True\n )\n val_loader = DataLoader(\n val_dataset,\n num_workers = args.workers,\n batch_size = args.batch_size)\n\n for epoch in range(args.start_epoch, args.epochs):\n adjust_learning_rate(optimizer, epoch)\n loss_history_train_this = train(train_loader, model, criterions,\n optimizer, epoch)\n loss_val, loss_history_val_this = validate(val_loader, model,\n criterions, epoch)\n loss_history_train.append(loss_history_train_this)\n loss_history_val.append(loss_history_val_this)\n\n is_best = loss_val < best_loss\n if is_best:\n print('best model!!')\n best_loss = min(loss_val, best_loss)\n\n\n save_checkpoint({\n 'epoch' : epoch + 1,\n 'state_dict' : model.state_dict(),\n 'best_loss' : best_loss,\n 'optimizer' : optimizer.state_dict(),\n 'loss_history_train': loss_history_train,\n 'loss_history_val': loss_history_val\n }, is_best)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_dir\",\n type=str,\n default = \"\",\n help = \"directory path to dataset\",\n )\n parser.add_argument(\"--cuda\",\n action = \"store_true\",\n help = \"use cuda?\",\n )\n parser.add_argument(\"--workers\",\n type = int,\n default = 0,\n help = \"number of workers\",\n )\n parser.add_argument(\"--batch_size\",\n type = int,\n default = 8,\n help = \"batch size\",\n )\n parser.add_argument(\"--epochs\",\n type = int,\n default = 10,\n help = \"number of epochs\",\n )\n parser.add_argument(\"--weight_decay\",\n type = float,\n default = 1e-4,\n help = \"weight decay\",\n )\n parser.add_argument(\"--lr\",\n type = float,\n default = 0.01,\n help = \"learning rate\",\n )\n parser.add_argument(\"--momentum\",\n type = float,\n default = 0.9,\n help = \"momentum\",\n )\n parser.add_argument(\"--print_freq\",\n type = int,\n default = 10,\n help = \"print frequency\",\n )\n parser.add_argument('--resume',\n default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n 
parser.add_argument('--change_lr',\n action = \"store_true\",\n help = \"Use this if you want to \\\n change learning rate when resuming\")\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n args, unknown = parser.parse_known_args()\n\n if args.cuda:\n protest_idx = protest_idx.cuda()\n violence_idx = violence_idx.cuda()\n visattr_idx = visattr_idx.cuda()\n\n\n main()\n\n",
"step-ids": [
20,
21,
22,
25,
35
]
}
|
[
20,
21,
22,
25,
35
] |
import sys
from Decks.Virtual_World.vw_sets import *
from tools import *
hand_3playable_hts = ["Nibiru, the Primal Being", "Effect Veiler", "Fantastical Dragon Phantazmay", "Dragon Buster Destruction Sword", "Dragon Buster Destruction Sword"]
hand_2playable_hts = ["Nibiru, the Primal Being", "Nibiru, the Primal Being", "Fantastical Dragon Phantazmay", "Fantastical Dragon Phantazmay", "Dragon Buster Destruction Sword"]
hand_3lvl3vw = ["Virtual World Mai-Hime - Lulu", "Virtual World Xiezhi - Jiji", "Virtual World Xiezhi - Jiji", "Virtual World Kirin - Lili", "Virtual World Roshi - Laolao"]
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
|
normal
|
{
"blob_id": "43179b8b096836758271a791b4aacb7bbe398ea9",
"index": 1807,
"step-1": "<mask token>\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-3": "<mask token>\nhand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',\n 'Dragon Buster Destruction Sword']\nhand_2playable_hts = ['Nibiru, the Primal Being',\n 'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']\nhand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',\n 'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',\n 'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-4": "import sys\nfrom Decks.Virtual_World.vw_sets import *\nfrom tools import *\nhand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',\n 'Dragon Buster Destruction Sword']\nhand_2playable_hts = ['Nibiru, the Primal Being',\n 'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']\nhand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',\n 'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',\n 'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-5": "import sys\nfrom Decks.Virtual_World.vw_sets import *\nfrom tools import *\n\nhand_3playable_hts = [\"Nibiru, the Primal Being\", \"Effect Veiler\", \"Fantastical Dragon Phantazmay\", \"Dragon Buster Destruction Sword\", \"Dragon Buster Destruction Sword\"]\nhand_2playable_hts = [\"Nibiru, the Primal Being\", \"Nibiru, the Primal Being\", \"Fantastical Dragon Phantazmay\", \"Fantastical Dragon Phantazmay\", \"Dragon Buster Destruction Sword\"]\nhand_3lvl3vw = [\"Virtual World Mai-Hime - Lulu\", \"Virtual World Xiezhi - Jiji\", \"Virtual World Xiezhi - Jiji\", \"Virtual World Kirin - Lili\", \"Virtual World Roshi - Laolao\"]\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def generate_colour_data(width, height, imagiry_data, pixel2coord):
"""Extract color data from the .tiff file """
for i in range(1, height):
for j in range(1, width):
colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],
imagiry_data.read([1])[0][i - 1][j - 1]])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_colour_data(width, height, imagiry_data, pixel2coord):
"""Extract color data from the .tiff file """
for i in range(1, height):
for j in range(1, width):
colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],
imagiry_data.read([1])[0][i - 1][j - 1]])
with rio.open(
'C:\\Users\\user.DESKTOP-OMQ89VA\\Documents\\USGS-LIDAR-\\data\\iowa.tif'
) as imagery_data:
T0 = imagery_data.transform
T1 = T0 * Affine.translation(0.5, 0.5)
pixel2coord = lambda c, r: (c, r) * T1
width = imagery_data.width
height = imagery_data.height
generate_colour_data(width, height, imagery_data, pixel2coord)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
colour_data = []
def generate_colour_data(width, height, imagiry_data, pixel2coord):
"""Extract color data from the .tiff file """
for i in range(1, height):
for j in range(1, width):
colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],
imagiry_data.read([1])[0][i - 1][j - 1]])
with rio.open(
'C:\\Users\\user.DESKTOP-OMQ89VA\\Documents\\USGS-LIDAR-\\data\\iowa.tif'
) as imagery_data:
T0 = imagery_data.transform
T1 = T0 * Affine.translation(0.5, 0.5)
pixel2coord = lambda c, r: (c, r) * T1
width = imagery_data.width
height = imagery_data.height
generate_colour_data(width, height, imagery_data, pixel2coord)
<|reserved_special_token_1|>
import rasterio as rio
from affine import Affine
colour_data = []
def generate_colour_data(width, height, imagiry_data, pixel2coord):
"""Extract color data from the .tiff file """
for i in range(1, height):
for j in range(1, width):
colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],
imagiry_data.read([1])[0][i - 1][j - 1]])
with rio.open(
'C:\\Users\\user.DESKTOP-OMQ89VA\\Documents\\USGS-LIDAR-\\data\\iowa.tif'
) as imagery_data:
T0 = imagery_data.transform
T1 = T0 * Affine.translation(0.5, 0.5)
pixel2coord = lambda c, r: (c, r) * T1
width = imagery_data.width
height = imagery_data.height
generate_colour_data(width, height, imagery_data, pixel2coord)
<|reserved_special_token_1|>
import rasterio as rio
from affine import Affine
colour_data = []
def generate_colour_data(width, height, imagiry_data, pixel2coord):
"""Extract color data from the .tiff file """
for i in range(1, height):
for j in range(1, width):
colour_data.append(
[
pixel2coord(j, i)[0],
pixel2coord(j, i)[1],
imagiry_data.read([1])[0][i - 1][j - 1],
]
)
#Code that will extract the width, height and transformation information of the .tiff file and pass it to the function
# generate_colour_data which will populate the color data in a list in the following format: [longitude, latitude, Red, Green, Blue, Alpha]
with rio.open(r'C:\Users\user.DESKTOP-OMQ89VA\Documents\USGS-LIDAR-\data\iowa.tif') as imagery_data:
T0 = imagery_data.transform
T1 = T0 * Affine.translation(0.5, 0.5)
pixel2coord = lambda c, r: (c, r) * T1
width = imagery_data.width
height = imagery_data.height
generate_colour_data(width, height, imagery_data, pixel2coord)
|
flexible
|
{
"blob_id": "7e8b192e77e857f1907d5272d03c1138a10c61f4",
"index": 4803,
"step-1": "<mask token>\n\n\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1]])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1]])\n\n\nwith rio.open(\n 'C:\\\\Users\\\\user.DESKTOP-OMQ89VA\\\\Documents\\\\USGS-LIDAR-\\\\data\\\\iowa.tif'\n ) as imagery_data:\n T0 = imagery_data.transform\n T1 = T0 * Affine.translation(0.5, 0.5)\n pixel2coord = lambda c, r: (c, r) * T1\n width = imagery_data.width\n height = imagery_data.height\n generate_colour_data(width, height, imagery_data, pixel2coord)\n",
"step-3": "<mask token>\ncolour_data = []\n\n\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1]])\n\n\nwith rio.open(\n 'C:\\\\Users\\\\user.DESKTOP-OMQ89VA\\\\Documents\\\\USGS-LIDAR-\\\\data\\\\iowa.tif'\n ) as imagery_data:\n T0 = imagery_data.transform\n T1 = T0 * Affine.translation(0.5, 0.5)\n pixel2coord = lambda c, r: (c, r) * T1\n width = imagery_data.width\n height = imagery_data.height\n generate_colour_data(width, height, imagery_data, pixel2coord)\n",
"step-4": "import rasterio as rio\nfrom affine import Affine\ncolour_data = []\n\n\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1]])\n\n\nwith rio.open(\n 'C:\\\\Users\\\\user.DESKTOP-OMQ89VA\\\\Documents\\\\USGS-LIDAR-\\\\data\\\\iowa.tif'\n ) as imagery_data:\n T0 = imagery_data.transform\n T1 = T0 * Affine.translation(0.5, 0.5)\n pixel2coord = lambda c, r: (c, r) * T1\n width = imagery_data.width\n height = imagery_data.height\n generate_colour_data(width, height, imagery_data, pixel2coord)\n",
"step-5": "import rasterio as rio\nfrom affine import Affine\n\ncolour_data = []\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append(\n [\n pixel2coord(j, i)[0],\n pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1],\n \n ]\n )\n#Code that will extract the width, height and transformation information of the .tiff file and pass it to the function \n# generate_colour_data which will populate the color data in a list in the following format: [longitude, latitude, Red, Green, Blue, Alpha]\nwith rio.open(r'C:\\Users\\user.DESKTOP-OMQ89VA\\Documents\\USGS-LIDAR-\\data\\iowa.tif') as imagery_data:\n T0 = imagery_data.transform\n T1 = T0 * Affine.translation(0.5, 0.5)\n pixel2coord = lambda c, r: (c, r) * T1\n width = imagery_data.width\n height = imagery_data.height\n \n generate_colour_data(width, height, imagery_data, pixel2coord)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class TestTTT(unittest.TestCase):
def test_mcts(self):
if 0 in skip:
print('Skipping ai self-play')
return
ttt = TTT()
for i in range(1000):
mcts = MCTS(ttt)
state = mcts.root.state
while not mcts.board.ending_state(state):
move = mcts.search()
print(move)
state = mcts.board.get_state(state, move)
mcts.board.print(state)
mcts.make_move(move)
self.assertEqual(mcts.board.ending_state(state), -1)
def test_play_mcts(self):
if 1 in skip:
print('Skipping human-ai play')
return
ttt = TTT()
mcts = MCTS(ttt)
state = mcts.root.state
my_player = 2
while not mcts.board.ending_state(state):
mcts.board.print(state)
move = mcts.search()
print(move)
if state[1] == my_player:
move = input('Make move!\n')
move = int(move[0]), int(move[1])
mcts.make_move(move)
state = mcts.root.state
mcts.board.print(state)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_trick_win(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTTT(unittest.TestCase):
def test_mcts(self):
if 0 in skip:
print('Skipping ai self-play')
return
ttt = TTT()
for i in range(1000):
mcts = MCTS(ttt)
state = mcts.root.state
while not mcts.board.ending_state(state):
move = mcts.search()
print(move)
state = mcts.board.get_state(state, move)
mcts.board.print(state)
mcts.make_move(move)
self.assertEqual(mcts.board.ending_state(state), -1)
def test_play_mcts(self):
if 1 in skip:
print('Skipping human-ai play')
return
ttt = TTT()
mcts = MCTS(ttt)
state = mcts.root.state
my_player = 2
while not mcts.board.ending_state(state):
mcts.board.print(state)
move = mcts.search()
print(move)
if state[1] == my_player:
move = input('Make move!\n')
move = int(move[0]), int(move[1])
mcts.make_move(move)
state = mcts.root.state
mcts.board.print(state)
def test_positions(self):
move_sequence = [(1, 1), (2, 0), (0, 1)]
move_sequence = [(1, 1), (2, 2), (2, 1)]
move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]
def from_position(self, move_sequence, expected_move, name):
ttt = TTT()
mcts = MCTS(ttt, searchtime=30)
mcts.board.print(mcts.root.state)
for move in move_sequence:
mcts.search()
mcts.make_move(move)
mcts.board.print(mcts.root.state)
move = mcts.search()
print('Testing {} block (that was lost before) on the following board'
.format(name))
self.assertEqual(move, expected_move)
def test_trick_win(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTTT(unittest.TestCase):
def test_mcts(self):
if 0 in skip:
print('Skipping ai self-play')
return
ttt = TTT()
for i in range(1000):
mcts = MCTS(ttt)
state = mcts.root.state
while not mcts.board.ending_state(state):
move = mcts.search()
print(move)
state = mcts.board.get_state(state, move)
mcts.board.print(state)
mcts.make_move(move)
self.assertEqual(mcts.board.ending_state(state), -1)
def test_play_mcts(self):
if 1 in skip:
print('Skipping human-ai play')
return
ttt = TTT()
mcts = MCTS(ttt)
state = mcts.root.state
my_player = 2
while not mcts.board.ending_state(state):
mcts.board.print(state)
move = mcts.search()
print(move)
if state[1] == my_player:
move = input('Make move!\n')
move = int(move[0]), int(move[1])
mcts.make_move(move)
state = mcts.root.state
mcts.board.print(state)
def test_positions(self):
move_sequence = [(1, 1), (2, 0), (0, 1)]
move_sequence = [(1, 1), (2, 2), (2, 1)]
move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]
def from_position(self, move_sequence, expected_move, name):
ttt = TTT()
mcts = MCTS(ttt, searchtime=30)
mcts.board.print(mcts.root.state)
for move in move_sequence:
mcts.search()
mcts.make_move(move)
mcts.board.print(mcts.root.state)
move = mcts.search()
print('Testing {} block (that was lost before) on the following board'
.format(name))
self.assertEqual(move, expected_move)
def test_trick_win(self):
pass
def test_defend_trick_win(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
skip = [0]
class TestTTT(unittest.TestCase):
def test_mcts(self):
if 0 in skip:
print('Skipping ai self-play')
return
ttt = TTT()
for i in range(1000):
mcts = MCTS(ttt)
state = mcts.root.state
while not mcts.board.ending_state(state):
move = mcts.search()
print(move)
state = mcts.board.get_state(state, move)
mcts.board.print(state)
mcts.make_move(move)
self.assertEqual(mcts.board.ending_state(state), -1)
def test_play_mcts(self):
if 1 in skip:
print('Skipping human-ai play')
return
ttt = TTT()
mcts = MCTS(ttt)
state = mcts.root.state
my_player = 2
while not mcts.board.ending_state(state):
mcts.board.print(state)
move = mcts.search()
print(move)
if state[1] == my_player:
move = input('Make move!\n')
move = int(move[0]), int(move[1])
mcts.make_move(move)
state = mcts.root.state
mcts.board.print(state)
def test_positions(self):
move_sequence = [(1, 1), (2, 0), (0, 1)]
move_sequence = [(1, 1), (2, 2), (2, 1)]
move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]
def from_position(self, move_sequence, expected_move, name):
ttt = TTT()
mcts = MCTS(ttt, searchtime=30)
mcts.board.print(mcts.root.state)
for move in move_sequence:
mcts.search()
mcts.make_move(move)
mcts.board.print(mcts.root.state)
move = mcts.search()
print('Testing {} block (that was lost before) on the following board'
.format(name))
self.assertEqual(move, expected_move)
def test_trick_win(self):
pass
def test_defend_trick_win(self):
pass
<|reserved_special_token_1|>
from board.ttt import TTT
from mctsai.mcts import MCTS
import unittest
# skip = [0, 1]
skip = [0]
class TestTTT(unittest.TestCase):
    """Monte-Carlo-tree-search smoke tests for the tic-tac-toe board."""

    def test_mcts(self):
        # AI self-play: with perfect play every game must end in a draw (-1).
        if 0 in skip:
            print("Skipping ai self-play")
            return
        board = TTT()
        for _ in range(1000):
            mcts = MCTS(board)
            state = mcts.root.state
            while not mcts.board.ending_state(state):
                move = mcts.search()
                print(move)
                state = mcts.board.get_state(state, move)
                mcts.board.print(state)
                mcts.make_move(move)
            self.assertEqual(mcts.board.ending_state(state), -1)

    def test_play_mcts(self):
        # Interactive human-vs-AI session; skipped during unattended runs.
        if 1 in skip:
            print("Skipping human-ai play")
            return
        mcts = MCTS(TTT())
        state = mcts.root.state
        human = 2
        while not mcts.board.ending_state(state):
            mcts.board.print(state)
            move = mcts.search()
            print(move)
            if state[1] == human:
                # On the human's turn, override the engine's choice with input.
                entered = input("Make move!\n")
                move = (int(entered[0]), int(entered[1]))
            mcts.make_move(move)
            state = mcts.root.state
            mcts.board.print(state)

    def test_positions(self):
        # Known tactical positions; the from_position assertions are
        # currently disabled and only the sequences are kept for reference.
        move_sequence = [(1, 1), (2, 0), (0, 1)]
        # self.from_position(move_sequence, (2, 1), "Simple block 1")
        move_sequence = [(1, 1), (2, 2), (2, 1)]
        # self.from_position(move_sequence, (0, 1), "Simple block 2")
        move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]
        # self.from_position(move_sequence, (1, 0), "Simple win")

    def from_position(self, move_sequence, expected_move, name):
        """Replay move_sequence, then assert the engine picks expected_move."""
        mcts = MCTS(TTT(), searchtime=30)
        mcts.board.print(mcts.root.state)
        for played in move_sequence:
            mcts.search()
            mcts.make_move(played)
            mcts.board.print(mcts.root.state)
        chosen = mcts.search()
        print("Testing {} block (that was lost before) on the following board".format(name))
        self.assertEqual(chosen, expected_move)

    def test_trick_win(self):
        # TODO: re-enable the fork-win scenario (centre, corner, opposite
        # corner) and assert the engine finds the forking reply.
        pass

    def test_defend_trick_win(self):
        pass
|
flexible
|
{
"blob_id": "d0a3f332e04627eb275168972bd92cd1ea9b9447",
"index": 227,
"step-1": "<mask token>\n\n\nclass TestTTT(unittest.TestCase):\n\n def test_mcts(self):\n if 0 in skip:\n print('Skipping ai self-play')\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print('Skipping human-ai play')\n return\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input('Make move!\\n')\n move = int(move[0]), int(move[1])\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n <mask token>\n <mask token>\n\n def test_trick_win(self):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTTT(unittest.TestCase):\n\n def test_mcts(self):\n if 0 in skip:\n print('Skipping ai self-play')\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print('Skipping human-ai play')\n return\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input('Make move!\\n')\n move = int(move[0]), int(move[1])\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n\n def test_positions(self):\n move_sequence = [(1, 1), (2, 0), (0, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]\n\n def from_position(self, move_sequence, expected_move, name):\n ttt = TTT()\n mcts = MCTS(ttt, searchtime=30)\n mcts.board.print(mcts.root.state)\n for move in move_sequence:\n mcts.search()\n mcts.make_move(move)\n mcts.board.print(mcts.root.state)\n move = mcts.search()\n print('Testing {} block (that was lost before) on the following board'\n .format(name))\n self.assertEqual(move, expected_move)\n\n def test_trick_win(self):\n pass\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTTT(unittest.TestCase):\n\n def test_mcts(self):\n if 0 in skip:\n print('Skipping ai self-play')\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print('Skipping human-ai play')\n return\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input('Make move!\\n')\n move = int(move[0]), int(move[1])\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n\n def test_positions(self):\n move_sequence = [(1, 1), (2, 0), (0, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]\n\n def from_position(self, move_sequence, expected_move, name):\n ttt = TTT()\n mcts = MCTS(ttt, searchtime=30)\n mcts.board.print(mcts.root.state)\n for move in move_sequence:\n mcts.search()\n mcts.make_move(move)\n mcts.board.print(mcts.root.state)\n move = mcts.search()\n print('Testing {} block (that was lost before) on the following board'\n .format(name))\n self.assertEqual(move, expected_move)\n\n def test_trick_win(self):\n pass\n\n def test_defend_trick_win(self):\n pass\n",
"step-4": "<mask token>\nskip = [0]\n\n\nclass TestTTT(unittest.TestCase):\n\n def test_mcts(self):\n if 0 in skip:\n print('Skipping ai self-play')\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print('Skipping human-ai play')\n return\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input('Make move!\\n')\n move = int(move[0]), int(move[1])\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n\n def test_positions(self):\n move_sequence = [(1, 1), (2, 0), (0, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]\n\n def from_position(self, move_sequence, expected_move, name):\n ttt = TTT()\n mcts = MCTS(ttt, searchtime=30)\n mcts.board.print(mcts.root.state)\n for move in move_sequence:\n mcts.search()\n mcts.make_move(move)\n mcts.board.print(mcts.root.state)\n move = mcts.search()\n print('Testing {} block (that was lost before) on the following board'\n .format(name))\n self.assertEqual(move, expected_move)\n\n def test_trick_win(self):\n pass\n\n def test_defend_trick_win(self):\n pass\n",
"step-5": "from board.ttt import TTT\nfrom mctsai.mcts import MCTS\nimport unittest\n\n# skip = [0, 1]\n\nskip = [0]\n\nclass TestTTT(unittest.TestCase):\n def test_mcts(self):\n if 0 in skip:\n print(\"Skipping ai self-play\")\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print(\"Skipping human-ai play\")\n return\n\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input(\"Make move!\\n\")\n move = (int(move[0]), int(move[1]))\n\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n # state = mcts.board.get_state(state, move)\n # mcts = MCTS(ttt)\n # mcts.root.state = state\n # mcts.root.remaining_moves = mcts.board.get_legal_moves(mcts.root.state)\n\n def test_positions(self):\n # simple block\n move_sequence = [(1, 1), (2, 0), (0, 1)]\n # self.from_position(move_sequence, (2, 1), \"Simple block 1\")\n\n # simple block 2\n move_sequence = [(1, 1), (2, 2), (2, 1)]\n # self.from_position(move_sequence, (0, 1), \"Simple block 2\")\n\n # simple win 1\n move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]\n # self.from_position(move_sequence, (1, 0), \"Simple win\")\n\n def from_position(self, move_sequence, expected_move, name):\n ttt = TTT()\n mcts = MCTS(ttt, searchtime= 30)\n mcts.board.print(mcts.root.state)\n for move in move_sequence:\n mcts.search()\n mcts.make_move(move)\n mcts.board.print(mcts.root.state)\n\n move = mcts.search()\n\n print(\"Testing {} block (that was lost before) on the following board\".format(name))\n 
self.assertEqual(move, expected_move)\n\n def test_trick_win(self):\n pass\n # ttt = TTT()\n # state = ttt.get_initial_state()\n # state = ttt.get_state(state, (1, 1))\n # state = ttt.get_state(state, (2, 2))\n # state = ttt.get_state(state, (2, 0))\n # print(\"Testing trick win on the following board\")\n # ttt.print(state)\n # for _ in range(100):\n # mcts = MCTS(ttt)\n # mcts.set_root_state(state)\n # move = mcts.search()\n # self.assertEqual(move, (0, 2))\n\n def test_defend_trick_win(self):\n pass\n\n\n\n\n\n\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in NICList:
os.system('sudo ifconfig ' + i + ' promisc')
os.system('sudo python ./src/top.py')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
NICList = [i for i in netifaces.interfaces() if i != 'lo']
for i in NICList:
os.system('sudo ifconfig ' + i + ' promisc')
os.system('sudo python ./src/top.py')
<|reserved_special_token_1|>
import os
import subprocess
import netifaces

# Put every non-loopback network interface into promiscuous mode,
# then launch the packet-capture frontend.
NICList = [nic for nic in netifaces.interfaces() if nic != 'lo']
for nic in NICList:
    # List-form argv: the interface name is passed verbatim instead of being
    # interpolated into a shell command line.
    subprocess.run(['sudo', 'ifconfig', nic, 'promisc'])
os.system('sudo python ./src/top.py')
<|reserved_special_token_1|>
#!/usr/bin/python3
import os
import netifaces
# nicList = netifaces.interfaces()
NICList = [i for i in netifaces.interfaces() if i != "lo"]
for i in NICList:
os.system("sudo ifconfig " + i + " promisc")
os.system("sudo python ./src/top.py")
|
flexible
|
{
"blob_id": "b38d23a7de3c805ddde4ed2d236e3c6e7bb5e2d0",
"index": 118,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in NICList:\n os.system('sudo ifconfig ' + i + ' promisc')\nos.system('sudo python ./src/top.py')\n",
"step-3": "<mask token>\nNICList = [i for i in netifaces.interfaces() if i != 'lo']\nfor i in NICList:\n os.system('sudo ifconfig ' + i + ' promisc')\nos.system('sudo python ./src/top.py')\n",
"step-4": "import os\nimport netifaces\nNICList = [i for i in netifaces.interfaces() if i != 'lo']\nfor i in NICList:\n os.system('sudo ifconfig ' + i + ' promisc')\nos.system('sudo python ./src/top.py')\n",
"step-5": "#!/usr/bin/python3\nimport os\nimport netifaces\n\n# nicList = netifaces.interfaces()\nNICList = [i for i in netifaces.interfaces() if i != \"lo\"]\n\nfor i in NICList:\n os.system(\"sudo ifconfig \" + i + \" promisc\")\nos.system(\"sudo python ./src/top.py\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for instance in instances['instances']:
inst_names.append(instance['name'])
inst_dict[instance['name']] = []
print(inst_names)
<|reserved_special_token_0|>
for snapshot in snapshots['instanceSnapshots']:
inst_dict[snapshot['fromInstanceName']].append(snapshot)
for instance, snapshots in inst_dict.items():
print(json.dumps(json.loads(check_output(
'aws lightsail create-instance-snapshot --instance-name ' +
instance + ' --instance-snapshot-name ' + instance + '-' + str,
shell=True))))
if len(snapshots) > 1:
sorted_snapshots = sorted(snapshots, key=lambda k: k['createdAt'])
print(json.dumps(json.loads(check_output(
'aws lightsail delete-instance-snapshot --instance-snapshot-name '
+ sorted_snapshots[0]['name'], shell=True))))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
date = datetime.datetime.now()
mo = date.month
day = date.day
year = date.year
str = '{0}-{1}-{2}'.format(mo, day, year)
instances = json.loads(check_output('aws lightsail get-instances', shell=True))
inst_names = []
inst_dict = {}
for instance in instances['instances']:
inst_names.append(instance['name'])
inst_dict[instance['name']] = []
print(inst_names)
snapshots = json.loads(check_output('aws lightsail get-instance-snapshots',
shell=True))
for snapshot in snapshots['instanceSnapshots']:
inst_dict[snapshot['fromInstanceName']].append(snapshot)
for instance, snapshots in inst_dict.items():
print(json.dumps(json.loads(check_output(
'aws lightsail create-instance-snapshot --instance-name ' +
instance + ' --instance-snapshot-name ' + instance + '-' + str,
shell=True))))
if len(snapshots) > 1:
sorted_snapshots = sorted(snapshots, key=lambda k: k['createdAt'])
print(json.dumps(json.loads(check_output(
'aws lightsail delete-instance-snapshot --instance-snapshot-name '
+ sorted_snapshots[0]['name'], shell=True))))
<|reserved_special_token_1|>
from subprocess import check_output
import json
import datetime

# Daily AWS Lightsail snapshot rotation: create a dated snapshot for every
# instance, then delete the oldest snapshot once an instance has more than one.

today = datetime.datetime.now()
# Non-zero-padded M-D-YYYY tag (e.g. "3-7-2021"), kept in this exact form so
# new snapshot names match the ones created by earlier runs.
# NOTE: the original bound this to the name `str`, shadowing the builtin.
date_tag = '{0}-{1}-{2}'.format(today.month, today.day, today.year)

instances = json.loads(check_output('aws lightsail get-instances', shell=True))
inst_names = []
inst_dict = {}
for instance in instances['instances']:
    inst_names.append(instance['name'])
    inst_dict[instance['name']] = []
print(inst_names)

all_snapshots = json.loads(check_output('aws lightsail get-instance-snapshots',
    shell=True))
for snapshot in all_snapshots['instanceSnapshots']:
    owner = snapshot['fromInstanceName']
    # A snapshot may outlive its source instance; skip those instead of
    # raising KeyError on the lookup.
    if owner in inst_dict:
        inst_dict[owner].append(snapshot)

for name, snaps in inst_dict.items():
    print(json.dumps(json.loads(check_output(
        'aws lightsail create-instance-snapshot --instance-name ' +
        name + ' --instance-snapshot-name ' + name + '-' + date_tag,
        shell=True))))
    if len(snaps) > 1:
        # Delete only the single oldest snapshot per run (min by createdAt).
        oldest = min(snaps, key=lambda s: s['createdAt'])
        print(json.dumps(json.loads(check_output(
            'aws lightsail delete-instance-snapshot --instance-snapshot-name '
            + oldest['name'], shell=True))))
<|reserved_special_token_1|>
from subprocess import check_output
import json
import datetime
date = datetime.datetime.now()
mo = date.month
day = date.day
year = date.year
str = '{0}-{1}-{2}'.format(mo, day, year)
instances = json.loads(check_output("aws lightsail get-instances", shell=True))
inst_names = []
inst_dict = {}
for instance in instances['instances']:
inst_names.append(instance['name'])
inst_dict[instance['name']] = []
print(inst_names)
snapshots = json.loads(check_output("aws lightsail get-instance-snapshots", shell=True))
for snapshot in snapshots['instanceSnapshots']:
inst_dict[snapshot['fromInstanceName']].append(snapshot)
for instance, snapshots in inst_dict.items():
print(json.dumps(json.loads(
check_output("aws lightsail create-instance-snapshot --instance-name " + instance + " --instance-snapshot-name " + instance + "-" + str,
shell=True))))
if len(snapshots) > 1:
sorted_snapshots = sorted(snapshots, key=lambda k: k['createdAt'])
print(json.dumps(json.loads(check_output("aws lightsail delete-instance-snapshot --instance-snapshot-name " + sorted_snapshots[0]['name'], shell=True))))
|
flexible
|
{
"blob_id": "2023e0b749338488e63cbbb475b7a915bccccce0",
"index": 7531,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor instance in instances['instances']:\n inst_names.append(instance['name'])\n inst_dict[instance['name']] = []\nprint(inst_names)\n<mask token>\nfor snapshot in snapshots['instanceSnapshots']:\n inst_dict[snapshot['fromInstanceName']].append(snapshot)\nfor instance, snapshots in inst_dict.items():\n print(json.dumps(json.loads(check_output(\n 'aws lightsail create-instance-snapshot --instance-name ' +\n instance + ' --instance-snapshot-name ' + instance + '-' + str,\n shell=True))))\n if len(snapshots) > 1:\n sorted_snapshots = sorted(snapshots, key=lambda k: k['createdAt'])\n print(json.dumps(json.loads(check_output(\n 'aws lightsail delete-instance-snapshot --instance-snapshot-name '\n + sorted_snapshots[0]['name'], shell=True))))\n",
"step-3": "<mask token>\ndate = datetime.datetime.now()\nmo = date.month\nday = date.day\nyear = date.year\nstr = '{0}-{1}-{2}'.format(mo, day, year)\ninstances = json.loads(check_output('aws lightsail get-instances', shell=True))\ninst_names = []\ninst_dict = {}\nfor instance in instances['instances']:\n inst_names.append(instance['name'])\n inst_dict[instance['name']] = []\nprint(inst_names)\nsnapshots = json.loads(check_output('aws lightsail get-instance-snapshots',\n shell=True))\nfor snapshot in snapshots['instanceSnapshots']:\n inst_dict[snapshot['fromInstanceName']].append(snapshot)\nfor instance, snapshots in inst_dict.items():\n print(json.dumps(json.loads(check_output(\n 'aws lightsail create-instance-snapshot --instance-name ' +\n instance + ' --instance-snapshot-name ' + instance + '-' + str,\n shell=True))))\n if len(snapshots) > 1:\n sorted_snapshots = sorted(snapshots, key=lambda k: k['createdAt'])\n print(json.dumps(json.loads(check_output(\n 'aws lightsail delete-instance-snapshot --instance-snapshot-name '\n + sorted_snapshots[0]['name'], shell=True))))\n",
"step-4": "from subprocess import check_output\nimport json\nimport datetime\ndate = datetime.datetime.now()\nmo = date.month\nday = date.day\nyear = date.year\nstr = '{0}-{1}-{2}'.format(mo, day, year)\ninstances = json.loads(check_output('aws lightsail get-instances', shell=True))\ninst_names = []\ninst_dict = {}\nfor instance in instances['instances']:\n inst_names.append(instance['name'])\n inst_dict[instance['name']] = []\nprint(inst_names)\nsnapshots = json.loads(check_output('aws lightsail get-instance-snapshots',\n shell=True))\nfor snapshot in snapshots['instanceSnapshots']:\n inst_dict[snapshot['fromInstanceName']].append(snapshot)\nfor instance, snapshots in inst_dict.items():\n print(json.dumps(json.loads(check_output(\n 'aws lightsail create-instance-snapshot --instance-name ' +\n instance + ' --instance-snapshot-name ' + instance + '-' + str,\n shell=True))))\n if len(snapshots) > 1:\n sorted_snapshots = sorted(snapshots, key=lambda k: k['createdAt'])\n print(json.dumps(json.loads(check_output(\n 'aws lightsail delete-instance-snapshot --instance-snapshot-name '\n + sorted_snapshots[0]['name'], shell=True))))\n",
"step-5": "from subprocess import check_output\nimport json\nimport datetime\n\ndate = datetime.datetime.now()\nmo = date.month\nday = date.day\nyear = date.year\nstr = '{0}-{1}-{2}'.format(mo, day, year)\ninstances = json.loads(check_output(\"aws lightsail get-instances\", shell=True))\n\ninst_names = []\ninst_dict = {}\nfor instance in instances['instances']:\n inst_names.append(instance['name'])\n inst_dict[instance['name']] = []\n\nprint(inst_names)\nsnapshots = json.loads(check_output(\"aws lightsail get-instance-snapshots\", shell=True))\n\nfor snapshot in snapshots['instanceSnapshots']:\n inst_dict[snapshot['fromInstanceName']].append(snapshot)\n\nfor instance, snapshots in inst_dict.items():\n print(json.dumps(json.loads(\n check_output(\"aws lightsail create-instance-snapshot --instance-name \" + instance + \" --instance-snapshot-name \" + instance + \"-\" + str,\n shell=True))))\n if len(snapshots) > 1:\n sorted_snapshots = sorted(snapshots, key=lambda k: k['createdAt'])\n print(json.dumps(json.loads(check_output(\"aws lightsail delete-instance-snapshot --instance-snapshot-name \" + sorted_snapshots[0]['name'], shell=True))))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestStudent(unittest.TestCase):
def setUp(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '7.0'
desired_caps['automationName'] = 'UIAutomator2'
desired_caps['deviceName'] = 'PRA-AL00'
desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')
desired_caps['appPackage'
] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'
desired_caps['unicodeKeyboard'] = True
desired_caps['resetKeyboard'] = True
desired_caps['fullReset'] = True
self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',
desired_caps)
sleep(3)
def tearDown(self):
self.driver.quit()
def changePwd(self):
driver = self.driver
sleep(2)
now = time.strftime('%Y-%m-%d %H_%M_%S')
print('\n021:修改密码----开始:' + now)
login(self)
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("个人中心")').click()
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("修改密码")').click()
sleep(2)
old = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
old.click()
old.set_value('123456')
sleep(1)
new = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
new.click()
new.set_value('123456wxl')
sleep(1)
again = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
again.click()
again.set_value('123456wxl')
sleep(1)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("确认")').click()
sleep(3)
driver.swipe(1000, 1600, 1000, 1250, 1000)
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("退出登录")').click()
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("确定")').click()
sleep(2)
user = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
user.click()
user.set_value('13923121234')
sleep(1)
pwd = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
pwd.click()
pwd.set_value('123456wxl')
sleep(1)
driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
sleep(4)
now = time.strftime('%Y-%m-%d %H_%M_%S')
sf0 = './' + now + '_021b_relogin_R.png'
driver.get_screenshot_as_file(sf0)
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("个人中心")').click()
sleep(3)
driver.swipe(1000, 1600, 1000, 1250, 1000)
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("退出登录")').click()
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("确定")').click()
sleep(2)
now = time.strftime('%Y-%m-%d %H_%M_%S')
print('\n021:修改密码----结束:' + now)
def changePwdBack(self):
driver = self.driver
sleep(2)
now = time.strftime('%Y-%m-%d %H_%M_%S')
print('\n021:重置密码----开始:' + now)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("登录")').click()
sleep(2)
user = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
user.click()
user.set_value('13923121234')
sleep(1)
pwd = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
pwd.click()
pwd.set_value('123456wxl')
sleep(1)
driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("始终允许")').click()
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("始终允许")').click()
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("开始测试")').click()
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("点击开始录音")').click()
sleep(4)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("停止录音")').click()
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("有听到声音")').click()
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("下一步")').click()
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("下一步")').click()
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("您已完成测试")').click()
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("个人中心")').click()
sleep(3)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("修改密码")').click()
sleep(2)
old = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
old.click()
old.set_value('123456wxl')
sleep(1)
new = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
new.click()
new.set_value('123456')
sleep(1)
again = driver.find_element_by_id(
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
again.click()
again.set_value('123456')
sleep(1)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("确认")').click()
sleep(3)
driver.swipe(1000, 1600, 1000, 1250, 1000)
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("退出登录")').click()
sleep(2)
driver.find_element_by_android_uiautomator(
'new UiSelector().text("确定")').click()
sleep(2)
now = time.strftime('%Y-%m-%d %H_%M_%S')
print('\n021:重置密码----结束:' + now)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestStudent(unittest.TestCase):
    """Appium UI tests for the VIP Student Android app (7.0, Honor 8 Lite):
    change the login password to a temporary one, verify re-login, and
    reset it back to the original.
    """

    # Application package under test; previously repeated inline ~15 times.
    PKG = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'

    def setUp(self):
        """Fresh-install the app on the device and open an Appium session."""
        desired_caps = {
            'platformName': 'Android',
            'platformVersion': '7.0',
            'automationName': 'UIAutomator2',
            'deviceName': 'PRA-AL00',
            'app': PATH('../VIPStudent_2.0.4.apk'),
            'appPackage': self.PKG,
            'unicodeKeyboard': True,
            'resetKeyboard': True,
            'fullReset': True,
        }
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',
            desired_caps)
        sleep(3)

    def tearDown(self):
        self.driver.quit()

    # ---- shared UI helpers -------------------------------------------------

    def _tap(self, text, wait):
        # Tap the on-screen element whose visible text matches exactly,
        # then pause `wait` seconds for the UI transition.
        self.driver.find_element_by_android_uiautomator(
            'new UiSelector().text("{}")'.format(text)).click()
        sleep(wait)

    def _fill(self, res_id, value):
        # Focus a text field (by resource-id suffix within PKG) and type into it.
        field = self.driver.find_element_by_id(self.PKG + ':id/' + res_id)
        field.click()
        field.set_value(value)
        sleep(1)

    def _logout(self):
        # Scroll the personal-centre page up to reveal logout, then confirm.
        self.driver.swipe(1000, 1600, 1000, 1250, 1000)
        sleep(2)
        self._tap('退出登录', 2)
        self._tap('确定', 2)

    def _submit_login(self, password):
        # Fill the login form with the fixed test account and submit.
        self._fill('etUserName', '13923121234')
        self._fill('etPassword', password)
        self.driver.find_element_by_id(self.PKG + ':id/btnLogin').click()

    # ---- test flows --------------------------------------------------------

    def changePwd(self):
        """Change password 123456 -> 123456wxl and verify re-login works."""
        sleep(2)
        print('\n021:修改密码----开始:' + time.strftime('%Y-%m-%d %H_%M_%S'))
        login(self)
        sleep(2)
        self._tap('个人中心', 3)
        self._tap('修改密码', 2)
        self._fill('etOldPass', '123456')
        self._fill('etNewPass', '123456wxl')
        self._fill('etConfirmNewPass', '123456wxl')
        self._tap('确认', 3)
        self._logout()
        # Re-login with the new password and capture a screenshot as evidence.
        self._submit_login('123456wxl')
        sleep(4)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        self.driver.get_screenshot_as_file('./' + now + '_021b_relogin_R.png')
        sleep(3)
        self._tap('个人中心', 3)
        self._logout()
        print('\n021:修改密码----结束:' + time.strftime('%Y-%m-%d %H_%M_%S'))

    def changePwdBack(self):
        """Log in with 123456wxl, pass the first-run device check, and
        restore the original password 123456."""
        sleep(2)
        print('\n021:重置密码----开始:' + time.strftime('%Y-%m-%d %H_%M_%S'))
        self._tap('登录', 2)
        self._submit_login('123456wxl')
        sleep(3)
        # Grant both runtime-permission dialogs, then walk the audio self-test
        # wizard shown on first login after a fresh install.
        self._tap('始终允许', 2)
        self._tap('始终允许', 2)
        self._tap('开始测试', 2)
        self._tap('点击开始录音', 4)
        self._tap('停止录音', 3)
        self._tap('有听到声音', 3)
        self._tap('下一步', 3)
        self._tap('下一步', 3)
        self._tap('您已完成测试', 3)
        self._tap('个人中心', 3)
        self._tap('修改密码', 2)
        self._fill('etOldPass', '123456wxl')
        self._fill('etNewPass', '123456')
        self._fill('etConfirmNewPass', '123456')
        self._tap('确认', 3)
        self._logout()
        print('\n021:重置密码----结束:' + time.strftime('%Y-%m-%d %H_%M_%S'))
if __name__ == '__main__':
    # Run only the password-reset flow and emit a timestamped HTML report.
    suite = unittest.TestSuite()
    suite.addTest(TestStudent('changePwdBack'))
    stamp = time.strftime('%Y-%m-%d %H_%M_%S')
    report_path = './' + stamp + '_021b_result_R.html'
    with open(report_path, 'wb') as report:
        runner = HTMLTestRunner(
            stream=report,
            title='测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',
            description='自动化测试脚本运行状态:')
        runner.run(suite)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))
class TestStudent(unittest.TestCase):
    """Appium UI tests for the student app's change-password flow.

    ``changePwd`` changes the password from '123456' to '123456wxl' and
    verifies a fresh login with the new password; ``changePwdBack`` resets
    it back to '123456'.  Elements are located by their Chinese UI labels
    via UiAutomator text selectors, so the flow is strictly order-dependent.
    """

    def setUp(self):
        """Start an Appium session against a local server with a full
        app reset before each test."""
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = '7.0'
        desired_caps['automationName'] = 'UIAutomator2'
        desired_caps['deviceName'] = 'PRA-AL00'
        desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')
        desired_caps['appPackage'
            ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'
        # Unicode keyboard lets Appium type non-ASCII text; restore the
        # device keyboard afterwards.
        desired_caps['unicodeKeyboard'] = True
        desired_caps['resetKeyboard'] = True
        desired_caps['fullReset'] = True
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',
            desired_caps)
        sleep(3)

    def tearDown(self):
        """End the Appium session after each test."""
        self.driver.quit()

    def changePwd(self):
        """Change the password from '123456' to '123456wxl', log out,
        and verify a re-login with the new password (screenshotted)."""
        driver = self.driver
        sleep(2)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:修改密码----开始:' + now)
        login(self)
        sleep(2)
        # Personal center ("个人中心") -> change password ("修改密码").
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("修改密码")').click()
        sleep(2)
        old = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
        old.click()
        old.set_value('123456')
        sleep(1)
        new = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
        new.click()
        new.set_value('123456wxl')
        sleep(1)
        again = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
        again.click()
        again.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确认")').click()
        sleep(3)
        # Scroll down to reach logout ("退出登录"), then confirm ("确定").
        driver.swipe(1000, 1600, 1000, 1250, 1000)
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确定")').click()
        sleep(2)
        # Re-login with the NEW password and capture a screenshot as proof.
        user = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
        user.click()
        user.set_value('13923121234')
        sleep(1)
        pwd = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
        pwd.click()
        pwd.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
        sleep(4)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        sf0 = './' + now + '_021b_relogin_R.png'
        driver.get_screenshot_as_file(sf0)
        sleep(3)
        # Log out again to leave the app in a known state.
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.swipe(1000, 1600, 1000, 1250, 1000)
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确定")').click()
        sleep(2)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:修改密码----结束:' + now)

    def changePwdBack(self):
        """Reset the password from '123456wxl' back to '123456'.

        Logs in manually (full reset shows the first-launch flow), accepts
        the permission prompts, walks through the audio-check wizard, then
        performs the password change and logs out.
        """
        driver = self.driver
        sleep(2)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:重置密码----开始:' + now)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("登录")').click()
        sleep(2)
        user = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
        user.click()
        user.set_value('13923121234')
        sleep(1)
        pwd = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
        pwd.click()
        pwd.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
        sleep(3)
        # Two runtime-permission dialogs: tap "始终允许" (always allow).
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("始终允许")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("始终允许")').click()
        sleep(2)
        # First-launch audio check wizard: start test, record, stop,
        # confirm sound was heard, then "next" twice and finish.
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("开始测试")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("点击开始录音")').click()
        sleep(4)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("停止录音")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("有听到声音")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("下一步")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("下一步")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("您已完成测试")').click()
        sleep(3)
        # Personal center -> change password: old '123456wxl' -> new '123456'.
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("修改密码")').click()
        sleep(2)
        old = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
        old.click()
        old.set_value('123456wxl')
        sleep(1)
        new = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
        new.click()
        new.set_value('123456')
        sleep(1)
        again = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
        again.click()
        again.set_value('123456')
        sleep(1)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确认")').click()
        sleep(3)
        # Scroll to logout and confirm, leaving the default password set.
        driver.swipe(1000, 1600, 1000, 1250, 1000)
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确定")').click()
        sleep(2)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:重置密码----结束:' + now)
if __name__ == '__main__':
    # Run only the password-reset test and write a timestamped HTML report.
    testunit = unittest.TestSuite()
    testunit.addTest(TestStudent('changePwdBack'))
    now = time.strftime('%Y-%m-%d %H_%M_%S')
    filename = './' + now + '_021b_result_R.html'
    # 'with' guarantees the report file handle is closed even if the
    # runner raises (the original left it open on error).
    with open(filename, 'wb') as fp:
        runner = HTMLTestRunner(stream=fp, title=
            '测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',
            description='自动化测试脚本运行状态:')
        runner.run(testunit)
<|reserved_special_token_1|>
import unittest, time, os
from time import sleep
from appium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from HTMLTestRunner import HTMLTestRunner
from appium.webdriver.common.touch_action import TouchAction
from pub_Student import login, logout
PATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))
class TestStudent(unittest.TestCase):
    """Appium UI tests for the student app's change-password flow.

    ``changePwd`` changes the password from '123456' to '123456wxl' and
    verifies a fresh login with the new password; ``changePwdBack`` resets
    it back to '123456'.  Elements are located by their Chinese UI labels
    via UiAutomator text selectors, so the flow is strictly order-dependent.
    """

    def setUp(self):
        """Start an Appium session against a local server with a full
        app reset before each test."""
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = '7.0'
        desired_caps['automationName'] = 'UIAutomator2'
        desired_caps['deviceName'] = 'PRA-AL00'
        desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')
        desired_caps['appPackage'
            ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'
        # Unicode keyboard lets Appium type non-ASCII text; restore the
        # device keyboard afterwards.
        desired_caps['unicodeKeyboard'] = True
        desired_caps['resetKeyboard'] = True
        desired_caps['fullReset'] = True
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',
            desired_caps)
        sleep(3)

    def tearDown(self):
        """End the Appium session after each test."""
        self.driver.quit()

    def changePwd(self):
        """Change the password from '123456' to '123456wxl', log out,
        and verify a re-login with the new password (screenshotted)."""
        driver = self.driver
        sleep(2)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:修改密码----开始:' + now)
        login(self)
        sleep(2)
        # Personal center ("个人中心") -> change password ("修改密码").
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("修改密码")').click()
        sleep(2)
        old = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
        old.click()
        old.set_value('123456')
        sleep(1)
        new = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
        new.click()
        new.set_value('123456wxl')
        sleep(1)
        again = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
        again.click()
        again.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确认")').click()
        sleep(3)
        # Scroll down to reach logout ("退出登录"), then confirm ("确定").
        driver.swipe(1000, 1600, 1000, 1250, 1000)
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确定")').click()
        sleep(2)
        # Re-login with the NEW password and capture a screenshot as proof.
        user = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
        user.click()
        user.set_value('13923121234')
        sleep(1)
        pwd = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
        pwd.click()
        pwd.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
        sleep(4)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        sf0 = './' + now + '_021b_relogin_R.png'
        driver.get_screenshot_as_file(sf0)
        sleep(3)
        # Log out again to leave the app in a known state.
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.swipe(1000, 1600, 1000, 1250, 1000)
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确定")').click()
        sleep(2)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:修改密码----结束:' + now)

    def changePwdBack(self):
        """Reset the password from '123456wxl' back to '123456'.

        Logs in manually (full reset shows the first-launch flow), accepts
        the permission prompts, walks through the audio-check wizard, then
        performs the password change and logs out.
        """
        driver = self.driver
        sleep(2)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:重置密码----开始:' + now)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("登录")').click()
        sleep(2)
        user = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
        user.click()
        user.set_value('13923121234')
        sleep(1)
        pwd = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
        pwd.click()
        pwd.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
        sleep(3)
        # Two runtime-permission dialogs: tap "始终允许" (always allow).
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("始终允许")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("始终允许")').click()
        sleep(2)
        # First-launch audio check wizard: start test, record, stop,
        # confirm sound was heard, then "next" twice and finish.
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("开始测试")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("点击开始录音")').click()
        sleep(4)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("停止录音")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("有听到声音")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("下一步")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("下一步")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("您已完成测试")').click()
        sleep(3)
        # Personal center -> change password: old '123456wxl' -> new '123456'.
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("修改密码")').click()
        sleep(2)
        old = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
        old.click()
        old.set_value('123456wxl')
        sleep(1)
        new = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
        new.click()
        new.set_value('123456')
        sleep(1)
        again = driver.find_element_by_id(
            'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
        again.click()
        again.set_value('123456')
        sleep(1)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确认")').click()
        sleep(3)
        # Scroll to logout and confirm, leaving the default password set.
        driver.swipe(1000, 1600, 1000, 1250, 1000)
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator(
            'new UiSelector().text("确定")').click()
        sleep(2)
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:重置密码----结束:' + now)
if __name__ == '__main__':
    # Run only the password-reset test and write a timestamped HTML report.
    testunit = unittest.TestSuite()
    testunit.addTest(TestStudent('changePwdBack'))
    now = time.strftime('%Y-%m-%d %H_%M_%S')
    filename = './' + now + '_021b_result_R.html'
    # 'with' guarantees the report file handle is closed even if the
    # runner raises (the original left it open on error).
    with open(filename, 'wb') as fp:
        runner = HTMLTestRunner(stream=fp, title=
            '测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',
            description='自动化测试脚本运行状态:')
        runner.run(testunit)
<|reserved_special_token_1|>
#coding=utf-8
import unittest,time,os
from time import sleep
from appium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from HTMLTestRunner import HTMLTestRunner
from appium.webdriver.common.touch_action import TouchAction
from pub_Student import login,logout
# Returns abs path relative to this file and not cwd
def PATH(p):
    """Resolve *p* relative to this script's directory (not the CWD) and
    return it as an absolute path.  Plain ``def`` instead of a lambda
    assignment (PEP 8 E731); call sites are unchanged."""
    return os.path.abspath(os.path.join(os.path.dirname(__file__), p))
class TestStudent(unittest.TestCase):
    """Appium UI tests for the student app's change-password flow.

    ``changePwd`` changes the password from '123456' to '123456wxl' and
    verifies a fresh login with the new password; ``changePwdBack`` resets
    it back to '123456'.  Elements are located by their Chinese UI labels
    via UiAutomator text selectors, so the flow is strictly order-dependent.
    """

    def setUp(self):
        """Start an Appium session against a local server with a full
        app reset before each test."""
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = '7.0'
        desired_caps['automationName'] = 'UIAutomator2'
        desired_caps['deviceName'] = 'PRA-AL00'
        # Previously pinned to a specific device:
        #desired_caps['udid'] = 'HMKNW17225011700'
        desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')
        desired_caps['appPackage'] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'
        # Unicode keyboard lets Appium type non-ASCII text; restore the
        # device keyboard afterwards.
        desired_caps['unicodeKeyboard'] = True
        desired_caps['resetKeyboard'] = True
        desired_caps['fullReset'] = True
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
        sleep(3)

    def tearDown(self):
        # end the session
        self.driver.quit()

    def changePwd(self):
        """Change the password from '123456' to '123456wxl', log out,
        and verify a re-login with the new password (screenshotted)."""
        driver=self.driver
        sleep(2)
        now=time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:修改密码----开始:'+now)
        login(self)
        sleep(2)
        # Personal center ("个人中心") -> change password ("修改密码").
        driver.find_element_by_android_uiautomator('new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator('new UiSelector().text("修改密码")').click()
        sleep(2)
        old=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
        old.click()
        old.set_value('123456')
        sleep(1)
        new=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
        new.click()
        new.set_value('123456wxl')
        sleep(1)
        #com.android.gallery3d:id/head_select_right
        again=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
        again.click()
        again.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_android_uiautomator('new UiSelector().text("确认")').click()
        sleep(3)
        # Scroll down to reach logout ("退出登录"), then confirm ("确定").
        driver.swipe(1000,1600,1000,1250,1000)
        sleep(2)
        driver.find_element_by_android_uiautomator('new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator('new UiSelector().text("确定")').click()
        sleep(2)
        # Re-login with the NEW password and capture a screenshot as proof.
        user=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
        user.click()
        user.set_value('13923121234')
        sleep(1)
        pwd=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
        pwd.click()
        pwd.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
        sleep(4)
        now=time.strftime('%Y-%m-%d %H_%M_%S')
        sf0='./'+now+'_021b_relogin_R.png'
        driver.get_screenshot_as_file(sf0)
        sleep(3)
        # Log out again to leave the app in a known state.
        driver.find_element_by_android_uiautomator('new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.swipe(1000,1600,1000,1250,1000)
        sleep(2)
        driver.find_element_by_android_uiautomator('new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator('new UiSelector().text("确定")').click()
        sleep(2)
        now=time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:修改密码----结束:'+now)

    def changePwdBack(self):
        """Reset the password from '123456wxl' back to '123456'.

        Logs in manually (full reset shows the first-launch flow), accepts
        the permission prompts, walks through the audio-check wizard, then
        performs the password change and logs out.
        """
        driver=self.driver
        sleep(2)
        now=time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:重置密码----开始:'+now)
        driver.find_element_by_android_uiautomator('new UiSelector().text("登录")').click()
        sleep(2)
        user=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
        user.click()
        user.set_value('13923121234')
        sleep(1)
        pwd=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
        pwd.click()
        pwd.set_value('123456wxl')
        sleep(1)
        driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
        sleep(3)
        # Two runtime-permission dialogs: tap "始终允许" (always allow).
        driver.find_element_by_android_uiautomator('new UiSelector().text("始终允许")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator('new UiSelector().text("始终允许")').click()
        sleep(2)
        # First-launch audio check wizard: start test, record, stop,
        # confirm sound was heard, then "next" twice and finish.
        driver.find_element_by_android_uiautomator('new UiSelector().text("开始测试")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator('new UiSelector().text("点击开始录音")').click()
        sleep(4)
        driver.find_element_by_android_uiautomator('new UiSelector().text("停止录音")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator('new UiSelector().text("有听到声音")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator('new UiSelector().text("下一步")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator('new UiSelector().text("下一步")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator('new UiSelector().text("您已完成测试")').click()
        sleep(3)
        # Personal center -> change password: old '123456wxl' -> new '123456'.
        driver.find_element_by_android_uiautomator('new UiSelector().text("个人中心")').click()
        sleep(3)
        driver.find_element_by_android_uiautomator('new UiSelector().text("修改密码")').click()
        sleep(2)
        old=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
        old.click()
        old.set_value('123456wxl')
        sleep(1)
        new=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
        new.click()
        new.set_value('123456')
        sleep(1)
        #com.android.gallery3d:id/head_select_right
        again=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
        again.click()
        again.set_value('123456')
        sleep(1)
        driver.find_element_by_android_uiautomator('new UiSelector().text("确认")').click()
        sleep(3)
        # Scroll to logout and confirm, leaving the default password set.
        driver.swipe(1000,1600,1000,1250,1000)
        sleep(2)
        driver.find_element_by_android_uiautomator('new UiSelector().text("退出登录")').click()
        sleep(2)
        driver.find_element_by_android_uiautomator('new UiSelector().text("确定")').click()
        sleep(2)
        now=time.strftime('%Y-%m-%d %H_%M_%S')
        print('\n021:重置密码----结束:'+now)
if __name__ == '__main__':
    # Run only the password-reset test and write a timestamped HTML report.
    testunit=unittest.TestSuite()
    #testunit.addTest(TestStudent('changePwd'))
    testunit.addTest(TestStudent('changePwdBack'))
    now=time.strftime('%Y-%m-%d %H_%M_%S')
    filename='./'+now+'_021b_result_R.html'
    # 'with' guarantees the report file handle is closed even if the
    # runner raises (the original left it open on error).
    with open(filename,'wb') as fp:
        runner=HTMLTestRunner(stream=fp,title='测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',
                              description='自动化测试脚本运行状态:')
        runner.run(testunit)
|
flexible
|
{
"blob_id": "8d7697a0e49dc9e966b9657171c66ccda57279d6",
"index": 1930,
"step-1": "<mask token>\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'\n ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',\n desired_caps)\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def changePwd(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:' + now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n 
sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n sf0 = './' + now + '_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:' + now)\n\n def changePwdBack(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:' + now)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"登录\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"停止录音\")').click()\n 
sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:' + now)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'\n ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',\n desired_caps)\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def changePwd(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:' + now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n 
sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n sf0 = './' + now + '_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:' + now)\n\n def changePwdBack(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:' + now)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"登录\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"停止录音\")').click()\n 
sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:' + now)\n\n\nif __name__ == '__main__':\n testunit = unittest.TestSuite()\n testunit.addTest(TestStudent('changePwdBack'))\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n filename = './' + now + '_021b_result_R.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner(stream=fp, title=\n '测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',\n description='自动化测试脚本运行状态:')\n runner.run(testunit)\n fp.close()\n",
"step-3": "<mask token>\nPATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'\n ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',\n desired_caps)\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def changePwd(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:' + now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n sf0 = './' + now + '_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:' + now)\n\n def changePwdBack(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:' + now)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"登录\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new 
UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"停止录音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:' + now)\n\n\nif __name__ == '__main__':\n testunit = unittest.TestSuite()\n testunit.addTest(TestStudent('changePwdBack'))\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n filename = './' + now + '_021b_result_R.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner(stream=fp, title=\n '测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',\n 
description='自动化测试脚本运行状态:')\n runner.run(testunit)\n fp.close()\n",
"step-4": "import unittest, time, os\nfrom time import sleep\nfrom appium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom HTMLTestRunner import HTMLTestRunner\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom pub_Student import login, logout\nPATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'\n ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',\n desired_caps)\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def changePwd(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:' + now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 
1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n sf0 = './' + now + '_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:' + now)\n\n def changePwdBack(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:' + now)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"登录\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n 
driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"停止录音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:' + now)\n\n\nif __name__ == '__main__':\n testunit = unittest.TestSuite()\n 
testunit.addTest(TestStudent('changePwdBack'))\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n filename = './' + now + '_021b_result_R.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner(stream=fp, title=\n '测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',\n description='自动化测试脚本运行状态:')\n runner.run(testunit)\n fp.close()\n",
"step-5": "#coding=utf-8\nimport unittest,time,os\nfrom time import sleep\nfrom appium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom HTMLTestRunner import HTMLTestRunner\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom pub_Student import login,logout\n\n# Returns abs path relative to this file and not cwd\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n #desired_caps['udid'] = 'HMKNW17225011700'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n sleep(3)\n\n def tearDown(self):\n # end the session\n self.driver.quit()\n \n def changePwd(self):\n driver=self.driver\n sleep(2)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:'+now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n #com.android.gallery3d:id/head_select_right\n again=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n 
again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000,1600,1000,1250,1000)\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确定\")').click()\n sleep(2)\n user=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n sf0='./'+now+'_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000,1600,1000,1250,1000)\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确定\")').click()\n sleep(2)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:'+now)\n\n def changePwdBack(self):\n driver=self.driver\n sleep(2)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:'+now)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"登录\")').click()\n sleep(2)\n user=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n 
driver.find_element_by_android_uiautomator('new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n #test now\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"停止录音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n #com.android.gallery3d:id/head_select_right\n again=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000,1600,1000,1250,1000)\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确定\")').click()\n sleep(2)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n 
print('\\n021:重置密码----结束:'+now)\n\nif __name__ == '__main__':\n testunit=unittest.TestSuite()\n #testunit.addTest(TestStudent('changePwd'))\n testunit.addTest(TestStudent('changePwdBack'))\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n filename='./'+now+'_021b_result_R.html'\n fp=open(filename,'wb')\n runner=HTMLTestRunner(stream=fp,title='测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',\n description='自动化测试脚本运行状态:')\n runner.run(testunit)\n fp.close()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
class BruteForceSolution:
<|reserved_special_token_0|>
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
<|reserved_special_token_0|>
print(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))
print(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))
<|reserved_special_token_1|>
class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
example = BruteForceSolution()
exampleTwo = Solution()
print(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))
print(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))
<|reserved_special_token_1|>
# https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/
# BruteForce
class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
example = BruteForceSolution()
exampleTwo = Solution()
print(example.smallerNumbersThanCurrent([8,1,2,2,3]))
print(exampleTwo.smallerNumbersThanCurrent([8,1,2,2,3]))
|
flexible
|
{
"blob_id": "58e023c3c453d1e190fdb5bc457358f42d1bd93f",
"index": 397,
"step-1": "class BruteForceSolution:\n <mask token>\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<mask token>\n",
"step-2": "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<mask token>\n",
"step-3": "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<mask token>\nprint(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\nprint(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\n",
"step-4": "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\nexample = BruteForceSolution()\nexampleTwo = Solution()\nprint(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\nprint(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\n",
"step-5": "# https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/\n\n# BruteForce\n\nclass BruteForceSolution:\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n \n \n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n \n return answer\n\nclass Solution:\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n \n sortedNums = sorted(nums)\n \n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n \n \n \n \n \n \nexample = BruteForceSolution()\nexampleTwo = Solution()\n\n\nprint(example.smallerNumbersThanCurrent([8,1,2,2,3]))\n\nprint(exampleTwo.smallerNumbersThanCurrent([8,1,2,2,3]))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('loading data...')
with open('movienumbers.pickle', 'rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle', 'rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle', 'rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle', 'rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
<|reserved_special_token_0|>
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
<|reserved_special_token_0|>
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
<|reserved_special_token_0|>
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
print('Critics overpraise these movies ' + str(counter) +
' times more than normal viewers out of ' + str(len(criticsscore)) +
' movies in total.')
if counter < len(criticsscore) - counter:
print(
'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'
)
else:
print(
'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'
)
<|reserved_special_token_0|>
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score -
ratings[movieid] / 10):
useriscloser += 1
else:
criticiscloser += 1
print('Critics are more closer to the ratings for ' + str(criticiscloser) +
' times, while normal viewers are closer ' + str(useriscloser) +
' times out of ' + str(len(criticsscore)) + ' movies in total.')
if useriscloser > criticiscloser:
print(
'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'
)
else:
print(
'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('loading data...')
with open('movienumbers.pickle', 'rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle', 'rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle', 'rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle', 'rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
<|reserved_special_token_0|>
userscore = {}
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
criticsscore = {}
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
counter = 0
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
print('Critics overpraise these movies ' + str(counter) +
' times more than normal viewers out of ' + str(len(criticsscore)) +
' movies in total.')
if counter < len(criticsscore) - counter:
print(
'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'
)
else:
print(
'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'
)
useriscloser = 0
criticiscloser = 0
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score -
ratings[movieid] / 10):
useriscloser += 1
else:
criticiscloser += 1
print('Critics are more closer to the ratings for ' + str(criticiscloser) +
' times, while normal viewers are closer ' + str(useriscloser) +
' times out of ' + str(len(criticsscore)) + ' movies in total.')
if useriscloser > criticiscloser:
print(
'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'
)
else:
print(
'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'
)
<|reserved_special_token_1|>
import pickle
print('loading data...')
with open('movienumbers.pickle', 'rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle', 'rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle', 'rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle', 'rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
userscore = {}
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
criticsscore = {}
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
counter = 0
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
print('Critics overpraise these movies ' + str(counter) +
' times more than normal viewers out of ' + str(len(criticsscore)) +
' movies in total.')
if counter < len(criticsscore) - counter:
print(
'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'
)
else:
print(
'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'
)
useriscloser = 0
criticiscloser = 0
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score -
ratings[movieid] / 10):
useriscloser += 1
else:
criticiscloser += 1
print('Critics are more closer to the ratings for ' + str(criticiscloser) +
' times, while normal viewers are closer ' + str(useriscloser) +
' times out of ' + str(len(criticsscore)) + ' movies in total.')
if useriscloser > criticiscloser:
print(
'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'
)
else:
print(
'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'
)
<|reserved_special_token_1|>
# The actual code begins here
# This file is intended to load everything downloaded from loaddata.py, preventing user getting banned from IMDB
# The code is written to see what are some key words of the reviews from critics and normal viewers
# And to see what are some of the differences
# The second task is to asses the people's emotion vs. actual score given
# First, we need to load back everything we dumped to folder via pickle.
import pickle
print('loading data...')
with open('movienumbers.pickle','rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle','rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle','rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle','rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
# then, it's time to use nltp to see the score of the critics vs. viewers on movies
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# print(movienumbers)
# print(ratings)
# print(userratings)
# print(metaratings)
# Userratings is a dictionary in ways like this "ttxxxxxx : [reviews1, reviews2,...]"
# print(userratings['tt0111161'])
#
# print(metaratings['tt0111161'])
# print(ratings['tt0111161'])
userscore = {}
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
# Meta ratings is a dictionary in ways like this "ttxxxxxx : [reviews1, reviews2,...]"
criticsscore = {}
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
# Question 1: Are critics always more positive than the audience?
counter = 0
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
# Displaying results to question 1
print("Critics overpraise these movies " + str(counter) + " times more than normal viewers out of "
+ str(len(criticsscore)) + " movies in total.")
if counter < (len(criticsscore) - counter):
print("Because the critics overpraise less than half of the movies sampled here, the critics are more refrained "
"than the users on IMDb.")
else:
print("Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained "
"than the users on IMDb.")
# Question 2: Is the IMDB score closer to the users' sentiment? Or the critics.
useriscloser = 0
criticiscloser = 0
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - (ratings[movieid])/10) > abs(score - (ratings[movieid]/10)):
useriscloser += 1
else:
criticiscloser += 1
# Displaying results to question 2
print("Critics are more closer to the ratings for " + str(criticiscloser) +
" times, while normal viewers are closer " + str(useriscloser) + " times out of " +
str(len(criticsscore)) + " movies in total.")
if useriscloser > criticiscloser:
print("Because the more movies have users resembling closer to the rating, the critics are less accurate "
"than the users on IMDb.")
else:
print("Because the more movies have critics resembling closer to the rating, the users are less accurate "
"than the users on IMDb.")
|
flexible
|
{
"blob_id": "1f69cf5f6d15048e6ead37b5da836c9e2f783f74",
"index": 803,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\n<mask token>\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\n<mask token>\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\nprint(criticsscore)\n<mask token>\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\n<mask token>\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more closer to the ratings for ' + str(criticiscloser) +\n ' times, while normal viewers are closer ' + 
str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n",
"step-3": "<mask token>\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\n<mask token>\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\nprint(criticsscore)\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more closer to the ratings for ' + str(criticiscloser) +\n ' 
times, while normal viewers are closer ' + str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n",
"step-4": "import pickle\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\nprint(criticsscore)\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more closer to 
the ratings for ' + str(criticiscloser) +\n ' times, while normal viewers are closer ' + str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n",
"step-5": "# The actual code begins here\n# This file is intended to load everything downloaded from loaddata.py, preventing user getting banned from IMDB\n# The code is written to see what are some key words of the reviews from critics and normal viewers\n# And to see what are some of the differences\n# The second task is to asses the people's emotion vs. actual score given\n\n# First, we need to load back everything we dumped to folder via pickle.\n\nimport pickle\nprint('loading data...')\n\nwith open('movienumbers.pickle','rb') as input_file:\n movienumbers = pickle.load(input_file)\n\nwith open('ratings.pickle','rb') as input_file:\n ratings = pickle.load(input_file)\n\nwith open('userratings.pickle','rb') as input_file:\n userratings = pickle.load(input_file)\n\nwith open('metaratings.pickle','rb') as input_file:\n metaratings = pickle.load(input_file)\n\nprint('Pickled data successfully loaded.')\n\n# then, it's time to use nltp to see the score of the critics vs. viewers on movies\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\n# print(movienumbers)\n# print(ratings)\n# print(userratings)\n# print(metaratings)\n\n# Userratings is a dictionary in ways like this \"ttxxxxxx : [reviews1, reviews2,...]\"\n\n# print(userratings['tt0111161'])\n#\n# print(metaratings['tt0111161'])\n# print(ratings['tt0111161'])\n\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']\n average = score / len(reviews)\n userscore[movieid] = average\n\nprint(userscore)\n\n# Meta ratings is a dictionary in ways like this \"ttxxxxxx : [reviews1, reviews2,...]\"\n\n\n\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = 
average\n\nprint(criticsscore)\n\n\n# Question 1: Are critics always more positive than the audience?\n\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\n\n# Displaying results to question 1\nprint(\"Critics overpraise these movies \" + str(counter) + \" times more than normal viewers out of \"\n + str(len(criticsscore)) + \" movies in total.\")\nif counter < (len(criticsscore) - counter):\n print(\"Because the critics overpraise less than half of the movies sampled here, the critics are more refrained \"\n \"than the users on IMDb.\")\nelse:\n print(\"Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained \"\n \"than the users on IMDb.\")\n\n# Question 2: Is the IMDB score closer to the users' sentiment? Or the critics.\n\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - (ratings[movieid])/10) > abs(score - (ratings[movieid]/10)):\n useriscloser += 1\n else:\n criticiscloser += 1\n\n# Displaying results to question 2\nprint(\"Critics are more closer to the ratings for \" + str(criticiscloser) +\n \" times, while normal viewers are closer \" + str(useriscloser) + \" times out of \" +\n str(len(criticsscore)) + \" movies in total.\")\n\nif useriscloser > criticiscloser:\n print(\"Because the more movies have users resembling closer to the rating, the critics are less accurate \"\n \"than the users on IMDb.\")\nelse:\n print(\"Because the more movies have critics resembling closer to the rating, the users are less accurate \"\n \"than the users on IMDb.\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../..')
<|reserved_special_token_0|>
for epoch in range(60):
batch_count = 0
for i in range(len(X)):
feature = np.mat(X.values[i]).reshape(img_shape)
label = np.mat(one_hot_label[i]).T
x.set_value(feature)
one_hot.set_value(label)
optimizer.one_step()
batch_count += 1
if batch_count >= batch_size:
print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +
1, i + 1, loss.value[0, 0]))
optimizer.update()
batch_count = 0
pred = []
for i in range(len(X)):
feature = np.mat(X[i]).reshape(img_shape)
x.set_value(feature)
predict.forward()
pred.append(predict.value.A.ravel())
pred = np.array(pred).argmax(axis=1)
accuracy = (y == pred).astype(np.int).sum() / len(X)
print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../..')
<|reserved_special_token_0|>
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
X, y = X[:1000] / 255, y.astype(np.int)[:1000]
oh = OneHotEncoder(sparse=False)
one_hot_label = oh.fit_transform(y.values.reshape(-1, 1))
img_shape = 28, 28
x = ms.core.Variable(img_shape, init=False, trainable=False)
one_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)
conv1 = ms.layer.conv([x], img_shape, 3, (5, 5), 'ReLU')
pooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))
conv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), 'ReLU')
pooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))
fc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, 'ReLU')
output = ms.layer.fc(fc1, 120, 10, 'None')
predict = ms.ops.SoftMax(output)
loss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)
learning_rate = 0.005
optimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)
batch_size = 32
for epoch in range(60):
batch_count = 0
for i in range(len(X)):
feature = np.mat(X.values[i]).reshape(img_shape)
label = np.mat(one_hot_label[i]).T
x.set_value(feature)
one_hot.set_value(label)
optimizer.one_step()
batch_count += 1
if batch_count >= batch_size:
print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +
1, i + 1, loss.value[0, 0]))
optimizer.update()
batch_count = 0
pred = []
for i in range(len(X)):
feature = np.mat(X[i]).reshape(img_shape)
x.set_value(feature)
predict.forward()
pred.append(predict.value.A.ravel())
pred = np.array(pred).argmax(axis=1)
accuracy = (y == pred).astype(np.int).sum() / len(X)
print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
sys.path.append('../..')
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import OneHotEncoder
import matrixslow as ms
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
X, y = X[:1000] / 255, y.astype(np.int)[:1000]
oh = OneHotEncoder(sparse=False)
one_hot_label = oh.fit_transform(y.values.reshape(-1, 1))
img_shape = 28, 28
x = ms.core.Variable(img_shape, init=False, trainable=False)
one_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)
conv1 = ms.layer.conv([x], img_shape, 3, (5, 5), 'ReLU')
pooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))
conv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), 'ReLU')
pooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))
fc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, 'ReLU')
output = ms.layer.fc(fc1, 120, 10, 'None')
predict = ms.ops.SoftMax(output)
loss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)
learning_rate = 0.005
optimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)
batch_size = 32
for epoch in range(60):
batch_count = 0
for i in range(len(X)):
feature = np.mat(X.values[i]).reshape(img_shape)
label = np.mat(one_hot_label[i]).T
x.set_value(feature)
one_hot.set_value(label)
optimizer.one_step()
batch_count += 1
if batch_count >= batch_size:
print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +
1, i + 1, loss.value[0, 0]))
optimizer.update()
batch_count = 0
pred = []
for i in range(len(X)):
feature = np.mat(X[i]).reshape(img_shape)
x.set_value(feature)
predict.forward()
pred.append(predict.value.A.ravel())
pred = np.array(pred).argmax(axis=1)
accuracy = (y == pred).astype(np.int).sum() / len(X)
print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 12:16:15 2020
@author: zhangjuefei
"""
import sys
sys.path.append('../..')
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import OneHotEncoder
import matrixslow as ms
# 加载MNIST数据集,取一部分样本并归一化
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
X, y = X[:1000] / 255, y.astype(np.int)[:1000]
# 将整数形式的标签转换成One-Hot编码
oh = OneHotEncoder(sparse=False)
one_hot_label = oh.fit_transform(y.values.reshape(-1, 1))
# 输入图像尺寸
img_shape = (28, 28)
# 输入图像
x = ms.core.Variable(img_shape, init=False, trainable=False)
# One-Hot标签
one_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)
# 第一卷积层
conv1 = ms.layer.conv([x], img_shape, 3, (5, 5), "ReLU")
# 第一池化层
pooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))
# 第二卷积层
conv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), "ReLU")
# 第二池化层
pooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))
# 全连接层
fc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, "ReLU")
# 输出层
output = ms.layer.fc(fc1, 120, 10, "None")
# 分类概率
predict = ms.ops.SoftMax(output)
# 交叉熵损失
loss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)
# 学习率
learning_rate = 0.005
# 优化器
optimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)
# 批大小
batch_size = 32
# 训练
for epoch in range(60):
batch_count = 0
for i in range(len(X)):
feature = np.mat(X.values[i]).reshape(img_shape)
label = np.mat(one_hot_label[i]).T
x.set_value(feature)
one_hot.set_value(label)
optimizer.one_step()
batch_count += 1
if batch_count >= batch_size:
print("epoch: {:d}, iteration: {:d}, loss: {:.3f}".format(epoch + 1, i + 1, loss.value[0, 0]))
optimizer.update()
batch_count = 0
pred = []
for i in range(len(X)):
feature = np.mat(X[i]).reshape(img_shape)
x.set_value(feature)
predict.forward()
pred.append(predict.value.A.ravel())
pred = np.array(pred).argmax(axis=1)
accuracy = (y == pred).astype(np.int).sum() / len(X)
print("epoch: {:d}, accuracy: {:.3f}".format(epoch + 1, accuracy))
|
flexible
|
{
"blob_id": "63f155f7da958e9b6865007c701f7cf986b0cbac",
"index": 7800,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('../..')\n<mask token>\nfor epoch in range(60):\n batch_count = 0\n for i in range(len(X)):\n feature = np.mat(X.values[i]).reshape(img_shape)\n label = np.mat(one_hot_label[i]).T\n x.set_value(feature)\n one_hot.set_value(label)\n optimizer.one_step()\n batch_count += 1\n if batch_count >= batch_size:\n print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +\n 1, i + 1, loss.value[0, 0]))\n optimizer.update()\n batch_count = 0\n pred = []\n for i in range(len(X)):\n feature = np.mat(X[i]).reshape(img_shape)\n x.set_value(feature)\n predict.forward()\n pred.append(predict.value.A.ravel())\n pred = np.array(pred).argmax(axis=1)\n accuracy = (y == pred).astype(np.int).sum() / len(X)\n print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))\n",
"step-3": "<mask token>\nsys.path.append('../..')\n<mask token>\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX, y = X[:1000] / 255, y.astype(np.int)[:1000]\noh = OneHotEncoder(sparse=False)\none_hot_label = oh.fit_transform(y.values.reshape(-1, 1))\nimg_shape = 28, 28\nx = ms.core.Variable(img_shape, init=False, trainable=False)\none_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)\nconv1 = ms.layer.conv([x], img_shape, 3, (5, 5), 'ReLU')\npooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))\nconv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), 'ReLU')\npooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))\nfc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, 'ReLU')\noutput = ms.layer.fc(fc1, 120, 10, 'None')\npredict = ms.ops.SoftMax(output)\nloss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)\nlearning_rate = 0.005\noptimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)\nbatch_size = 32\nfor epoch in range(60):\n batch_count = 0\n for i in range(len(X)):\n feature = np.mat(X.values[i]).reshape(img_shape)\n label = np.mat(one_hot_label[i]).T\n x.set_value(feature)\n one_hot.set_value(label)\n optimizer.one_step()\n batch_count += 1\n if batch_count >= batch_size:\n print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +\n 1, i + 1, loss.value[0, 0]))\n optimizer.update()\n batch_count = 0\n pred = []\n for i in range(len(X)):\n feature = np.mat(X[i]).reshape(img_shape)\n x.set_value(feature)\n predict.forward()\n pred.append(predict.value.A.ravel())\n pred = np.array(pred).argmax(axis=1)\n accuracy = (y == pred).astype(np.int).sum() / len(X)\n print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))\n",
"step-4": "<mask token>\nimport sys\nsys.path.append('../..')\nimport numpy as np\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import OneHotEncoder\nimport matrixslow as ms\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX, y = X[:1000] / 255, y.astype(np.int)[:1000]\noh = OneHotEncoder(sparse=False)\none_hot_label = oh.fit_transform(y.values.reshape(-1, 1))\nimg_shape = 28, 28\nx = ms.core.Variable(img_shape, init=False, trainable=False)\none_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)\nconv1 = ms.layer.conv([x], img_shape, 3, (5, 5), 'ReLU')\npooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))\nconv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), 'ReLU')\npooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))\nfc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, 'ReLU')\noutput = ms.layer.fc(fc1, 120, 10, 'None')\npredict = ms.ops.SoftMax(output)\nloss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)\nlearning_rate = 0.005\noptimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)\nbatch_size = 32\nfor epoch in range(60):\n batch_count = 0\n for i in range(len(X)):\n feature = np.mat(X.values[i]).reshape(img_shape)\n label = np.mat(one_hot_label[i]).T\n x.set_value(feature)\n one_hot.set_value(label)\n optimizer.one_step()\n batch_count += 1\n if batch_count >= batch_size:\n print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +\n 1, i + 1, loss.value[0, 0]))\n optimizer.update()\n batch_count = 0\n pred = []\n for i in range(len(X)):\n feature = np.mat(X[i]).reshape(img_shape)\n x.set_value(feature)\n predict.forward()\n pred.append(predict.value.A.ravel())\n pred = np.array(pred).argmax(axis=1)\n accuracy = (y == pred).astype(np.int).sum() / len(X)\n print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 12:16:15 2020\n\n@author: zhangjuefei\n\"\"\"\n\nimport sys\nsys.path.append('../..')\n\nimport numpy as np\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import OneHotEncoder\nimport matrixslow as ms\n\n# 加载MNIST数据集,取一部分样本并归一化\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX, y = X[:1000] / 255, y.astype(np.int)[:1000]\n\n# 将整数形式的标签转换成One-Hot编码\noh = OneHotEncoder(sparse=False)\none_hot_label = oh.fit_transform(y.values.reshape(-1, 1))\n\n# 输入图像尺寸\nimg_shape = (28, 28)\n\n# 输入图像\nx = ms.core.Variable(img_shape, init=False, trainable=False)\n\n# One-Hot标签\none_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)\n\n# 第一卷积层\nconv1 = ms.layer.conv([x], img_shape, 3, (5, 5), \"ReLU\")\n\n# 第一池化层\npooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))\n\n# 第二卷积层\nconv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), \"ReLU\")\n\n# 第二池化层\npooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))\n\n# 全连接层\nfc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, \"ReLU\")\n\n# 输出层\noutput = ms.layer.fc(fc1, 120, 10, \"None\")\n\n# 分类概率\npredict = ms.ops.SoftMax(output)\n\n# 交叉熵损失\nloss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)\n\n# 学习率\nlearning_rate = 0.005\n\n# 优化器\noptimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)\n\n# 批大小\nbatch_size = 32\n\n# 训练\nfor epoch in range(60):\n \n batch_count = 0\n \n for i in range(len(X)):\n \n feature = np.mat(X.values[i]).reshape(img_shape)\n label = np.mat(one_hot_label[i]).T\n \n x.set_value(feature)\n one_hot.set_value(label)\n \n\n optimizer.one_step()\n \n\n batch_count += 1\n if batch_count >= batch_size:\n \n print(\"epoch: {:d}, iteration: {:d}, loss: {:.3f}\".format(epoch + 1, i + 1, loss.value[0, 0]))\n\n optimizer.update()\n batch_count = 0\n \n\n pred = []\n for i in range(len(X)):\n \n feature = np.mat(X[i]).reshape(img_shape)\n x.set_value(feature)\n \n predict.forward()\n 
pred.append(predict.value.A.ravel())\n \n pred = np.array(pred).argmax(axis=1)\n accuracy = (y == pred).astype(np.int).sum() / len(X)\n \n print(\"epoch: {:d}, accuracy: {:.3f}\".format(epoch + 1, accuracy))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding: utf-8
from mrcnn import utils
import numpy as np
import os
import skimage
class SlicesDataset(utils.Dataset):
    """Extension of the Mask R-CNN dataset class for our provided slice data."""

    def load_slices(self, dataset_dir, n_images, n_patches, channels=None):
        """Load a subset of the Slices dataset.

        dataset_dir: Root directory of the dataset.
        n_images: number of images to load. Will load in os.listdir list order.
        n_patches: number of patches to load per image.
        channels: list of strings indicating channels to be stacked in the image.
            currently "base", "mf", "edges" and "none" can be arbitrarily stacked.
            Defaults to ["base"].
        """
        # BUG FIX: a mutable default argument ([...]) is shared across calls;
        # use a None sentinel instead (behavior for all callers is unchanged).
        if channels is None:
            channels = ["base"]
        # Register the classes this dataset trains on.
        self.add_class("slices", 1, "tissue")
        self.add_class("slices", 2, "mag")
        image_list = os.listdir(dataset_dir)
        patch_counter = 0
        # Register every requested patch of every requested image.
        for i in range(n_images):
            image_path = os.path.join(dataset_dir, image_list[i])
            patch_list = os.listdir(image_path)
            print(f"processing: image {i}")
            for j in range(n_patches):
                patch_path = os.path.join(image_path, patch_list[j])
                patch_image_path = os.path.join(patch_path, "images")
                file_list = os.listdir(patch_image_path)
                image_file_path = os.path.join(patch_image_path, file_list[0])
                # NOTE(review): assumes a single-channel (2-D) image; a color
                # image would make this unpacking fail -- confirm input data.
                image = skimage.io.imread(image_file_path)
                height, width = image.shape
                self.add_image(
                    "slices",
                    image_id=patch_counter,
                    path=patch_path,
                    width=width, height=height,
                    channels=channels,
                )
                patch_counter += 1

    def load_image(self, image_id):
        """Return the stacked multi-channel image for a registered patch."""
        info = self.image_info[image_id]
        patch_path = info['path']
        width = info['width']
        height = info['height']
        impath = os.path.join(patch_path, "images")
        file_list = os.listdir(impath)
        channels = info['channels']
        image = []
        # Stack the requested channels; "none" inserts an all-zero plane.
        for channel in channels:
            if channel == "none":
                channel_image = skimage.img_as_ubyte(np.zeros((height, width)))
            else:
                # First file whose name contains the channel tag.
                channel_image_name = [x for x in file_list if channel in x][0]
                channel_image_path = os.path.join(impath, channel_image_name)
                channel_image = skimage.io.imread(channel_image_path)
                channel_image = skimage.img_as_ubyte(channel_image)
            image.append(channel_image)
        image = np.stack(image, axis=2)
        return image

    def load_mask(self, image_id):
        """Load instance masks and their class ids for a registered patch.

        Returns:
            masks: bool array of shape [height, width, n_instances].
            class_ids: int array of shape [n_instances] (1 = tissue, 2 = mag).
        """
        info = self.image_info[image_id]
        patch_path = info['path']
        mag_path = os.path.join(patch_path, "mag")
        tissue_path = os.path.join(patch_path, "tissue")
        # Collect the mask file names per class.
        mag_mask_list = os.listdir(mag_path)
        tissue_mask_list = os.listdir(tissue_path)
        classes = []
        masks = []
        # Append each mask with its class id.
        if mag_mask_list:
            for filename in mag_mask_list:
                a = os.path.join(mag_path, filename)
                masks.append(skimage.io.imread(a).astype(bool))
                classes.append(2)
        if tissue_mask_list:
            for filename in tissue_mask_list:
                a = os.path.join(tissue_path, filename)
                masks.append(skimage.io.imread(a).astype(bool))
                classes.append(1)
        # NOTE(review): np.stack raises if both mask folders are empty --
        # confirm every patch has at least one mask.
        return np.stack(masks, axis=2), np.asarray(classes).astype(int)
|
normal
|
{
"blob_id": "8675deb69eae04a722073432eaf69ce3d24a11ad",
"index": 9041,
"step-1": "<mask token>\n\n\nclass SlicesDataset(utils.Dataset):\n <mask token>\n <mask token>\n\n def load_image(self, image_id):\n \"\"\"Returns an image with a given id.\"\"\"\n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path, 'images')\n file_list = os.listdir(impath)\n channels = info['channels']\n image = []\n for channel in channels:\n if channel == 'none':\n channel_image = skimage.img_as_ubyte(np.zeros((height, width)))\n else:\n channel_image_name = [x for x in file_list if channel in x][0]\n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n image = np.stack(image, axis=2)\n return image\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SlicesDataset(utils.Dataset):\n <mask token>\n\n def load_slices(self, dataset_dir, n_images, n_patches, channels=['base']):\n \"\"\"Load a subset of the Slices dataset.\n dataset_dir: Root directory of the dataset.\n n_images: number of images to load. Will load in os.listdir list order.\n n_patches: number of patches to load per image.\n channels: list of strings indicating channels to be stacked in the image.\n currently \"base\", \"mf\", \"edges\" and \"none\" can be arbitrarily stacked.\n \"\"\"\n self.add_class('slices', 1, 'tissue')\n self.add_class('slices', 2, 'mag')\n image_list = os.listdir(dataset_dir)\n image_counter = 0\n patch_counter = 0\n for i in range(n_images):\n image_path = os.path.join(dataset_dir, image_list[i])\n patch_list = os.listdir(image_path)\n print(f'processing: image {i}')\n for j in range(n_patches):\n patch_path = os.path.join(image_path, patch_list[j])\n patch_image_path = os.path.join(patch_path, 'images')\n file_list = os.listdir(patch_image_path)\n image_file_path = os.path.join(patch_image_path, file_list[0])\n image = skimage.io.imread(image_file_path)\n height, width = image.shape\n self.add_image('slices', image_id=patch_counter, path=\n patch_path, width=width, height=height, channels=channels)\n patch_counter += 1\n\n def load_image(self, image_id):\n \"\"\"Returns an image with a given id.\"\"\"\n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path, 'images')\n file_list = os.listdir(impath)\n channels = info['channels']\n image = []\n for channel in channels:\n if channel == 'none':\n channel_image = skimage.img_as_ubyte(np.zeros((height, width)))\n else:\n channel_image_name = [x for x in file_list if channel in x][0]\n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n 
image.append(channel_image)\n image = np.stack(image, axis=2)\n return image\n\n def load_mask(self, image_id):\n \"\"\"Loads masks from dataset.\n \"\"\"\n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path, 'mag')\n tissue_path = os.path.join(patch_path, 'tissue')\n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n classes = []\n masks = []\n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path, filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n if tissue_mask_list:\n for filename in tissue_mask_list:\n a = os.path.join(tissue_path, filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n return np.stack(masks, axis=2), np.asarray(classes).astype(int)\n",
"step-3": "<mask token>\n\n\nclass SlicesDataset(utils.Dataset):\n \"\"\" Extension of maskrcnn dataset class to be used with our provided data. \"\"\"\n\n def load_slices(self, dataset_dir, n_images, n_patches, channels=['base']):\n \"\"\"Load a subset of the Slices dataset.\n dataset_dir: Root directory of the dataset.\n n_images: number of images to load. Will load in os.listdir list order.\n n_patches: number of patches to load per image.\n channels: list of strings indicating channels to be stacked in the image.\n currently \"base\", \"mf\", \"edges\" and \"none\" can be arbitrarily stacked.\n \"\"\"\n self.add_class('slices', 1, 'tissue')\n self.add_class('slices', 2, 'mag')\n image_list = os.listdir(dataset_dir)\n image_counter = 0\n patch_counter = 0\n for i in range(n_images):\n image_path = os.path.join(dataset_dir, image_list[i])\n patch_list = os.listdir(image_path)\n print(f'processing: image {i}')\n for j in range(n_patches):\n patch_path = os.path.join(image_path, patch_list[j])\n patch_image_path = os.path.join(patch_path, 'images')\n file_list = os.listdir(patch_image_path)\n image_file_path = os.path.join(patch_image_path, file_list[0])\n image = skimage.io.imread(image_file_path)\n height, width = image.shape\n self.add_image('slices', image_id=patch_counter, path=\n patch_path, width=width, height=height, channels=channels)\n patch_counter += 1\n\n def load_image(self, image_id):\n \"\"\"Returns an image with a given id.\"\"\"\n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path, 'images')\n file_list = os.listdir(impath)\n channels = info['channels']\n image = []\n for channel in channels:\n if channel == 'none':\n channel_image = skimage.img_as_ubyte(np.zeros((height, width)))\n else:\n channel_image_name = [x for x in file_list if channel in x][0]\n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = 
skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n image = np.stack(image, axis=2)\n return image\n\n def load_mask(self, image_id):\n \"\"\"Loads masks from dataset.\n \"\"\"\n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path, 'mag')\n tissue_path = os.path.join(patch_path, 'tissue')\n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n classes = []\n masks = []\n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path, filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n if tissue_mask_list:\n for filename in tissue_mask_list:\n a = os.path.join(tissue_path, filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n return np.stack(masks, axis=2), np.asarray(classes).astype(int)\n",
"step-4": "from mrcnn import utils\nimport numpy as np\nimport os\nimport skimage\n\n\nclass SlicesDataset(utils.Dataset):\n \"\"\" Extension of maskrcnn dataset class to be used with our provided data. \"\"\"\n\n def load_slices(self, dataset_dir, n_images, n_patches, channels=['base']):\n \"\"\"Load a subset of the Slices dataset.\n dataset_dir: Root directory of the dataset.\n n_images: number of images to load. Will load in os.listdir list order.\n n_patches: number of patches to load per image.\n channels: list of strings indicating channels to be stacked in the image.\n currently \"base\", \"mf\", \"edges\" and \"none\" can be arbitrarily stacked.\n \"\"\"\n self.add_class('slices', 1, 'tissue')\n self.add_class('slices', 2, 'mag')\n image_list = os.listdir(dataset_dir)\n image_counter = 0\n patch_counter = 0\n for i in range(n_images):\n image_path = os.path.join(dataset_dir, image_list[i])\n patch_list = os.listdir(image_path)\n print(f'processing: image {i}')\n for j in range(n_patches):\n patch_path = os.path.join(image_path, patch_list[j])\n patch_image_path = os.path.join(patch_path, 'images')\n file_list = os.listdir(patch_image_path)\n image_file_path = os.path.join(patch_image_path, file_list[0])\n image = skimage.io.imread(image_file_path)\n height, width = image.shape\n self.add_image('slices', image_id=patch_counter, path=\n patch_path, width=width, height=height, channels=channels)\n patch_counter += 1\n\n def load_image(self, image_id):\n \"\"\"Returns an image with a given id.\"\"\"\n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path, 'images')\n file_list = os.listdir(impath)\n channels = info['channels']\n image = []\n for channel in channels:\n if channel == 'none':\n channel_image = skimage.img_as_ubyte(np.zeros((height, width)))\n else:\n channel_image_name = [x for x in file_list if channel in x][0]\n channel_image_path = os.path.join(impath, 
channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n image = np.stack(image, axis=2)\n return image\n\n def load_mask(self, image_id):\n \"\"\"Loads masks from dataset.\n \"\"\"\n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path, 'mag')\n tissue_path = os.path.join(patch_path, 'tissue')\n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n classes = []\n masks = []\n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path, filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n if tissue_mask_list:\n for filename in tissue_mask_list:\n a = os.path.join(tissue_path, filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n return np.stack(masks, axis=2), np.asarray(classes).astype(int)\n",
"step-5": "\n# coding: utf-8\n\n\n\n\nfrom mrcnn import utils\nimport numpy as np\nimport os\nimport skimage\n\n\nclass SlicesDataset(utils.Dataset):\n \"\"\" Extension of maskrcnn dataset class to be used with our provided data. \"\"\"\n \n \n def load_slices(self, dataset_dir, n_images, n_patches, channels = [\"base\"]):\n \"\"\"Load a subset of the Slices dataset.\n dataset_dir: Root directory of the dataset.\n n_images: number of images to load. Will load in os.listdir list order.\n n_patches: number of patches to load per image.\n channels: list of strings indicating channels to be stacked in the image.\n currently \"base\", \"mf\", \"edges\" and \"none\" can be arbitrarily stacked.\n \"\"\"\n \n # add classes to be trained on\n \n self.add_class(\"slices\", 1, \"tissue\")\n self.add_class(\"slices\", 2, \"mag\")\n \n # collect image list and initialize counter\n \n image_list = os.listdir(dataset_dir)\n image_counter = 0\n patch_counter = 0\n \n # cycle over images and save patches to database.\n \n for i in range(n_images):\n \n image_path = os.path.join(dataset_dir,image_list[i])\n patch_list = os.listdir(image_path)\n \n print(f\"processing: image {i}\") \n \n for j in range(n_patches):\n \n patch_path = os.path.join(image_path, patch_list[j])\n \n patch_image_path = os.path.join(patch_path,\"images\")\n \n file_list = os.listdir(patch_image_path)\n \n image_file_path = os.path.join(patch_image_path,file_list[0])\n \n image = skimage.io.imread(image_file_path)\n \n height, width = image.shape\n \n self.add_image(\n \"slices\",\n image_id = patch_counter,\n path = patch_path,\n width = width, height = height,\n channels = channels,\n )\n patch_counter += 1\n \n\n\n def load_image(self, image_id):\n \"\"\"Returns an image with a given id.\"\"\"\n \n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path,\"images\")\n file_list = 
os.listdir(impath) \n channels = info['channels']\n \n image = []\n \n # stack channels to be loaded.\n \n for channel in channels:\n \n if channel == \"none\":\n channel_image = skimage.img_as_ubyte(np.zeros( (height,width) ) )\n \n else:\n channel_image_name = [x for x in file_list if channel in x][0] \n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n \n image = np.stack(image, axis=2)\n \n return image\n \n def load_mask(self, image_id):\n \"\"\"Loads masks from dataset.\n \"\"\"\n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path,\"mag\")\n tissue_path = os.path.join(patch_path,\"tissue\")\n \n # collect mask names\n \n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n \n classes = []\n masks = []\n \n # append masks and ids in list\n \n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n \n if tissue_mask_list:\n for filename in tissue_mask_list:\n a = os.path.join(tissue_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n \n return np.stack(masks,axis=2), np.asarray(classes).astype(int)\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import sys
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication
#---Import that will load the UI file---#
from PyQt5.uic import loadUi
import detechRs_rc #---THIS IMPORT WILL DISPLAY THE IMAGES STORED IN THE QRC FILE AND _rc.py FILE--#
#--CLASS CREATED THAT WILL LOAD THE UI FILE
class Login(QMainWindow):
    """Login window: loads the Qt Designer form and reacts to the login button."""

    def __init__(self):
        super(Login, self).__init__()
        # Build the widget tree from the Designer .ui file onto this window.
        loadUi("login_UI.ui", self)
        # Route clicks on the login button to the handler below.
        self.loginButton.clicked.connect(self.loginFunction)

    def loginFunction(self):
        """Read both line edits and echo their contents to the terminal."""
        username = self.lgUserLine.text()
        password = self.lgPassLine.text()
        print("Success, ", username, "and ", password)
# Qt application bootstrap: a PyQt program needs exactly one QApplication.
app=QApplication(sys.argv)
loginWindow=Login()
# A QStackedWidget is used as the top-level container so further pages
# (e.g. a post-login screen) can be pushed later.
widget=QtWidgets.QStackedWidget()
widget.addWidget(loginWindow) #-- displays all design widgets of the UI Window --#
widget.setFixedWidth(1190) #-- setting the fixed window size in width --#
widget.setFixedHeight(782) #-- setting the fixed window size in height--#
widget.show()
app.exec_() #-- window execution (blocks until the window closes) --#
|
normal
|
{
"blob_id": "a9b1cc9b928b8999450b6c95656b863c476b273b",
"index": 7355,
"step-1": "<mask token>\n\n\nclass Login(QMainWindow):\n\n def __init__(self):\n super(Login, self).__init__()\n loadUi('login_UI.ui', self)\n self.loginButton.clicked.connect(self.loginFunction)\n\n def loginFunction(self):\n lgUserLine = self.lgUserLine.text()\n lgPassLine = self.lgPassLine.text()\n print('Success, ', lgUserLine, 'and ', lgPassLine)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Login(QMainWindow):\n\n def __init__(self):\n super(Login, self).__init__()\n loadUi('login_UI.ui', self)\n self.loginButton.clicked.connect(self.loginFunction)\n\n def loginFunction(self):\n lgUserLine = self.lgUserLine.text()\n lgPassLine = self.lgPassLine.text()\n print('Success, ', lgUserLine, 'and ', lgPassLine)\n\n\n<mask token>\nwidget.addWidget(loginWindow)\nwidget.setFixedWidth(1190)\nwidget.setFixedHeight(782)\nwidget.show()\napp.exec_()\n",
"step-3": "<mask token>\n\n\nclass Login(QMainWindow):\n\n def __init__(self):\n super(Login, self).__init__()\n loadUi('login_UI.ui', self)\n self.loginButton.clicked.connect(self.loginFunction)\n\n def loginFunction(self):\n lgUserLine = self.lgUserLine.text()\n lgPassLine = self.lgPassLine.text()\n print('Success, ', lgUserLine, 'and ', lgPassLine)\n\n\napp = QApplication(sys.argv)\nloginWindow = Login()\nwidget = QtWidgets.QStackedWidget()\nwidget.addWidget(loginWindow)\nwidget.setFixedWidth(1190)\nwidget.setFixedHeight(782)\nwidget.show()\napp.exec_()\n",
"step-4": "import sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\nfrom PyQt5.uic import loadUi\nimport detechRs_rc\n\n\nclass Login(QMainWindow):\n\n def __init__(self):\n super(Login, self).__init__()\n loadUi('login_UI.ui', self)\n self.loginButton.clicked.connect(self.loginFunction)\n\n def loginFunction(self):\n lgUserLine = self.lgUserLine.text()\n lgPassLine = self.lgPassLine.text()\n print('Success, ', lgUserLine, 'and ', lgPassLine)\n\n\napp = QApplication(sys.argv)\nloginWindow = Login()\nwidget = QtWidgets.QStackedWidget()\nwidget.addWidget(loginWindow)\nwidget.setFixedWidth(1190)\nwidget.setFixedHeight(782)\nwidget.show()\napp.exec_()\n",
"step-5": "import sys\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\r\n\r\n#---Import that will load the UI file---#\r\nfrom PyQt5.uic import loadUi\r\n\r\nimport detechRs_rc #---THIS IMPORT WILL DISPLAY THE IMAGES STORED IN THE QRC FILE AND _rc.py FILE--#\r\n\r\n#--CLASS CREATED THAT WILL LOAD THE UI FILE\r\nclass Login(QMainWindow):\r\n def __init__(self):\r\n super(Login, self).__init__()\r\n # --- FROM THE IMPORT PYQT5.UIC IMPORT LOADUI---##\r\n loadUi(\"login_UI.ui\",self)\r\n\r\n #--- a code once the login button clicked, will call the loginFunction ---#\r\n self.loginButton.clicked.connect(self.loginFunction)\r\n\r\n #-- Created a function called \"loginFunction\" --#\r\n def loginFunction(self):\r\n lgUserLine=self.lgUserLine.text() #-- Getting the textbox context lgUserline --#\r\n lgPassLine=self.lgPassLine.text() #-- Getting the textbox context lgPassline --#\r\n\r\n #-- Will display at the terminal what you wrote in the textbox(QLineEdit) --#\r\n print(\"Success, \", lgUserLine, \"and \", lgPassLine)\r\n\r\n\r\n\r\napp=QApplication(sys.argv)\r\nloginWindow=Login()\r\nwidget=QtWidgets.QStackedWidget()\r\nwidget.addWidget(loginWindow) #-- displays all design widgets of the UI Window --#\r\nwidget.setFixedWidth(1190) #-- setting the fixed window size in width --#\r\nwidget.setFixedHeight(782) #-- setting the fixed window size in height--#\r\nwidget.show()\r\napp.exec_() #-- window execution --#",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import asyncio
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from datetime import datetime
import time
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.combining import OrTrigger
from apscheduler.triggers.cron import CronTrigger
def day_limits():
    """Daytime cron callback: raise the shared limit to its daytime value."""
    # `variable` is the multiprocessing.Manager value created in __main__.
    daytime_limit = 90
    variable.value = daytime_limit
    print('Day Variable: ', variable.value)
def night_limits():
    """Nighttime cron callback: lower the shared limit to its night value."""
    # `variable` is the multiprocessing.Manager value created in __main__.
    night_limit = 65
    variable.value = night_limit
    print('Night Variable: ', variable.value)
def thread_2(variable):
    """Worker loop: print a timestamp and the shared value every 2 seconds."""
    while True:
        # One strftime call per component, exactly as the original did.
        stamp = tuple(int(datetime.now().strftime(fmt)) for fmt in ("%H", "%M", "%S"))
        print('%02d:%02d:%02d - Variable: %d ' % (stamp + (variable.value,)))
        time.sleep(2)
if __name__ == "__main__":

    # Shared integer that the cron jobs mutate and the worker process reads.
    m = multiprocessing.Manager()
    variable = m.Value('i', 60)

    # NOTE(review): the original code built a BlockingScheduler here whose job
    # pointed at an undefined `callbacktotal` (NameError), passed a `minute=`
    # kwarg alongside an explicit trigger, and then called the blocking
    # .start(), which would have prevented everything below from ever running.
    # That dead section is removed.

    scheduler = AsyncIOScheduler()
    scheduler.add_job(day_limits, 'cron', hour=7, misfire_grace_time=3600, timezone='GB')
    scheduler.add_job(night_limits, 'cron', hour=19, minute=32, misfire_grace_time=3600, timezone='GB')
    scheduler.start()

    scheduler.print_jobs()

    # Run the monitoring loop in a separate process so it cannot starve the
    # asyncio event loop driving the scheduler.
    executor = ProcessPoolExecutor(1)
    loop = asyncio.get_event_loop()
    # BUG FIX: asyncio.async() is a SyntaxError on Python >= 3.7 (`async` is a
    # keyword); ensure_future is the documented replacement.
    baa = asyncio.ensure_future(loop.run_in_executor(executor, thread_2, variable))  # Need to pass variable explicitly

    try:
        loop.run_forever()
    except (KeyboardInterrupt, Exception):
        loop.stop()
        scheduler.shutdown()
|
normal
|
{
"blob_id": "f5a953d91e95d82e84e3e6d18ee89d28ba1b1515",
"index": 6022,
"step-1": "import asyncio\nimport multiprocessing\nfrom concurrent.futures import ProcessPoolExecutor\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom datetime import datetime\nimport time\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom apscheduler.triggers.combining import OrTrigger\nfrom apscheduler.triggers.cron import CronTrigger\n\n\ndef day_limits():\n variable.value = 90\n print ('Day Variable: ',variable.value)\n\ndef night_limits():\n variable.value = 65\n print ('Night Variable: ', variable.value)\n\n\ndef thread_2(variable):\n while True:\n c_hour = int(datetime.now().strftime(\"%H\"))\n c_min = int(datetime.now().strftime(\"%M\"))\n c_sec = int(datetime.now().strftime(\"%S\"))\n\n print ('%02d:%02d:%02d - Variable: %d ' % (c_hour,c_min,c_sec,variable.value))\n\n time.sleep(2)\n\n\nif __name__ == \"__main__\":\n\n m = multiprocessing.Manager()\n variable = m.Value('i', 60)\n\n schedfortest = BlockingScheduler()\n\n trigger_test = OrTrigger([\n CronTrigger(minute='*/1')\n ])\n\n schedfortest.add_job(callbacktotal,\n trigger_test,\n minute='*/2',\n max_instances=10)\n\n schedfortest.start()\n\n\n\n\n\n scheduler = AsyncIOScheduler()\n scheduler.add_job(day_limits, 'cron', hour=7,misfire_grace_time=3600,timezone='GB')\n scheduler.add_job(night_limits, 'cron', hour=19, minute=32,misfire_grace_time=3600,timezone='GB')\n scheduler.start()\n\n scheduler.print_jobs()\n\n executor = ProcessPoolExecutor(1)\n loop = asyncio.get_event_loop()\n baa = asyncio.async(loop.run_in_executor(executor, thread_2, variable)) # Need to pass variable explicitly\n\n try:\n loop.run_forever()\n\n except (KeyboardInterrupt, Exception):\n loop.stop()\n scheduler.shutdown()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x += 3
print('x : ', x)
print('-' * 30)
<|reserved_special_token_0|>
total += 1
total
<|reserved_special_token_1|>
x = 5
x += 3
print('x : ', x)
print('-' * 30)
total = 0
total += 1
total
<|reserved_special_token_1|>
# operatorTest02.py
# Demonstrates Python's augmented (compound) assignment operator.
x = 5
x += 3 # compound assignment operator (equivalent to x = x + 3)
print("x : ", x)
print("-"*30)

total = 0
total += 1
# NOTE(review): a bare expression as the last statement is a no-op when run as
# a script; it only echoes a value in an interactive session.
total
|
flexible
|
{
"blob_id": "4f8bc19bb113c9eac7c2ac774ac7b16f569d9704",
"index": 3083,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nx += 3\nprint('x : ', x)\nprint('-' * 30)\n<mask token>\ntotal += 1\ntotal\n",
"step-3": "x = 5\nx += 3\nprint('x : ', x)\nprint('-' * 30)\ntotal = 0\ntotal += 1\ntotal\n",
"step-4": "# operatorTest02.py\n\nx = 5\nx += 3 #복함 대입 연산자\nprint(\"x : \", x)\nprint(\"-\"*30)\n\ntotal = 0\ntotal += 1\ntotal ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import faiss
from util import vecs_io, vecs_util
from time import time
import os
'''
提取vecs, 输出numpy文件
'''
def vecs2numpy(fname, new_file_name, file_type, file_len=None):
if file_type == 'bvecs':
vectors, dim = vecs_io.bvecs_read_mmap(fname)
elif file_type == 'ivecs':
vectors, dim = vecs_io.ivecs_read_mmap(fname)
elif file_type == 'fvecs':
vectors, dim = vecs_io.fvecs_read_mmap(fname)
if file_len is not None:
vectors = vectors[:file_len]
vectors = vectors.astype(np.float32)
np.save(new_file_name, vectors)
return vectors
'''
创建文件夹, 提取base, query, gnd
'''
def get_base_query_gnd(config):
os.system("mkdir %s" % (config['project_data_dir']))
print("创建文件夹")
base_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['base'])
base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')
base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])
print("提取base")
query_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['query'])
query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')
query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])
print("提取query")
gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')
# print(base_npy_dir)
# print(query_npy_dir)
# print(gnd_npy_dir)
gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)
print("提取gnd")
return base, query, gnd
if __name__ == '__main__':
fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy'
new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'
get_NN_graph(fname, new_fname, 10)
a = '/home/bz/KaHIP/deploy/graphchecker'
b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'
|
normal
|
{
"blob_id": "5f84c8654c976bca2fa33e8f9ba5e28e3249253d",
"index": 7312,
"step-1": "<mask token>\n\n\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\n if file_type == 'bvecs':\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\n elif file_type == 'ivecs':\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\n elif file_type == 'fvecs':\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\n if file_len is not None:\n vectors = vectors[:file_len]\n vectors = vectors.astype(np.float32)\n np.save(new_file_name, vectors)\n return vectors\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\n if file_type == 'bvecs':\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\n elif file_type == 'ivecs':\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\n elif file_type == 'fvecs':\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\n if file_len is not None:\n vectors = vectors[:file_len]\n vectors = vectors.astype(np.float32)\n np.save(new_file_name, vectors)\n return vectors\n\n\n<mask token>\n\n\ndef get_base_query_gnd(config):\n os.system('mkdir %s' % config['project_data_dir'])\n print('创建文件夹')\n base_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['base'])\n base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')\n base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])\n print('提取base')\n query_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['query'])\n query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')\n query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])\n print('提取query')\n gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')\n gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)\n print('提取gnd')\n return base, query, gnd\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\n if file_type == 'bvecs':\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\n elif file_type == 'ivecs':\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\n elif file_type == 'fvecs':\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\n if file_len is not None:\n vectors = vectors[:file_len]\n vectors = vectors.astype(np.float32)\n np.save(new_file_name, vectors)\n return vectors\n\n\n<mask token>\n\n\ndef get_base_query_gnd(config):\n os.system('mkdir %s' % config['project_data_dir'])\n print('创建文件夹')\n base_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['base'])\n base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')\n base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])\n print('提取base')\n query_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['query'])\n query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')\n query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])\n print('提取query')\n gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')\n gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)\n print('提取gnd')\n return base, query, gnd\n\n\nif __name__ == '__main__':\n fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy'\n new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\n get_NN_graph(fname, new_fname, 10)\n a = '/home/bz/KaHIP/deploy/graphchecker'\n b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\n",
"step-4": "import numpy as np\nimport faiss\nfrom util import vecs_io, vecs_util\nfrom time import time\nimport os\n<mask token>\n\n\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\n if file_type == 'bvecs':\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\n elif file_type == 'ivecs':\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\n elif file_type == 'fvecs':\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\n if file_len is not None:\n vectors = vectors[:file_len]\n vectors = vectors.astype(np.float32)\n np.save(new_file_name, vectors)\n return vectors\n\n\n<mask token>\n\n\ndef get_base_query_gnd(config):\n os.system('mkdir %s' % config['project_data_dir'])\n print('创建文件夹')\n base_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['base'])\n base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')\n base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])\n print('提取base')\n query_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['query'])\n query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')\n query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])\n print('提取query')\n gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')\n gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)\n print('提取gnd')\n return base, query, gnd\n\n\nif __name__ == '__main__':\n fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy'\n new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\n get_NN_graph(fname, new_fname, 10)\n a = '/home/bz/KaHIP/deploy/graphchecker'\n b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\n",
"step-5": "import numpy as np\r\nimport faiss\r\nfrom util import vecs_io, vecs_util\r\nfrom time import time\r\nimport os\r\n\r\n'''\r\n提取vecs, 输出numpy文件\r\n'''\r\n\r\n\r\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\r\n if file_type == 'bvecs':\r\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\r\n elif file_type == 'ivecs':\r\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\r\n elif file_type == 'fvecs':\r\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\r\n if file_len is not None:\r\n vectors = vectors[:file_len]\r\n vectors = vectors.astype(np.float32)\r\n np.save(new_file_name, vectors)\r\n return vectors\r\n\r\n\r\n'''\r\n创建文件夹, 提取base, query, gnd\r\n'''\r\n\r\n\r\ndef get_base_query_gnd(config):\r\n os.system(\"mkdir %s\" % (config['project_data_dir']))\r\n print(\"创建文件夹\")\r\n\r\n base_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['base'])\r\n base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')\r\n base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])\r\n print(\"提取base\")\r\n\r\n query_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['query'])\r\n query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')\r\n query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])\r\n print(\"提取query\")\r\n\r\n gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')\r\n # print(base_npy_dir)\r\n # print(query_npy_dir)\r\n # print(gnd_npy_dir)\r\n gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)\r\n print(\"提取gnd\")\r\n return base, query, gnd\r\n\r\n\r\nif __name__ == '__main__':\r\n fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy'\r\n new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\r\n get_NN_graph(fname, new_fname, 10)\r\n a = '/home/bz/KaHIP/deploy/graphchecker'\r\n b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class StockPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 500
class StockView(APIView):
def get(self, request, *args, **kwargs):
if request.GET.get('ticker'):
qs = Stock.objects.filter(ticker=request.GET.get('ticker'))
serializer = StockSerializer(qs, many=True)
return Response(serializer.data)
else:
qs = Stock.objects.all()
paginator = StockPagination()
result_page = paginator.paginate_queryset(qs, request)
serializer = StockSerializer(result_page, many=True, context={
'request': request})
return Response(serializer.data, status=HTTP_200_OK)
def post(self, request, *args, **kwargs):
serializer = StockSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestView(APIView):
<|reserved_special_token_0|>
class StockPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 500
class StockView(APIView):
def get(self, request, *args, **kwargs):
if request.GET.get('ticker'):
qs = Stock.objects.filter(ticker=request.GET.get('ticker'))
serializer = StockSerializer(qs, many=True)
return Response(serializer.data)
else:
qs = Stock.objects.all()
paginator = StockPagination()
result_page = paginator.paginate_queryset(qs, request)
serializer = StockSerializer(result_page, many=True, context={
'request': request})
return Response(serializer.data, status=HTTP_200_OK)
def post(self, request, *args, **kwargs):
serializer = StockSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestView(APIView):
def get(self, request, *args, **kwargs):
ans = {'msg': 'Test'}
return Response(ans)
class StockPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 500
class StockView(APIView):
def get(self, request, *args, **kwargs):
if request.GET.get('ticker'):
qs = Stock.objects.filter(ticker=request.GET.get('ticker'))
serializer = StockSerializer(qs, many=True)
return Response(serializer.data)
else:
qs = Stock.objects.all()
paginator = StockPagination()
result_page = paginator.paginate_queryset(qs, request)
serializer = StockSerializer(result_page, many=True, context={
'request': request})
return Response(serializer.data, status=HTTP_200_OK)
def post(self, request, *args, **kwargs):
serializer = StockSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors)
<|reserved_special_token_1|>
from django.db.models import manager
from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.utils import serializer_helpers
from rest_framework.views import APIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.status import HTTP_200_OK
from .serializers import StockSerializer
from .models import Stock
class TestView(APIView):
def get(self, request, *args, **kwargs):
ans = {'msg': 'Test'}
return Response(ans)
class StockPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 500
class StockView(APIView):
def get(self, request, *args, **kwargs):
if request.GET.get('ticker'):
qs = Stock.objects.filter(ticker=request.GET.get('ticker'))
serializer = StockSerializer(qs, many=True)
return Response(serializer.data)
else:
qs = Stock.objects.all()
paginator = StockPagination()
result_page = paginator.paginate_queryset(qs, request)
serializer = StockSerializer(result_page, many=True, context={
'request': request})
return Response(serializer.data, status=HTTP_200_OK)
def post(self, request, *args, **kwargs):
serializer = StockSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors)
<|reserved_special_token_1|>
from django.db.models import manager
from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.utils import serializer_helpers
from rest_framework.views import APIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.status import HTTP_200_OK
from .serializers import StockSerializer
from .models import Stock
# Create your views here.
class TestView(APIView):
def get(self, request, *args, **kwargs):
ans = {
"msg": "Test"
}
return Response(ans)
class StockPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 500
class StockView(APIView):
def get(self, request, *args, **kwargs):
if request.GET.get('ticker'):
qs = Stock.objects.filter(ticker=request.GET.get('ticker'))
serializer = StockSerializer(qs, many=True)
return Response(serializer.data)
else:
qs = Stock.objects.all()
paginator = StockPagination()
result_page = paginator.paginate_queryset(qs, request)
serializer = StockSerializer(result_page, many=True, context={'request': request})
return Response(serializer.data, status=HTTP_200_OK)
def post(self, request, *args, **kwargs):
serializer = StockSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors)
|
flexible
|
{
"blob_id": "34536e3112c8791c8f8d48bb6ffd059c1af38e2f",
"index": 8978,
"step-1": "<mask token>\n\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n\nclass StockView(APIView):\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={\n 'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n",
"step-2": "<mask token>\n\n\nclass TestView(APIView):\n <mask token>\n\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n\nclass StockView(APIView):\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={\n 'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n",
"step-3": "<mask token>\n\n\nclass TestView(APIView):\n\n def get(self, request, *args, **kwargs):\n ans = {'msg': 'Test'}\n return Response(ans)\n\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n\nclass StockView(APIView):\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={\n 'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n",
"step-4": "from django.db.models import manager\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom rest_framework.response import Response\nfrom rest_framework.utils import serializer_helpers\nfrom rest_framework.views import APIView\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.status import HTTP_200_OK\nfrom .serializers import StockSerializer\nfrom .models import Stock\n\n\nclass TestView(APIView):\n\n def get(self, request, *args, **kwargs):\n ans = {'msg': 'Test'}\n return Response(ans)\n\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n\nclass StockView(APIView):\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={\n 'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n",
"step-5": "from django.db.models import manager\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom rest_framework.response import Response\nfrom rest_framework.utils import serializer_helpers\nfrom rest_framework.views import APIView\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.status import HTTP_200_OK\nfrom .serializers import StockSerializer\nfrom .models import Stock\n\n# Create your views here.\nclass TestView(APIView):\n def get(self, request, *args, **kwargs):\n ans = {\n \"msg\": \"Test\"\n }\n return Response(ans)\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\nclass StockView(APIView):\n \n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _camel_to_snake(s):
""" Convert CamelCase to snake_case.
"""
return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_camel_words = re.compile('([A-Z][a-z0-9_]+)')
def _camel_to_snake(s):
""" Convert CamelCase to snake_case.
"""
return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])
<|reserved_special_token_1|>
import re
_camel_words = re.compile('([A-Z][a-z0-9_]+)')
def _camel_to_snake(s):
""" Convert CamelCase to snake_case.
"""
return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])
<|reserved_special_token_1|>
import re
_camel_words = re.compile(r"([A-Z][a-z0-9_]+)")
def _camel_to_snake(s):
""" Convert CamelCase to snake_case.
"""
return "_".join(
[
i.lower() for i in _camel_words.split(s)[1::2]
]
)
|
flexible
|
{
"blob_id": "6c9f9363a95ea7dc97ccb45d0922f0531c5cfec9",
"index": 6572,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef _camel_to_snake(s):\n \"\"\" Convert CamelCase to snake_case.\n \"\"\"\n return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])\n",
"step-3": "<mask token>\n_camel_words = re.compile('([A-Z][a-z0-9_]+)')\n\n\ndef _camel_to_snake(s):\n \"\"\" Convert CamelCase to snake_case.\n \"\"\"\n return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])\n",
"step-4": "import re\n_camel_words = re.compile('([A-Z][a-z0-9_]+)')\n\n\ndef _camel_to_snake(s):\n \"\"\" Convert CamelCase to snake_case.\n \"\"\"\n return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])\n",
"step-5": "import re\n\n\n_camel_words = re.compile(r\"([A-Z][a-z0-9_]+)\")\n\n\ndef _camel_to_snake(s):\n \"\"\" Convert CamelCase to snake_case.\n \"\"\"\n return \"_\".join(\n [\n i.lower() for i in _camel_words.split(s)[1::2]\n ]\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def backtracing(self, arr, start):
if start == len(arr):
self.result.append(''.join(arr))
return
self.backtracing(arr, start + 1)
if arr[start].isalpha():
arr[start] = arr[start].lower() if arr[start].isupper() else arr[
start].upper()
self.backtracing(arr, start + 1)
<|reserved_special_token_1|>
class Solution:
def __init__(self):
self.result = []
<|reserved_special_token_0|>
def backtracing(self, arr, start):
if start == len(arr):
self.result.append(''.join(arr))
return
self.backtracing(arr, start + 1)
if arr[start].isalpha():
arr[start] = arr[start].lower() if arr[start].isupper() else arr[
start].upper()
self.backtracing(arr, start + 1)
<|reserved_special_token_1|>
class Solution:
def __init__(self):
self.result = []
def letterCasePermutation(self, S: str) ->List[str]:
arr = list(S)
self.backtracing(arr, 0)
return self.result
def backtracing(self, arr, start):
if start == len(arr):
self.result.append(''.join(arr))
return
self.backtracing(arr, start + 1)
if arr[start].isalpha():
arr[start] = arr[start].lower() if arr[start].isupper() else arr[
start].upper()
self.backtracing(arr, start + 1)
<|reserved_special_token_1|>
#
# @lc app=leetcode.cn id=784 lang=python3
#
# [784] 字母大小写全排列
#
# @lc code=start
# 回溯法 --> 通过 64 ms 13.5 MB
class Solution:
def __init__(self):
self.result = []
def letterCasePermutation(self, S: str) -> List[str]:
arr = list(S)
self.backtracing(arr, 0)
return self.result
def backtracing(self, arr, start):
if start == len(arr):
self.result.append(''.join(arr))
return
# 把自身递归
self.backtracing(arr, start+1)
# 若是字母,则切换大小写后递归
if arr[start].isalpha():
arr[start] = arr[start].lower() if arr[start].isupper() else arr[start].upper()
self.backtracing(arr, start+1)
# @lc code=end
|
flexible
|
{
"blob_id": "632c690261b31c7ac0e1d90c814e3b9a7a0dcb29",
"index": 7663,
"step-1": "class Solution:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Solution:\n <mask token>\n <mask token>\n\n def backtracing(self, arr, start):\n if start == len(arr):\n self.result.append(''.join(arr))\n return\n self.backtracing(arr, start + 1)\n if arr[start].isalpha():\n arr[start] = arr[start].lower() if arr[start].isupper() else arr[\n start].upper()\n self.backtracing(arr, start + 1)\n",
"step-3": "class Solution:\n\n def __init__(self):\n self.result = []\n <mask token>\n\n def backtracing(self, arr, start):\n if start == len(arr):\n self.result.append(''.join(arr))\n return\n self.backtracing(arr, start + 1)\n if arr[start].isalpha():\n arr[start] = arr[start].lower() if arr[start].isupper() else arr[\n start].upper()\n self.backtracing(arr, start + 1)\n",
"step-4": "class Solution:\n\n def __init__(self):\n self.result = []\n\n def letterCasePermutation(self, S: str) ->List[str]:\n arr = list(S)\n self.backtracing(arr, 0)\n return self.result\n\n def backtracing(self, arr, start):\n if start == len(arr):\n self.result.append(''.join(arr))\n return\n self.backtracing(arr, start + 1)\n if arr[start].isalpha():\n arr[start] = arr[start].lower() if arr[start].isupper() else arr[\n start].upper()\n self.backtracing(arr, start + 1)\n",
"step-5": "#\n# @lc app=leetcode.cn id=784 lang=python3\n#\n# [784] 字母大小写全排列\n#\n\n# @lc code=start\n# 回溯法 --> 通过 64 ms 13.5 MB\nclass Solution:\n def __init__(self):\n self.result = []\n\n def letterCasePermutation(self, S: str) -> List[str]:\n arr = list(S)\n self.backtracing(arr, 0)\n return self.result\n\n def backtracing(self, arr, start):\n if start == len(arr):\n self.result.append(''.join(arr))\n return\n # 把自身递归\n self.backtracing(arr, start+1)\n # 若是字母,则切换大小写后递归\n if arr[start].isalpha():\n arr[start] = arr[start].lower() if arr[start].isupper() else arr[start].upper()\n self.backtracing(arr, start+1)\n \n# @lc code=end\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
water = 400
milk = 540
coffee = 120
cups = 9
money = 550
def buying():
global water
global coffee
global cups
global milk
global money
choice_coffee = input("What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:")
if choice_coffee == "1":
if water > 250 and coffee > 16 and cups > 1:
print("I have enough resources, making you a coffee!")
water -= 250
coffee -= 16
cups -= 1
money += 4
coffee_machine()
elif choice_coffee == "2":
if water > 350 and coffee > 16 and cups > 1 and milk > 75:
print("I have enough resources, making you a coffee!")
water -= 350
milk -= 75
coffee -= 20
cups -= 1
money += 7
elif water < 350:
print("Sorry, not enough water!")
coffee_machine()
elif choice_coffee == "3":
if water > 200 and coffee > 12 and cups > 1 and milk > 100:
print("I have enough resources, making you a coffee!")
water -= 200
milk -= 100
coffee -= 12
cups -= 1
money += 6
coffee_machine()
elif choice_coffee == "back":
coffee_machine()
def filling():
global water
global coffee
global cups
global milk
water_fill = int(input("Write how many ml of water do you want to add:"))
milk_fill = int(input("Write how many ml of milk do you want to add:"))
coffee_fill = int(input("Write how many grams of coffee beans do you want to add:"))
cups_fill = int(input("Write how many disposable cups of coffee do you want to add:"))
water += water_fill
milk += milk_fill
coffee += coffee_fill
cups += cups_fill
coffee_machine()
def taking():
global money
print("I gave you $" + str(money))
money = 0
coffee_machine()
def stats_print():
print("The coffee machine has:")
print(str(water) + " of water")
print(str(milk) + " of milk")
print(str(coffee) + " of coffee beans")
print(str(cups) + " of disposable cups")
print(str(money) + " of money")
def coffee_machine():
user_action = input("Write action (buy, fill, take, remaining, exit):")
if user_action == "buy":
buying()
elif user_action == "fill":
filling()
elif user_action == "take":
taking()
elif user_action == "remaining":
stats_print()
coffee_machine()
elif user_action == "exit":
return
coffee_machine()
|
normal
|
{
"blob_id": "4e98ebd040297cb9472368478452bc484e0aaa04",
"index": 3255,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, fill, take, remaining, exit):')\n if user_action == 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef buying():\n global water\n global coffee\n global cups\n global milk\n global money\n choice_coffee = input(\n 'What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:'\n )\n if choice_coffee == '1':\n if water > 250 and coffee > 16 and cups > 1:\n print('I have enough resources, making you a coffee!')\n water -= 250\n coffee -= 16\n cups -= 1\n money += 4\n coffee_machine()\n elif choice_coffee == '2':\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\n print('I have enough resources, making you a coffee!')\n water -= 350\n milk -= 75\n coffee -= 20\n cups -= 1\n money += 7\n elif water < 350:\n print('Sorry, not enough water!')\n coffee_machine()\n elif choice_coffee == '3':\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\n print('I have enough resources, making you a coffee!')\n water -= 200\n milk -= 100\n coffee -= 12\n cups -= 1\n money += 6\n coffee_machine()\n elif choice_coffee == 'back':\n coffee_machine()\n\n\ndef filling():\n global water\n global coffee\n global cups\n global milk\n water_fill = int(input('Write how many ml of water do you want to add:'))\n milk_fill = int(input('Write how many ml of milk do you want to add:'))\n coffee_fill = int(input(\n 'Write how many grams of coffee beans do you want to add:'))\n cups_fill = int(input(\n 'Write how many disposable cups of coffee do you want to add:'))\n water += water_fill\n milk += milk_fill\n coffee += coffee_fill\n cups += cups_fill\n coffee_machine()\n\n\ndef taking():\n global money\n print('I gave you $' + str(money))\n money = 0\n coffee_machine()\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, fill, take, remaining, exit):')\n if user_action 
== 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\ncoffee_machine()\n",
"step-4": "water = 400\nmilk = 540\ncoffee = 120\ncups = 9\nmoney = 550\n\n\ndef buying():\n global water\n global coffee\n global cups\n global milk\n global money\n choice_coffee = input(\n 'What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:'\n )\n if choice_coffee == '1':\n if water > 250 and coffee > 16 and cups > 1:\n print('I have enough resources, making you a coffee!')\n water -= 250\n coffee -= 16\n cups -= 1\n money += 4\n coffee_machine()\n elif choice_coffee == '2':\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\n print('I have enough resources, making you a coffee!')\n water -= 350\n milk -= 75\n coffee -= 20\n cups -= 1\n money += 7\n elif water < 350:\n print('Sorry, not enough water!')\n coffee_machine()\n elif choice_coffee == '3':\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\n print('I have enough resources, making you a coffee!')\n water -= 200\n milk -= 100\n coffee -= 12\n cups -= 1\n money += 6\n coffee_machine()\n elif choice_coffee == 'back':\n coffee_machine()\n\n\ndef filling():\n global water\n global coffee\n global cups\n global milk\n water_fill = int(input('Write how many ml of water do you want to add:'))\n milk_fill = int(input('Write how many ml of milk do you want to add:'))\n coffee_fill = int(input(\n 'Write how many grams of coffee beans do you want to add:'))\n cups_fill = int(input(\n 'Write how many disposable cups of coffee do you want to add:'))\n water += water_fill\n milk += milk_fill\n coffee += coffee_fill\n cups += cups_fill\n coffee_machine()\n\n\ndef taking():\n global money\n print('I gave you $' + str(money))\n money = 0\n coffee_machine()\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, 
fill, take, remaining, exit):')\n if user_action == 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\ncoffee_machine()\n",
"step-5": "water = 400\r\nmilk = 540\r\ncoffee = 120\r\ncups = 9\r\nmoney = 550\r\n\r\n\r\ndef buying():\r\n global water\r\n global coffee\r\n global cups\r\n global milk\r\n global money\r\n choice_coffee = input(\"What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:\")\r\n if choice_coffee == \"1\":\r\n if water > 250 and coffee > 16 and cups > 1:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 250\r\n coffee -= 16\r\n cups -= 1\r\n money += 4\r\n coffee_machine()\r\n elif choice_coffee == \"2\":\r\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 350\r\n milk -= 75\r\n coffee -= 20\r\n cups -= 1\r\n money += 7\r\n elif water < 350:\r\n print(\"Sorry, not enough water!\")\r\n coffee_machine()\r\n elif choice_coffee == \"3\":\r\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 200\r\n milk -= 100\r\n coffee -= 12\r\n cups -= 1\r\n money += 6\r\n coffee_machine()\r\n elif choice_coffee == \"back\":\r\n coffee_machine()\r\n\r\n\r\ndef filling():\r\n global water\r\n global coffee\r\n global cups\r\n global milk\r\n water_fill = int(input(\"Write how many ml of water do you want to add:\"))\r\n milk_fill = int(input(\"Write how many ml of milk do you want to add:\"))\r\n coffee_fill = int(input(\"Write how many grams of coffee beans do you want to add:\"))\r\n cups_fill = int(input(\"Write how many disposable cups of coffee do you want to add:\"))\r\n water += water_fill\r\n milk += milk_fill\r\n coffee += coffee_fill\r\n cups += cups_fill\r\n coffee_machine()\r\n\r\n\r\ndef taking():\r\n global money\r\n print(\"I gave you $\" + str(money))\r\n money = 0\r\n coffee_machine()\r\n\r\n\r\ndef stats_print():\r\n print(\"The coffee machine has:\")\r\n print(str(water) + \" of water\")\r\n print(str(milk) + \" of milk\")\r\n print(str(coffee) + 
\" of coffee beans\")\r\n print(str(cups) + \" of disposable cups\")\r\n print(str(money) + \" of money\")\r\n\r\n\r\ndef coffee_machine():\r\n user_action = input(\"Write action (buy, fill, take, remaining, exit):\")\r\n if user_action == \"buy\":\r\n buying()\r\n elif user_action == \"fill\":\r\n filling()\r\n elif user_action == \"take\":\r\n taking()\r\n elif user_action == \"remaining\":\r\n stats_print()\r\n coffee_machine()\r\n elif user_action == \"exit\":\r\n return\r\n\r\n\r\ncoffee_machine()",
"step-ids": [
0,
2,
6,
7,
8
]
}
|
[
0,
2,
6,
7,
8
] |
<|reserved_special_token_0|>
def code_pre_block(func):
"""
formats a code block according to rst format
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
block = func(*args, **kwargs)
new_block = '\n.. code-block::\n\n'
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def source_block(func):
"""
formats code from <source lang="some_language"> blocks
where the language is optional
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
lang, block = func(*args, **kwargs)
new_block = f"\n\n.. code-block:: {lang or ''}\n\n"
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def list_block(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
items = func(*args, **kwargs)
new_list = '\n'
prev_indent = 0
sub_list_started = False
for line in items.split('\n'):
num_markers = get_num_markers(line)
indent_by = (num_markers - 1) * 2
def get_printable_part(string):
"""
trim out up to a colon or semi-colon after a # list marker
"""
return string[num_markers + 1:].strip() if string[num_markers
] in [':', ';', '*'] else string[num_markers:].strip()
if line[num_markers] == '*':
if not sub_list_started:
new_list += (
f"\n{' ' * num_markers * 2}* {get_printable_part(line)}\n"
)
sub_list_started = True
else:
new_list += (
f"{' ' * num_markers * 2}* {get_printable_part(line)}\n"
)
continue
sub_list_started = False
if line[num_markers] in [':', ';']:
line = f"{' ' * num_markers * 2}{get_printable_part(line)}"
else:
line = f"{' ' * indent_by}* {get_printable_part(line)}"
if indent_by != prev_indent:
line = f'\n{line}'
prev_indent = indent_by
new_list += f'{line}\n'
return new_list
return wrapper
def get_num_markers(string):
indent_by = 0
for i in range(len(string)):
if string[i] == '#':
indent_by += 1
else:
break
return indent_by
@list_block
def list_block_converter(match_group):
return match_group.group(1)
@code_pre_block
def code_pre_block_converter(match_group):
return match_group.group(2)
@source_block
def source_block_converter(match_group):
"""
formats a code block from <source lang="some_language">
the language part is optional
"""
return match_group.group(1), match_group.group(2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def heading(*, marker=''):
"""
Add a new line with the same number of heading markers as the characters in the title
Need to specify marker to one of the valid rst line markups
"""
def wrapper_heading(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
title = func(*args, **kwargs)
class_obj, passed_title = args
title = title.strip()
return f'\n{title}\n{marker * len(title)}\n' if passed_title.strip(
) != title else passed_title
return wrapper
return wrapper_heading
def code_pre_block(func):
"""
formats a code block according to rst format
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
block = func(*args, **kwargs)
new_block = '\n.. code-block::\n\n'
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def source_block(func):
"""
formats code from <source lang="some_language"> blocks
where the language is optional
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
lang, block = func(*args, **kwargs)
new_block = f"\n\n.. code-block:: {lang or ''}\n\n"
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def list_block(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
items = func(*args, **kwargs)
new_list = '\n'
prev_indent = 0
sub_list_started = False
for line in items.split('\n'):
num_markers = get_num_markers(line)
indent_by = (num_markers - 1) * 2
def get_printable_part(string):
"""
trim out up to a colon or semi-colon after a # list marker
"""
return string[num_markers + 1:].strip() if string[num_markers
] in [':', ';', '*'] else string[num_markers:].strip()
if line[num_markers] == '*':
if not sub_list_started:
new_list += (
f"\n{' ' * num_markers * 2}* {get_printable_part(line)}\n"
)
sub_list_started = True
else:
new_list += (
f"{' ' * num_markers * 2}* {get_printable_part(line)}\n"
)
continue
sub_list_started = False
if line[num_markers] in [':', ';']:
line = f"{' ' * num_markers * 2}{get_printable_part(line)}"
else:
line = f"{' ' * indent_by}* {get_printable_part(line)}"
if indent_by != prev_indent:
line = f'\n{line}'
prev_indent = indent_by
new_list += f'{line}\n'
return new_list
return wrapper
def get_num_markers(string):
indent_by = 0
for i in range(len(string)):
if string[i] == '#':
indent_by += 1
else:
break
return indent_by
@list_block
def list_block_converter(match_group):
return match_group.group(1)
@code_pre_block
def code_pre_block_converter(match_group):
return match_group.group(2)
@source_block
def source_block_converter(match_group):
"""
formats a code block from <source lang="some_language">
the language part is optional
"""
return match_group.group(1), match_group.group(2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def heading(*, marker=''):
"""
Add a new line with the same number of heading markers as the characters in the title
Need to specify marker to one of the valid rst line markups
"""
def wrapper_heading(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
title = func(*args, **kwargs)
class_obj, passed_title = args
title = title.strip()
return f'\n{title}\n{marker * len(title)}\n' if passed_title.strip(
) != title else passed_title
return wrapper
return wrapper_heading
def code_pre_block(func):
"""
formats a code block according to rst format
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
block = func(*args, **kwargs)
new_block = '\n.. code-block::\n\n'
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def source_block(func):
"""
formats code from <source lang="some_language"> blocks
where the language is optional
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
lang, block = func(*args, **kwargs)
new_block = f"\n\n.. code-block:: {lang or ''}\n\n"
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def list_block(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
items = func(*args, **kwargs)
new_list = '\n'
prev_indent = 0
sub_list_started = False
for line in items.split('\n'):
num_markers = get_num_markers(line)
indent_by = (num_markers - 1) * 2
def get_printable_part(string):
"""
trim out up to a colon or semi-colon after a # list marker
"""
return string[num_markers + 1:].strip() if string[num_markers
] in [':', ';', '*'] else string[num_markers:].strip()
if line[num_markers] == '*':
if not sub_list_started:
new_list += (
f"\n{' ' * num_markers * 2}* {get_printable_part(line)}\n"
)
sub_list_started = True
else:
new_list += (
f"{' ' * num_markers * 2}* {get_printable_part(line)}\n"
)
continue
sub_list_started = False
if line[num_markers] in [':', ';']:
line = f"{' ' * num_markers * 2}{get_printable_part(line)}"
else:
line = f"{' ' * indent_by}* {get_printable_part(line)}"
if indent_by != prev_indent:
line = f'\n{line}'
prev_indent = indent_by
new_list += f'{line}\n'
return new_list
return wrapper
def get_num_markers(string):
indent_by = 0
for i in range(len(string)):
if string[i] == '#':
indent_by += 1
else:
break
return indent_by
@list_block
def list_block_converter(match_group):
return match_group.group(1)
@code_pre_block
def code_pre_block_converter(match_group):
return match_group.group(2)
@source_block
def source_block_converter(match_group):
"""
formats a code block from <source lang="some_language">
the language part is optional
"""
return match_group.group(1), match_group.group(2)
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
import functools
import re
from pprint import pprint
def heading(*, marker=''):
"""
Add a new line with the same number of heading markers as the characters in the title
Need to specify marker to one of the valid rst line markups
"""
def wrapper_heading(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
title = func(*args, **kwargs)
class_obj, passed_title = args
title = title.strip()
return f'\n{title}\n{marker * len(title)}\n' if passed_title.strip(
) != title else passed_title
return wrapper
return wrapper_heading
def code_pre_block(func):
"""
formats a code block according to rst format
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
block = func(*args, **kwargs)
new_block = '\n.. code-block::\n\n'
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def source_block(func):
"""
formats code from <source lang="some_language"> blocks
where the language is optional
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
lang, block = func(*args, **kwargs)
new_block = f"\n\n.. code-block:: {lang or ''}\n\n"
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def list_block(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
items = func(*args, **kwargs)
new_list = '\n'
prev_indent = 0
sub_list_started = False
for line in items.split('\n'):
num_markers = get_num_markers(line)
indent_by = (num_markers - 1) * 2
def get_printable_part(string):
"""
trim out up to a colon or semi-colon after a # list marker
"""
return string[num_markers + 1:].strip() if string[num_markers
] in [':', ';', '*'] else string[num_markers:].strip()
if line[num_markers] == '*':
if not sub_list_started:
new_list += (
f"\n{' ' * num_markers * 2}* {get_printable_part(line)}\n"
)
sub_list_started = True
else:
new_list += (
f"{' ' * num_markers * 2}* {get_printable_part(line)}\n"
)
continue
sub_list_started = False
if line[num_markers] in [':', ';']:
line = f"{' ' * num_markers * 2}{get_printable_part(line)}"
else:
line = f"{' ' * indent_by}* {get_printable_part(line)}"
if indent_by != prev_indent:
line = f'\n{line}'
prev_indent = indent_by
new_list += f'{line}\n'
return new_list
return wrapper
def get_num_markers(string):
indent_by = 0
for i in range(len(string)):
if string[i] == '#':
indent_by += 1
else:
break
return indent_by
@list_block
def list_block_converter(match_group):
return match_group.group(1)
@code_pre_block
def code_pre_block_converter(match_group):
return match_group.group(2)
@source_block
def source_block_converter(match_group):
"""
formats a code block from <source lang="some_language">
the language part is optional
"""
return match_group.group(1), match_group.group(2)
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
import functools
import re
from pprint import pprint
def heading(*, marker=''):
'''
Add a new line with the same number of heading markers as the characters in the title
Need to specify marker to one of the valid rst line markups
'''
def wrapper_heading(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
title = func(*args, **kwargs)
class_obj, passed_title, = args
title = title.strip()
return f'\n{title}\n{marker*len(title)}\n' if passed_title.strip() != title else passed_title
return wrapper
return wrapper_heading
def code_pre_block(func):
    """
    Decorator that renders the wrapped function's return value as an rst
    ``.. code-block::`` directive, indenting every line by four spaces.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        raw = func(*args, **kwargs)
        indented = ''.join(f'    {row}\n' for row in raw.split('\n'))
        return '\n.. code-block::\n\n' + indented
    return wrapper
def source_block(func):
    """
    Decorator that renders code taken from ``<source lang="...">`` blocks as
    an rst ``.. code-block::`` directive; the language part is optional.

    The wrapped function must return a ``(lang, block)`` tuple, where *lang*
    may be ``None``/empty.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        lang, raw = func(*args, **kwargs)
        header = f"\n\n.. code-block:: {lang or ''}\n\n"
        body = ''.join(f'    {row}\n' for row in raw.split('\n'))
        return header + body
    return wrapper
def list_block(func):
    """
    Decorator that converts a MediaWiki-style ``#`` list (returned by the
    wrapped function as one newline-separated string) into an rst list.

    Nesting depth is the count of leading ``#`` markers. After the markers,
    ``*`` starts a bullet sub-list, while ``:`` / ``;`` mark a continuation
    line that is indented under the previous item without its own bullet.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        items = func(*args, **kwargs)
        new_list = '\n'
        prev_indent = 0
        sub_list_started = False
        for line in items.split('\n'):
            num_markers = get_num_markers(line)  # how many leading '#' there are
            indent_by = (num_markers - 1) * 2  # no indentation for the first level
            def get_printable_part(string):
                '''
                trim out up to a colon or semi-colon after a # list marker
                '''
                return string[num_markers+1:].strip() if string[num_markers] in [':', ';', '*'] else string[num_markers:].strip()
            # NOTE(review): line[num_markers] raises IndexError for an empty
            # line or a line made up entirely of '#' characters — presumably
            # callers only pass well-formed list markup; verify upstream.
            if line[num_markers] == '*':  # bullet sub-list item
                if not sub_list_started:
                    # the first bullet of a sub-list is preceded by a blank line
                    new_list += f'\n{" " * num_markers*2}* {get_printable_part(line)}\n'
                    sub_list_started = True
                else:
                    new_list += f'{" " * num_markers*2}* {get_printable_part(line)}\n'
                continue
            sub_list_started = False
            if line[num_markers] in [':', ';']:
                # ':'/';' continue the previous list item: indent only, no bullet
                line = f'{" " * num_markers*2}{get_printable_part(line)}'
            else:
                line = f'{" " * indent_by}* {get_printable_part(line)}'
            if indent_by != prev_indent:  # starting a new level or going back to an old one
                line = f'\n{line}'  # a new level starts on a fresh line
            prev_indent = indent_by
            new_list += f'{line}\n'
        return new_list
    return wrapper
def get_num_markers(string):
    """Return the number of consecutive '#' characters at the start of *string*."""
    return len(string) - len(string.lstrip('#'))
@list_block
def list_block_converter(match_group):
    """Feed the captured wiki list (capture group 1) through the rst list formatter."""
    return match_group[1]
@code_pre_block
def code_pre_block_converter(match_group):
    """Feed the captured code body (capture group 2) through the code-block formatter."""
    return match_group[2]
@source_block
def source_block_converter(match_group):
    """
    Format a code block captured from ``<source lang="...">``.

    Capture group 1 is the (optional) language, group 2 the code itself.
    """
    lang = match_group.group(1)
    body = match_group.group(2)
    return lang, body
if __name__ == '__main__':
    pass  # module is import-only for now; no CLI entry point
|
flexible
|
{
"blob_id": "d1b2420778e788d78be2a12a27c80f5fa1b15a0f",
"index": 465,
"step-1": "<mask token>\n\n\ndef code_pre_block(func):\n \"\"\"\n formats a code block according to rst format\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef source_block(func):\n \"\"\"\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f\"\\n\\n.. code-block:: {lang or ''}\\n\\n\"\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef list_block(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line)\n indent_by = (num_markers - 1) * 2\n\n def get_printable_part(string):\n \"\"\"\n trim out up to a colon or semi-colon after a # list marker\n \"\"\"\n return string[num_markers + 1:].strip() if string[num_markers\n ] in [':', ';', '*'] else string[num_markers:].strip()\n if line[num_markers] == '*':\n if not sub_list_started:\n new_list += (\n f\"\\n{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n sub_list_started = True\n else:\n new_list += (\n f\"{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f\"{' ' * num_markers * 2}{get_printable_part(line)}\"\n else:\n line = f\"{' ' * indent_by}* {get_printable_part(line)}\"\n if indent_by != prev_indent:\n line = f'\\n{line}'\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 
1\n else:\n break\n return indent_by\n\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n\n@source_block\ndef source_block_converter(match_group):\n \"\"\"\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n \"\"\"\n return match_group.group(1), match_group.group(2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef heading(*, marker=''):\n \"\"\"\n Add a new line with the same number of heading markers as the characters in the title\n Need to specify marker to one of the valid rst line markups\n \"\"\"\n\n def wrapper_heading(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n title = func(*args, **kwargs)\n class_obj, passed_title = args\n title = title.strip()\n return f'\\n{title}\\n{marker * len(title)}\\n' if passed_title.strip(\n ) != title else passed_title\n return wrapper\n return wrapper_heading\n\n\ndef code_pre_block(func):\n \"\"\"\n formats a code block according to rst format\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef source_block(func):\n \"\"\"\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f\"\\n\\n.. 
code-block:: {lang or ''}\\n\\n\"\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef list_block(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line)\n indent_by = (num_markers - 1) * 2\n\n def get_printable_part(string):\n \"\"\"\n trim out up to a colon or semi-colon after a # list marker\n \"\"\"\n return string[num_markers + 1:].strip() if string[num_markers\n ] in [':', ';', '*'] else string[num_markers:].strip()\n if line[num_markers] == '*':\n if not sub_list_started:\n new_list += (\n f\"\\n{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n sub_list_started = True\n else:\n new_list += (\n f\"{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f\"{' ' * num_markers * 2}{get_printable_part(line)}\"\n else:\n line = f\"{' ' * indent_by}* {get_printable_part(line)}\"\n if indent_by != prev_indent:\n line = f'\\n{line}'\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 1\n else:\n break\n return indent_by\n\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n\n@source_block\ndef source_block_converter(match_group):\n \"\"\"\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n \"\"\"\n return match_group.group(1), match_group.group(2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef heading(*, marker=''):\n \"\"\"\n Add a new line with the same number of heading markers as the characters in the title\n Need to specify marker to one of the valid rst line markups\n \"\"\"\n\n def wrapper_heading(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n title = func(*args, **kwargs)\n class_obj, passed_title = args\n title = title.strip()\n return f'\\n{title}\\n{marker * len(title)}\\n' if passed_title.strip(\n ) != title else passed_title\n return wrapper\n return wrapper_heading\n\n\ndef code_pre_block(func):\n \"\"\"\n formats a code block according to rst format\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef source_block(func):\n \"\"\"\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f\"\\n\\n.. 
code-block:: {lang or ''}\\n\\n\"\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef list_block(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line)\n indent_by = (num_markers - 1) * 2\n\n def get_printable_part(string):\n \"\"\"\n trim out up to a colon or semi-colon after a # list marker\n \"\"\"\n return string[num_markers + 1:].strip() if string[num_markers\n ] in [':', ';', '*'] else string[num_markers:].strip()\n if line[num_markers] == '*':\n if not sub_list_started:\n new_list += (\n f\"\\n{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n sub_list_started = True\n else:\n new_list += (\n f\"{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f\"{' ' * num_markers * 2}{get_printable_part(line)}\"\n else:\n line = f\"{' ' * indent_by}* {get_printable_part(line)}\"\n if indent_by != prev_indent:\n line = f'\\n{line}'\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 1\n else:\n break\n return indent_by\n\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n\n@source_block\ndef source_block_converter(match_group):\n \"\"\"\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n \"\"\"\n return match_group.group(1), match_group.group(2)\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "import functools\nimport re\nfrom pprint import pprint\n\n\ndef heading(*, marker=''):\n \"\"\"\n Add a new line with the same number of heading markers as the characters in the title\n Need to specify marker to one of the valid rst line markups\n \"\"\"\n\n def wrapper_heading(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n title = func(*args, **kwargs)\n class_obj, passed_title = args\n title = title.strip()\n return f'\\n{title}\\n{marker * len(title)}\\n' if passed_title.strip(\n ) != title else passed_title\n return wrapper\n return wrapper_heading\n\n\ndef code_pre_block(func):\n \"\"\"\n formats a code block according to rst format\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef source_block(func):\n \"\"\"\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f\"\\n\\n.. 
code-block:: {lang or ''}\\n\\n\"\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef list_block(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line)\n indent_by = (num_markers - 1) * 2\n\n def get_printable_part(string):\n \"\"\"\n trim out up to a colon or semi-colon after a # list marker\n \"\"\"\n return string[num_markers + 1:].strip() if string[num_markers\n ] in [':', ';', '*'] else string[num_markers:].strip()\n if line[num_markers] == '*':\n if not sub_list_started:\n new_list += (\n f\"\\n{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n sub_list_started = True\n else:\n new_list += (\n f\"{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f\"{' ' * num_markers * 2}{get_printable_part(line)}\"\n else:\n line = f\"{' ' * indent_by}* {get_printable_part(line)}\"\n if indent_by != prev_indent:\n line = f'\\n{line}'\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 1\n else:\n break\n return indent_by\n\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n\n@source_block\ndef source_block_converter(match_group):\n \"\"\"\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n \"\"\"\n return match_group.group(1), match_group.group(2)\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "import functools\nimport re\nfrom pprint import pprint\n\ndef heading(*, marker=''):\n '''\n Add a new line with the same number of heading markers as the characters in the title\n Need to specify marker to one of the valid rst line markups\n '''\n def wrapper_heading(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n title = func(*args, **kwargs)\n class_obj, passed_title, = args\n title = title.strip()\n return f'\\n{title}\\n{marker*len(title)}\\n' if passed_title.strip() != title else passed_title\n return wrapper\n return wrapper_heading\n\ndef code_pre_block(func):\n '''\n formats a code block according to rst format\n '''\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\ndef source_block(func):\n '''\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n '''\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f'\\n\\n.. 
code-block:: {lang or \"\"}\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\ndef list_block(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line) # how many # there are\n indent_by = (num_markers - 1) * 2 # no indentation for first level\n def get_printable_part(string):\n '''\n trim out up to a colon or semi-colon after a # list marker\n '''\n return string[num_markers+1:].strip() if string[num_markers] in [':', ';', '*'] else string[num_markers:].strip()\n # if # is followed by ; or :, it is a continuation of the previous list item\n # this can just be indented\n if line[num_markers] == '*': # bullet list item\n if not sub_list_started:\n new_list += f'\\n{\" \" * num_markers*2}* {get_printable_part(line)}\\n'\n sub_list_started = True\n else:\n new_list += f'{\" \" * num_markers*2}* {get_printable_part(line)}\\n'\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f'{\" \" * num_markers*2}{get_printable_part(line)}'\n else:\n line = f'{\" \" * indent_by}* {get_printable_part(line)}'\n if indent_by != prev_indent: # starting a new level or going back to old level\n line = f'\\n{line}' # new level starts a new line\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 1\n else:\n break\n return indent_by\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n@source_block\ndef source_block_converter(match_group):\n '''\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n '''\n return 
(match_group.group(1), match_group.group(2))\n\nif __name__ == '__main__':\n pass",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
from python_logging.Demo_CustomLogger import CustomLogger
CustomLogger.init_log()
# CustomLogger.info()
log_str = '%s/%s/%s\n' % ("demo1", "demo2", "demo3")
CustomLogger.info('[main]', log_str)
|
normal
|
{
"blob_id": "ed5653455062cb3468c232cf0fa3f1d18793626a",
"index": 591,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nCustomLogger.init_log()\n<mask token>\nCustomLogger.info('[main]', log_str)\n",
"step-3": "<mask token>\nCustomLogger.init_log()\nlog_str = '%s/%s/%s\\n' % ('demo1', 'demo2', 'demo3')\nCustomLogger.info('[main]', log_str)\n",
"step-4": "from python_logging.Demo_CustomLogger import CustomLogger\nCustomLogger.init_log()\nlog_str = '%s/%s/%s\\n' % ('demo1', 'demo2', 'demo3')\nCustomLogger.info('[main]', log_str)\n",
"step-5": "from python_logging.Demo_CustomLogger import CustomLogger\n\nCustomLogger.init_log()\n# CustomLogger.info()\nlog_str = '%s/%s/%s\\n' % (\"demo1\", \"demo2\", \"demo3\")\nCustomLogger.info('[main]', log_str)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Массив размером 2m + 1, где m — натуральное число, заполнен случайным образом. Найдите в массиве медиану.
Медианой называется элемент ряда, делящий его на две равные части:
в одной находятся элементы, которые не меньше медианы, в другой — не больше медианы.
Примечание: задачу можно решить без сортировки исходного массива.
Но если это слишком сложно, используйте метод сортировки, который не рассматривался на уроках
(сортировка слиянием также недопустима).
"""
"""В этой задаче как раз могла бы пригодиться быстрая сортировка Хоара или слиянием.
"Но без них не знаю, как можно написать более менее оптимизировано"""
import random
m = random.randint(5, 10)
# "одномерный вещественный массив, заданный случайными числами на промежутке [0; 50)" - т.е. [0; 49].
# Не знаю, важно ли это. uniform включает последнее число, в отличии от range и большинства прочих функций
# Для лучшей читабельности добавил округление
mas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]
print(f'Исходный список: {mas}')
# Через сортировку кучей
def heapify(array, size, ind):
largest = ind
left = (2 * ind) + 1
right = (2 * ind) + 2
if left < size and array[left] > array[largest]:
largest = left
if right < size and array[right] > array[largest]:
largest = right
if largest != ind:
array[ind], array[largest] = array[largest], array[ind]
heapify(array, size, largest)
def heap_sort(array):
n = len(array)
for i in range(n, -1, -1):
heapify(array, n, i)
for i in range(n - 1, 0, -1):
array[i], array[0] = array[0], array[i]
heapify(array, i, 0)
heap_sort(mas)
print(f'Отсортированный список по возрастанию: {mas}')
print(f'Медиана: {mas[len(mas) // 2]}')
# Читерский вариант :)
import statistics
print(statistics.median(mas))
|
normal
|
{
"blob_id": "fbcbad9f64c0f9b68e29afde01f3a4fdba012e10",
"index": 4868,
"step-1": "<mask token>\n\n\ndef heapify(array, size, ind):\n largest = ind\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < size and array[left] > array[largest]:\n largest = left\n if right < size and array[right] > array[largest]:\n largest = right\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(f'Исходный список: {mas}')\n\n\ndef heapify(array, size, ind):\n largest = ind\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < size and array[left] > array[largest]:\n largest = left\n if right < size and array[right] > array[largest]:\n largest = right\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\nheap_sort(mas)\nprint(f'Отсортированный список по возрастанию: {mas}')\nprint(f'Медиана: {mas[len(mas) // 2]}')\n<mask token>\nprint(statistics.median(mas))\n",
"step-3": "<mask token>\nm = random.randint(5, 10)\nmas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]\nprint(f'Исходный список: {mas}')\n\n\ndef heapify(array, size, ind):\n largest = ind\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < size and array[left] > array[largest]:\n largest = left\n if right < size and array[right] > array[largest]:\n largest = right\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\nheap_sort(mas)\nprint(f'Отсортированный список по возрастанию: {mas}')\nprint(f'Медиана: {mas[len(mas) // 2]}')\n<mask token>\nprint(statistics.median(mas))\n",
"step-4": "<mask token>\nimport random\nm = random.randint(5, 10)\nmas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]\nprint(f'Исходный список: {mas}')\n\n\ndef heapify(array, size, ind):\n largest = ind\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < size and array[left] > array[largest]:\n largest = left\n if right < size and array[right] > array[largest]:\n largest = right\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\nheap_sort(mas)\nprint(f'Отсортированный список по возрастанию: {mas}')\nprint(f'Медиана: {mas[len(mas) // 2]}')\nimport statistics\nprint(statistics.median(mas))\n",
"step-5": "\"\"\"\nМассив размером 2m + 1, где m — натуральное число, заполнен случайным образом. Найдите в массиве медиану.\nМедианой называется элемент ряда, делящий его на две равные части:\nв одной находятся элементы, которые не меньше медианы, в другой — не больше медианы.\nПримечание: задачу можно решить без сортировки исходного массива.\nНо если это слишком сложно, используйте метод сортировки, который не рассматривался на уроках\n(сортировка слиянием также недопустима).\n\"\"\"\n\n\n\"\"\"В этой задаче как раз могла бы пригодиться быстрая сортировка Хоара или слиянием.\n\"Но без них не знаю, как можно написать более менее оптимизировано\"\"\"\n\nimport random\n\nm = random.randint(5, 10)\n# \"одномерный вещественный массив, заданный случайными числами на промежутке [0; 50)\" - т.е. [0; 49].\n# Не знаю, важно ли это. uniform включает последнее число, в отличии от range и большинства прочих функций\n# Для лучшей читабельности добавил округление\nmas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]\nprint(f'Исходный список: {mas}')\n\n\n# Через сортировку кучей\ndef heapify(array, size, ind):\n largest = ind\n left = (2 * ind) + 1\n right = (2 * ind) + 2\n\n if left < size and array[left] > array[largest]:\n largest = left\n\n if right < size and array[right] > array[largest]:\n largest = right\n\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\nheap_sort(mas)\nprint(f'Отсортированный список по возрастанию: {mas}')\nprint(f'Медиана: {mas[len(mas) // 2]}')\n\n\n# Читерский вариант :)\nimport statistics\n\nprint(statistics.median(mas))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def china_lunar():
today = str(date.today())
today_list = today.split('-')
lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(
datetime.datetime.now().month), int(datetime.datetime.now().day))
if lunar_day.Lleap:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today, china_day
<|reserved_special_token_0|>
def news_put():
news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1
] + '\n' + morning_news()
return news_spider_message
<|reserved_special_token_0|>
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society, headers=headers)
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath(
'//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
newss_1 = ''
number = 1
for t in title:
newss = str(number) + ':' + str_list(t.text
) + '。' + '\n' + ' 概要:' + str_list(news[title.index(t)].text
) + '。' + '\n' + ' 详情:' + '\n' + '\n'
newss_1 += newss
number += 1
return newss_1
<|reserved_special_token_0|>
def str_list(t):
m = ''
for i in list(t):
if i == '中':
china = 'Z'
m += china + '_'
else:
m += i + '_'
return m
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def china_lunar():
today = str(date.today())
today_list = today.split('-')
lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(
datetime.datetime.now().month), int(datetime.datetime.now().day))
if lunar_day.Lleap:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today, china_day
<|reserved_special_token_0|>
def morning_news():
news_api = (
'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'
)
response = requests.get(news_api)
print(dict(response.json()))
news_list = dict(response.json())
news = ''
m = 1
news_q = ''
for i in news_list['newslist']:
img_url = ''
if i['url'] == '':
img_url = i['imgsrc']
news = str(str(m) + ':' + i['title'] + '\n' + i['url'] + img_url + '\n'
)
news_q += str(news)
m += 1
return news_q
def news_put():
news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1
] + '\n' + morning_news()
return news_spider_message
<|reserved_special_token_0|>
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society, headers=headers)
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath(
'//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
newss_1 = ''
number = 1
for t in title:
newss = str(number) + ':' + str_list(t.text
) + '。' + '\n' + ' 概要:' + str_list(news[title.index(t)].text
) + '。' + '\n' + ' 详情:' + '\n' + '\n'
newss_1 += newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[
1] + '\n' + NewYork_news(page)
return news_spider_message
def str_list(t):
m = ''
for i in list(t):
if i == '中':
china = 'Z'
m += china + '_'
else:
m += i + '_'
return m
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ymc = [u'十一', u'十二', u'正', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九', u'十'
]
rmc = [u'初一', u'初二', u'初三', u'初四', u'初五', u'初六', u'初七', u'初八', u'初九', u'初十',
u'十一', u'十二', u'十三', u'十四', u'十五', u'十六', u'十七', u'十八', u'十九', u'二十',
u'廿一', u'廿二', u'廿三', u'廿四', u'廿五', u'廿六', u'廿七', u'廿八', u'廿九', u'三十', u'卅一'
]
lunar = sxtwl.Lunar()
def china_lunar():
today = str(date.today())
today_list = today.split('-')
lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(
datetime.datetime.now().month), int(datetime.datetime.now().day))
if lunar_day.Lleap:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today, china_day
<|reserved_special_token_0|>
def morning_news():
news_api = (
'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'
)
response = requests.get(news_api)
print(dict(response.json()))
news_list = dict(response.json())
news = ''
m = 1
news_q = ''
for i in news_list['newslist']:
img_url = ''
if i['url'] == '':
img_url = i['imgsrc']
news = str(str(m) + ':' + i['title'] + '\n' + i['url'] + img_url + '\n'
)
news_q += str(news)
m += 1
return news_q
def news_put():
news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1
] + '\n' + morning_news()
return news_spider_message
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
}
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society, headers=headers)
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath(
'//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
newss_1 = ''
number = 1
for t in title:
newss = str(number) + ':' + str_list(t.text
) + '。' + '\n' + ' 概要:' + str_list(news[title.index(t)].text
) + '。' + '\n' + ' 详情:' + '\n' + '\n'
newss_1 += newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[
1] + '\n' + NewYork_news(page)
return news_spider_message
def str_list(t):
m = ''
for i in list(t):
if i == '中':
china = 'Z'
m += china + '_'
else:
m += i + '_'
return m
<|reserved_special_token_1|>
import requests
import sxtwl
import datetime
from datetime import date
import lxml
from lxml import etree
ymc = [u'十一', u'十二', u'正', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九', u'十'
]
rmc = [u'初一', u'初二', u'初三', u'初四', u'初五', u'初六', u'初七', u'初八', u'初九', u'初十',
u'十一', u'十二', u'十三', u'十四', u'十五', u'十六', u'十七', u'十八', u'十九', u'二十',
u'廿一', u'廿二', u'廿三', u'廿四', u'廿五', u'廿六', u'廿七', u'廿八', u'廿九', u'三十', u'卅一'
]
lunar = sxtwl.Lunar()
def china_lunar():
today = str(date.today())
today_list = today.split('-')
lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(
datetime.datetime.now().month), int(datetime.datetime.now().day))
if lunar_day.Lleap:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today, china_day
import json
def morning_news():
news_api = (
'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'
)
response = requests.get(news_api)
print(dict(response.json()))
news_list = dict(response.json())
news = ''
m = 1
news_q = ''
for i in news_list['newslist']:
img_url = ''
if i['url'] == '':
img_url = i['imgsrc']
news = str(str(m) + ':' + i['title'] + '\n' + i['url'] + img_url + '\n'
)
news_q += str(news)
m += 1
return news_q
def news_put():
news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1
] + '\n' + morning_news()
return news_spider_message
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
}
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society, headers=headers)
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath(
'//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
newss_1 = ''
number = 1
for t in title:
newss = str(number) + ':' + str_list(t.text
) + '。' + '\n' + ' 概要:' + str_list(news[title.index(t)].text
) + '。' + '\n' + ' 详情:' + '\n' + '\n'
newss_1 += newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[
1] + '\n' + NewYork_news(page)
return news_spider_message
def str_list(t):
m = ''
for i in list(t):
if i == '中':
china = 'Z'
m += china + '_'
else:
m += i + '_'
return m
<|reserved_special_token_1|>
import requests
import sxtwl
import datetime
from datetime import date
import lxml
from lxml import etree
# 日历中文索引
ymc = [u"十一", u"十二", u"正", u"二", u"三", u"四", u"五", u"六", u"七", u"八", u"九", u"十"]
rmc = [u"初一", u"初二", u"初三", u"初四", u"初五", u"初六", u"初七", u"初八", u"初九", u"初十", \
u"十一", u"十二", u"十三", u"十四", u"十五", u"十六", u"十七", u"十八", u"十九", \
u"二十", u"廿一", u"廿二", u"廿三", u"廿四", u"廿五", u"廿六", u"廿七", u"廿八", u"廿九", u"三十", u"卅一"]
# 日历库实例化
lunar = sxtwl.Lunar()
# 2.阳历转阴历
def china_lunar():
today = str(date.today())
today_list = today.split('-') # ['2019', '08', '08']
lunar_day = lunar.getDayBySolar((int)(datetime.datetime.now().year), (int)(datetime.datetime.now().month), (int)(datetime.datetime.now().day)) # 输入年月日
# 判断是否为润年
if (lunar_day.Lleap):
china_day = "农历:{0}月{1}".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day ="农历:{0}月{1}".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today,china_day
import json
def morning_news():
news_api = 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'
response = requests.get(news_api)
print(dict(response.json()))
news_list = dict(response.json())
news = ''
m = 1
news_q=''
for i in news_list['newslist']:
img_url=''
if i['url'] == '':
img_url = i['imgsrc']
news = str(str(m)+":"+i['title']+"\n"+i['url']+img_url+"\n")
news_q += str(news)
m += 1
return news_q
def news_put():
news_spider_message = '【早间新闻】 '+china_lunar()[0]+" "+china_lunar()[1]+"\n"+morning_news()
return news_spider_message
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'}
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society,headers =headers )
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
# print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//h3/a')[1].text) #这个是标题
# print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//p')[1].text) # 这个是简介
#
# print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//h3/a/@href')[1]) # 这个是链接
newss_1 = ''
number = 1
for t in title:
newss = str(number)+":"+str_list(t.text) +'。'+'\n'+' 概要:'+str_list(news[title.index(t)].text)+'。'+'\n'+' 详情:'+'\n'+'\n'
newss_1 +=newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】'+china_lunar()[0]+" "+china_lunar()[1]+"\n"+NewYork_news(page)
return news_spider_message
def str_list(t):
m=''
for i in list(t):
if i == '中':
china = 'Z'
m += china +'_'
else:
m += i + '_'
return m
|
flexible
|
{
"blob_id": "e1d0648825695584d3ea518db961a9178ea0c66a",
"index": 50,
"step-1": "<mask token>\n\n\ndef china_lunar():\n today = str(date.today())\n today_list = today.split('-')\n lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(\n datetime.datetime.now().month), int(datetime.datetime.now().day))\n if lunar_day.Lleap:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today, china_day\n\n\n<mask token>\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\n<mask token>\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\n<mask token>\n\n\ndef str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n",
"step-2": "<mask token>\n\n\ndef china_lunar():\n today = str(date.today())\n today_list = today.split('-')\n lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(\n datetime.datetime.now().month), int(datetime.datetime.now().day))\n if lunar_day.Lleap:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today, china_day\n\n\n<mask token>\n\n\ndef morning_news():\n news_api = (\n 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'\n )\n response = requests.get(news_api)\n print(dict(response.json()))\n news_list = dict(response.json())\n news = ''\n m = 1\n news_q = ''\n for i in news_list['newslist']:\n img_url = ''\n if i['url'] == '':\n img_url = i['imgsrc']\n news = str(str(m) + ':' + i['title'] + '\\n' + i['url'] + img_url + '\\n'\n )\n news_q += str(news)\n m += 1\n return news_q\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\n<mask token>\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\ndef 
str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n",
"step-3": "<mask token>\nymc = [u'十一', u'十二', u'正', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九', u'十'\n ]\nrmc = [u'初一', u'初二', u'初三', u'初四', u'初五', u'初六', u'初七', u'初八', u'初九', u'初十',\n u'十一', u'十二', u'十三', u'十四', u'十五', u'十六', u'十七', u'十八', u'十九', u'二十',\n u'廿一', u'廿二', u'廿三', u'廿四', u'廿五', u'廿六', u'廿七', u'廿八', u'廿九', u'三十', u'卅一'\n ]\nlunar = sxtwl.Lunar()\n\n\ndef china_lunar():\n today = str(date.today())\n today_list = today.split('-')\n lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(\n datetime.datetime.now().month), int(datetime.datetime.now().day))\n if lunar_day.Lleap:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today, china_day\n\n\n<mask token>\n\n\ndef morning_news():\n news_api = (\n 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'\n )\n response = requests.get(news_api)\n print(dict(response.json()))\n news_list = dict(response.json())\n news = ''\n m = 1\n news_q = ''\n for i in news_list['newslist']:\n img_url = ''\n if i['url'] == '':\n img_url = i['imgsrc']\n news = str(str(m) + ':' + i['title'] + '\\n' + i['url'] + img_url + '\\n'\n )\n news_q += str(news)\n m += 1\n return news_q\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n 
'//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\ndef str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n",
"step-4": "import requests\nimport sxtwl\nimport datetime\nfrom datetime import date\nimport lxml\nfrom lxml import etree\nymc = [u'十一', u'十二', u'正', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九', u'十'\n ]\nrmc = [u'初一', u'初二', u'初三', u'初四', u'初五', u'初六', u'初七', u'初八', u'初九', u'初十',\n u'十一', u'十二', u'十三', u'十四', u'十五', u'十六', u'十七', u'十八', u'十九', u'二十',\n u'廿一', u'廿二', u'廿三', u'廿四', u'廿五', u'廿六', u'廿七', u'廿八', u'廿九', u'三十', u'卅一'\n ]\nlunar = sxtwl.Lunar()\n\n\ndef china_lunar():\n today = str(date.today())\n today_list = today.split('-')\n lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(\n datetime.datetime.now().month), int(datetime.datetime.now().day))\n if lunar_day.Lleap:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today, china_day\n\n\nimport json\n\n\ndef morning_news():\n news_api = (\n 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'\n )\n response = requests.get(news_api)\n print(dict(response.json()))\n news_list = dict(response.json())\n news = ''\n m = 1\n news_q = ''\n for i in news_list['newslist']:\n img_url = ''\n if i['url'] == '':\n img_url = i['imgsrc']\n news = str(str(m) + ':' + i['title'] + '\\n' + i['url'] + img_url + '\\n'\n )\n news_q += str(news)\n m += 1\n return news_q\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = 
mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\ndef str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n",
"step-5": "import requests\nimport sxtwl\nimport datetime\nfrom datetime import date\nimport lxml\nfrom lxml import etree\n# 日历中文索引\nymc = [u\"十一\", u\"十二\", u\"正\", u\"二\", u\"三\", u\"四\", u\"五\", u\"六\", u\"七\", u\"八\", u\"九\", u\"十\"]\nrmc = [u\"初一\", u\"初二\", u\"初三\", u\"初四\", u\"初五\", u\"初六\", u\"初七\", u\"初八\", u\"初九\", u\"初十\", \\\n u\"十一\", u\"十二\", u\"十三\", u\"十四\", u\"十五\", u\"十六\", u\"十七\", u\"十八\", u\"十九\", \\\n u\"二十\", u\"廿一\", u\"廿二\", u\"廿三\", u\"廿四\", u\"廿五\", u\"廿六\", u\"廿七\", u\"廿八\", u\"廿九\", u\"三十\", u\"卅一\"]\n\n# 日历库实例化\nlunar = sxtwl.Lunar()\n\n\n\n# 2.阳历转阴历\n\n\ndef china_lunar():\n today = str(date.today())\n\n today_list = today.split('-') # ['2019', '08', '08']\n lunar_day = lunar.getDayBySolar((int)(datetime.datetime.now().year), (int)(datetime.datetime.now().month), (int)(datetime.datetime.now().day)) # 输入年月日\n # 判断是否为润年\n if (lunar_day.Lleap):\n china_day = \"农历:{0}月{1}\".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day =\"农历:{0}月{1}\".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today,china_day\n\n\nimport json\ndef morning_news():\n news_api = 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'\n response = requests.get(news_api)\n print(dict(response.json()))\n news_list = dict(response.json())\n news = ''\n m = 1\n news_q=''\n for i in news_list['newslist']:\n img_url=''\n if i['url'] == '':\n img_url = i['imgsrc']\n news = str(str(m)+\":\"+i['title']+\"\\n\"+i['url']+img_url+\"\\n\")\n news_q += str(news)\n m += 1\n\n return news_q\n\ndef news_put():\n news_spider_message = '【早间新闻】 '+china_lunar()[0]+\" \"+china_lunar()[1]+\"\\n\"+morning_news()\n return news_spider_message\n\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'}\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society,headers =headers )\n mytree = 
lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n # print(mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div[2]/div/ul//h3/a')[1].text) #这个是标题\n # print(mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div[2]/div/ul//p')[1].text) # 这个是简介\n #\n # print(mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div[2]/div/ul//h3/a/@href')[1]) # 这个是链接\n newss_1 = ''\n number = 1\n for t in title:\n\n newss = str(number)+\":\"+str_list(t.text) +'。'+'\\n'+' 概要:'+str_list(news[title.index(t)].text)+'。'+'\\n'+' 详情:'+'\\n'+'\\n'\n newss_1 +=newss\n number += 1\n\n return newss_1\n\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】'+china_lunar()[0]+\" \"+china_lunar()[1]+\"\\n\"+NewYork_news(page)\n\n return news_spider_message\n\ndef str_list(t):\n m=''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china +'_'\n else:\n\n m += i + '_'\n\n\n return m\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def gridsearchcv(X, y):
accuracy = []
stdlist = []
classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)
param_grid = {'n_estimators': np.arange(1, 100, 10)}
grid = GridSearchCV(classifier, param_grid=param_grid)
grid.fit(X, y)
fig = plt.figure(1, figsize=(16, 12))
plt.clf()
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
scores = grid.grid_scores_
for i in range(len(scores)):
accu = scores[i][1]
stdnum = np.std(scores[i][2])
accuracy.append(accu)
stdlist.append(stdnum)
ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)
ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)
plt.axis('tight')
ax1.set_xlabel('n_estimators')
ax1.set_ylabel('accuracy')
ax2.set_xlabel('n_estimators')
ax2.set_ylabel('std_accuracy')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def out(trainset):
    """Split a training matrix into features and labels.

    The last column is the label; everything before it is the feature block.
    The label column is flattened via ``np.asarray(column.T)[0]`` exactly as
    the original did (tailored to ``np.matrix``-style inputs).
    """
    feature_block = trainset[:, :-1]
    label_column = trainset[:, -1]
    return np.asarray(feature_block), np.asarray(label_column.T)[0]
def gridsearchcv(X, y):
    """Grid-search RandomForest ``n_estimators`` over 1..91 (step 10) and plot results.

    Fits a ``GridSearchCV`` on (X, y), then draws two side-by-side subplots:
    mean cross-validation accuracy and the standard deviation of the per-fold
    scores, both against ``n_estimators``.  Returns nothing; the matplotlib
    figure is the output.

    NOTE(review): relies on the legacy ``sklearn.grid_search`` API --
    ``grid.grid_scores_`` was removed in scikit-learn 0.20 (superseded by
    ``cv_results_``); confirm the pinned scikit-learn version before reuse.
    """
    accuracy = []  # mean CV accuracy per candidate n_estimators
    stdlist = []  # std of per-fold CV scores per candidate n_estimators
    classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)
    param_grid = {'n_estimators': np.arange(1, 100, 10)}
    grid = GridSearchCV(classifier, param_grid=param_grid)
    grid.fit(X, y)
    fig = plt.figure(1, figsize=(16, 12))
    plt.clf()
    ax1 = fig.add_subplot(1, 2, 1)  # accuracy curve
    ax2 = fig.add_subplot(1, 2, 2)  # std-of-scores curve
    # Legacy format: each entry is (params, mean_validation_score, cv_fold_scores).
    scores = grid.grid_scores_
    for i in range(len(scores)):
        accu = scores[i][1]
        stdnum = np.std(scores[i][2])
        accuracy.append(accu)
        stdlist.append(stdnum)
    ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)
    ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)
    plt.axis('tight')
    ax1.set_xlabel('n_estimators')
    ax1.set_ylabel('accuracy')
    ax2.set_xlabel('n_estimators')
    ax2.set_ylabel('std_accuracy')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadTrainSet(filepath):
    """Load a CSV training file (label in column 0, features after) as strings.

    Skips the header row and returns the columns reordered as
    [features..., label], which is the layout ``out`` expects.

    Parameters
    ----------
    filepath : str or path-like
        Comma-separated file whose first row is a header and whose first
        column is the class label.

    Returns
    -------
    numpy.ndarray
        2-D string array of shape (n_samples, n_features + 1).
    """
    # BUG FIX: np.str was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin str is the supported equivalent.
    raw = np.loadtxt(filepath, delimiter=',', dtype=str, skiprows=1)
    X, y = raw[:, 1:], raw[:, 0]
    # Move the label to the last column.
    trainSet = np.hstack((X, y.reshape(-1, 1)))
    return trainSet
def out(trainset):
    """Separate a training matrix into (features, labels).

    All columns except the last form the feature matrix; the final column is
    the label vector, flattened with ``np.asarray(column.T)[0]`` exactly as
    the original implementation did (matrix-style input assumed).
    """
    features = np.asarray(trainset[:, :-1])
    labels = np.asarray(trainset[:, -1].T)[0]
    return features, labels
def gridsearchcv(X, y):
    """Grid-search RandomForest ``n_estimators`` over 1..91 (step 10) and plot results.

    Fits a ``GridSearchCV`` on (X, y), then draws two side-by-side subplots:
    mean cross-validation accuracy and the standard deviation of the per-fold
    scores, both against ``n_estimators``.  Returns nothing; the matplotlib
    figure is the output.

    NOTE(review): relies on the legacy ``sklearn.grid_search`` API --
    ``grid.grid_scores_`` was removed in scikit-learn 0.20 (superseded by
    ``cv_results_``); confirm the pinned scikit-learn version before reuse.
    """
    accuracy = []  # mean CV accuracy per candidate n_estimators
    stdlist = []  # std of per-fold CV scores per candidate n_estimators
    classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)
    param_grid = {'n_estimators': np.arange(1, 100, 10)}
    grid = GridSearchCV(classifier, param_grid=param_grid)
    grid.fit(X, y)
    fig = plt.figure(1, figsize=(16, 12))
    plt.clf()
    ax1 = fig.add_subplot(1, 2, 1)  # accuracy curve
    ax2 = fig.add_subplot(1, 2, 2)  # std-of-scores curve
    # Legacy format: each entry is (params, mean_validation_score, cv_fold_scores).
    scores = grid.grid_scores_
    for i in range(len(scores)):
        accu = scores[i][1]
        stdnum = np.std(scores[i][2])
        accuracy.append(accu)
        stdlist.append(stdnum)
    ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)
    ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)
    plt.axis('tight')
    ax1.set_xlabel('n_estimators')
    ax1.set_ylabel('accuracy')
    ax2.set_xlabel('n_estimators')
    ax2.set_ylabel('std_accuracy')
<|reserved_special_token_1|>
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
def loadTrainSet(filepath):
    """Load a CSV training file (label in column 0, features after) as strings.

    Skips the header row and returns the columns reordered as
    [features..., label], which is the layout ``out`` expects.

    Parameters
    ----------
    filepath : str or path-like
        Comma-separated file whose first row is a header and whose first
        column is the class label.

    Returns
    -------
    numpy.ndarray
        2-D string array of shape (n_samples, n_features + 1).
    """
    # BUG FIX: np.str was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin str is the supported equivalent.
    raw = np.loadtxt(filepath, delimiter=',', dtype=str, skiprows=1)
    X, y = raw[:, 1:], raw[:, 0]
    # Move the label to the last column.
    trainSet = np.hstack((X, y.reshape(-1, 1)))
    return trainSet
def out(trainset):
    """Return (X, y): feature columns and the flattened final label column.

    Behaviour matches the original byte-for-byte on the numeric path,
    including the ``np.asarray(column.T)[0]`` label flattening that targets
    matrix-style inputs.
    """
    last = trainset.shape[1] - 1
    X = np.asarray(trainset[:, :last])
    y = np.asarray(trainset[:, last].T)[0]
    return X, y
def gridsearchcv(X, y):
    """Grid-search RandomForest ``n_estimators`` over 1..91 (step 10) and plot results.

    Fits a ``GridSearchCV`` on (X, y), then draws two side-by-side subplots:
    mean cross-validation accuracy and the standard deviation of the per-fold
    scores, both against ``n_estimators``.  Returns nothing; the matplotlib
    figure is the output.

    NOTE(review): relies on the legacy ``sklearn.grid_search`` API --
    ``grid.grid_scores_`` was removed in scikit-learn 0.20 (superseded by
    ``cv_results_``); confirm the pinned scikit-learn version before reuse.
    """
    accuracy = []  # mean CV accuracy per candidate n_estimators
    stdlist = []  # std of per-fold CV scores per candidate n_estimators
    classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)
    param_grid = {'n_estimators': np.arange(1, 100, 10)}
    grid = GridSearchCV(classifier, param_grid=param_grid)
    grid.fit(X, y)
    fig = plt.figure(1, figsize=(16, 12))
    plt.clf()
    ax1 = fig.add_subplot(1, 2, 1)  # accuracy curve
    ax2 = fig.add_subplot(1, 2, 2)  # std-of-scores curve
    # Legacy format: each entry is (params, mean_validation_score, cv_fold_scores).
    scores = grid.grid_scores_
    for i in range(len(scores)):
        accu = scores[i][1]
        stdnum = np.std(scores[i][2])
        accuracy.append(accu)
        stdlist.append(stdnum)
    ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)
    ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)
    plt.axis('tight')
    ax1.set_xlabel('n_estimators')
    ax1.set_ylabel('accuracy')
    ax2.set_xlabel('n_estimators')
    ax2.set_ylabel('std_accuracy')
<|reserved_special_token_1|>
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
def loadTrainSet(filepath):
    """Load a CSV training file (label in column 0, features after) as strings.

    Skips the header row and returns the columns reordered as
    [features..., label], which is the layout ``out`` expects.

    Parameters
    ----------
    filepath : str or path-like
        Comma-separated file whose first row is a header and whose first
        column is the class label.

    Returns
    -------
    numpy.ndarray
        2-D string array of shape (n_samples, n_features + 1).
    """
    # BUG FIX: np.str was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin str is the supported equivalent.
    raw = np.loadtxt(filepath, delimiter=',', dtype=str, skiprows=1)
    X, y = raw[:, 1:], raw[:, 0]
    # Move the label to the last column.
    trainSet = np.hstack((X, y.reshape(-1, 1)))
    return trainSet
def out(trainset):
    """Split a training matrix: every column but the last is a feature,
    the last column is the label.

    Keeps the original matrix-oriented label flattening
    (``np.asarray(column.T)[0]``) unchanged.
    """
    feats = trainset[:, :-1]
    lbls = trainset[:, -1]
    return np.asarray(feats), np.asarray(lbls.T)[0]
def gridsearchcv(X,y):
    """Grid-search RandomForest ``n_estimators`` over 1..91 (step 10); plot the
    mean CV accuracy and the std of the per-fold scores against n_estimators.

    The commented-out ``param_grid`` lines are alternative sweeps the author
    ran (tree count, criterion, max_features, depth, leaf settings).

    NOTE(review): ``grid.grid_scores_`` belongs to the legacy
    ``sklearn.grid_search`` API removed in scikit-learn 0.20 (superseded by
    ``cv_results_``); confirm the pinned version before reuse.
    """
    accuracy=[]
    stdlist=[]
    classifier = RandomForestClassifier(verbose=2, n_jobs=1,oob_score=1)
    param_grid={'n_estimators':np.arange(1, 100, 10)}
    # param_grid={'n_estimators':np.arange(1, 202, 10)}
    # param_grid={'n_estimators':[200], 'criterion':['gini', 'entropy']}
    # param_grid={'n_estimators':[200], 'max_features':np.append(np.arange(28-20, 28, 1), np.arange(28, 28+20, 1))}
    # param_grid={'n_estimators':[200], 'max_depth':np.arange(40, 40+20, 1)}
    # param_grid={'n_estimators':[200], 'min_samples_split':np.arange(2, 2+10, 1)}
    # param_grid={'n_estimators':[200], 'min_samples_leaf':np.arange(1, 1+10, 1)}
    # param_grid={'n_estimators':[200], 'max_leaf_nodes':np.arange(3000, 3000+1000, 100)}
    grid = GridSearchCV(classifier , param_grid=param_grid)
    grid.fit(X,y)
    fig=plt.figure(1, figsize=(16, 12))
    plt.clf()
    ax1=fig.add_subplot(1,2,1)
    ax2=fig.add_subplot(1,2,2)
    # Legacy format: each entry is (params, mean_validation_score, cv_fold_scores).
    scores=grid.grid_scores_
    for i in range(len(scores)):
        accu=scores[i][1]
        stdnum=np.std(scores[i][2])
        accuracy.append(accu)
        stdlist.append(stdnum)
    ax1.plot(np.arange(1, 100, 10),accuracy, linewidth=2)
    ax2.plot(np.arange(1, 100, 10),stdlist, linewidth=2)
    plt.axis('tight')
    ax1.set_xlabel('n_estimators')
    ax1.set_ylabel('accuracy')
    ax2.set_xlabel('n_estimators')
    ax2.set_ylabel('std_accuracy')
|
flexible
|
{
"blob_id": "08f0b261b5a9b0f5133c468b3f92dc00285eda6a",
"index": 4477,
"step-1": "<mask token>\n\n\ndef gridsearchcv(X, y):\n accuracy = []\n stdlist = []\n classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)\n param_grid = {'n_estimators': np.arange(1, 100, 10)}\n grid = GridSearchCV(classifier, param_grid=param_grid)\n grid.fit(X, y)\n fig = plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n scores = grid.grid_scores_\n for i in range(len(scores)):\n accu = scores[i][1]\n stdnum = np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum)\n ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n",
"step-2": "<mask token>\n\n\ndef out(trainset):\n trainset = trainset\n X = trainset[:, :trainset.shape[1] - 1]\n y = trainset[:, trainset.shape[1] - 1]\n X = np.asarray(X)\n y = np.asarray(y.T)[0]\n return X, y\n\n\ndef gridsearchcv(X, y):\n accuracy = []\n stdlist = []\n classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)\n param_grid = {'n_estimators': np.arange(1, 100, 10)}\n grid = GridSearchCV(classifier, param_grid=param_grid)\n grid.fit(X, y)\n fig = plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n scores = grid.grid_scores_\n for i in range(len(scores)):\n accu = scores[i][1]\n stdnum = np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum)\n ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n",
"step-3": "<mask token>\n\n\ndef loadTrainSet(filepath):\n raw = np.loadtxt(filepath, delimiter=',', dtype=np.str, skiprows=1)\n X, y = raw[:, 1:], raw[:, 0]\n trainSet = np.hstack((X, y.reshape(-1, 1)))\n return trainSet\n\n\ndef out(trainset):\n trainset = trainset\n X = trainset[:, :trainset.shape[1] - 1]\n y = trainset[:, trainset.shape[1] - 1]\n X = np.asarray(X)\n y = np.asarray(y.T)[0]\n return X, y\n\n\ndef gridsearchcv(X, y):\n accuracy = []\n stdlist = []\n classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)\n param_grid = {'n_estimators': np.arange(1, 100, 10)}\n grid = GridSearchCV(classifier, param_grid=param_grid)\n grid.fit(X, y)\n fig = plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n scores = grid.grid_scores_\n for i in range(len(scores)):\n accu = scores[i][1]\n stdnum = np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum)\n ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n",
"step-4": "import numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.grid_search import GridSearchCV\nimport matplotlib.pyplot as plt\n\n\ndef loadTrainSet(filepath):\n raw = np.loadtxt(filepath, delimiter=',', dtype=np.str, skiprows=1)\n X, y = raw[:, 1:], raw[:, 0]\n trainSet = np.hstack((X, y.reshape(-1, 1)))\n return trainSet\n\n\ndef out(trainset):\n trainset = trainset\n X = trainset[:, :trainset.shape[1] - 1]\n y = trainset[:, trainset.shape[1] - 1]\n X = np.asarray(X)\n y = np.asarray(y.T)[0]\n return X, y\n\n\ndef gridsearchcv(X, y):\n accuracy = []\n stdlist = []\n classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)\n param_grid = {'n_estimators': np.arange(1, 100, 10)}\n grid = GridSearchCV(classifier, param_grid=param_grid)\n grid.fit(X, y)\n fig = plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n scores = grid.grid_scores_\n for i in range(len(scores)):\n accu = scores[i][1]\n stdnum = np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum)\n ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n",
"step-5": "import numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.grid_search import GridSearchCV\nimport matplotlib.pyplot as plt\n\ndef loadTrainSet(filepath):\n raw = np.loadtxt(filepath, delimiter=',', dtype=np.str, skiprows=1)\n X, y = raw[:,1:], raw[:,0]\n trainSet = np.hstack((X, y.reshape(-1,1)))\n return trainSet\n \n\ndef out(trainset):\n trainset = trainset\n X=trainset[:,:(trainset.shape[1]-1)]\n y=trainset[:,(trainset.shape[1]-1)]\n X=np.asarray(X)\n y=np.asarray(y.T)[0]\n return X,y\n\n\ndef gridsearchcv(X,y):\n accuracy=[]\n stdlist=[]\n classifier = RandomForestClassifier(verbose=2, n_jobs=1,oob_score=1)\n param_grid={'n_estimators':np.arange(1, 100, 10)}\n# param_grid={'n_estimators':np.arange(1, 202, 10)}\n# param_grid={'n_estimators':[200], 'criterion':['gini', 'entropy']}\n# param_grid={'n_estimators':[200], 'max_features':np.append(np.arange(28-20, 28, 1), np.arange(28, 28+20, 1))}\n# param_grid={'n_estimators':[200], 'max_depth':np.arange(40, 40+20, 1)}\n# param_grid={'n_estimators':[200], 'min_samples_split':np.arange(2, 2+10, 1)}\n# param_grid={'n_estimators':[200], 'min_samples_leaf':np.arange(1, 1+10, 1)}\n# param_grid={'n_estimators':[200], 'max_leaf_nodes':np.arange(3000, 3000+1000, 100)}\n\n grid = GridSearchCV(classifier , param_grid=param_grid)\n grid.fit(X,y)\n fig=plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1=fig.add_subplot(1,2,1)\n ax2=fig.add_subplot(1,2,2)\n scores=grid.grid_scores_\n for i in range(len(scores)):\n accu=scores[i][1]\n stdnum=np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum) \n ax1.plot(np.arange(1, 100, 10),accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10),stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
import math
import activations
class FC_layer():
    """Fully connected layer with a hand-written forward/backward pass.

    Weights have shape (input_size, output_size); bias is (1, output_size).
    Gradients accumulate across ``backward`` calls and are applied and reset
    by ``update_gradients`` (supports batch accumulation or per-sample SGD).
    The activation function and its derivative are looked up by name from the
    project-local ``activations`` module.
    """
    def __init__(self, input_size, output_size, weight_init_range, activation, debug):
        self.type = "FC"
        self.activation_name = activation
        self.shape = (input_size, output_size)
        self.activation = activations.get_activation_function(activation)
        self.d_activation = activations.get_activation_derivative(activation)
        self.input = None
        self.output = None
        # Uniform init within weight_init_range; bias uses rand in [0, 1).
        self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size=(input_size, output_size))
        self.bias = np.random.rand(1,output_size)
        self.weights_grads = np.zeros(self.weights.shape)
        self.bias_grads = np.zeros(self.bias.shape)
        self.debug = debug
    def forward(self, input_activations):
        """Return activation(input @ W + b); caches the input and the
        activated output for the backward pass."""
        # Dot product of input with W plus bias. Cache, activate and return
        output = np.dot(input_activations, self.weights) + self.bias
        # Cache the weighted outputs and inputs
        #self.output = output
        self.input = input_activations
        # Pass the output through the activation function
        output = self.activation(self, output)
        self.output = output
        return output
    def backward(self, jacobian_L_Z):
        """Accumulate weight/bias gradients from J_L_Z and return J_L_Y, the
        loss Jacobian w.r.t. this layer's input, for the upstream layer."""
        # Get the jacobian linking the loss with respect of this layer output from the previous layer.
        # PURPOSE: Calculate the weights gradients, the bias gradient and the input_loss
        # that will be passed to the previous activation layer and so on, up to layer previous input
        Y = self.input
        # Create the jacobian J_Z_sum with the layer cached outputs and the derivative of activation function
        jacobian_Z_sum = self.create_jacobian_Z_sum()
        # Find the Weights gradients jacobian_L_W
        # Compute the simple jacobian linking the outputs and the weights
        simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())
        # Then compute the jacobian linking the loss to the weights
        jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W
        # Calculate the input layer loss jacobian_L_Y
        # by doing dot product of output layer loss and the weigths matrix transposed (so to invert M N to N M, where M < N, we go the other way around)
        jacobian_Z_Y = np.dot(jacobian_Z_sum ,self.weights.T)
        jacobian_L_Y = np.dot( jacobian_L_Z, jacobian_Z_Y)
        # Bias loss is the as the output loss --> the bias influence on the loss == layer activation output influence on the loss
        jacobian_L_B = jacobian_L_Z
        # Now save the bias loss and weight loss (representing the calculated gradiants).
        # This will be updated at the end of the batch, or SGD
        self.weights_grads =self.weights_grads + jacobian_L_W
        self.bias_grads = self.bias_grads + jacobian_L_B
        #Finally return the calculated input loss --> this will be the output loss of the next layer
        return jacobian_L_Y
    def create_jacobian_Z_sum(self):
        """Diagonal Jacobian of the activation w.r.t. the pre-activation sum,
        evaluated at the cached output (d_activation receives the activated
        output, as this project's activations module expects)."""
        return np.identity(self.output[0].size) * self.d_activation(self, self.output)
    def update_gradients(self, learning_rate, gradient_avg_factor = 1):
        """Apply the accumulated gradients scaled by ``learning_rate`` and
        reset the accumulators to zero."""
        #Update gradients, usefull when doing batch learning
        # Get the avg of the gradients (for SGD divide by 1, else divide by batchsize)
        ## UPDATE: removed the division by batchsize: Implemented this factor in the learning rate
        #self.weights_grads = self.weights_grads / gradient_avg_factor
        #self.bias_grads = self.bias_grads / gradient_avg_factor
        # Update weights and biases
        self.weights -= learning_rate * self.weights_grads
        self.bias -= learning_rate * self.bias_grads
        self.weights_grads = np.zeros(self.weights.shape)
        self.bias_grads = np.zeros(self.bias.shape)
    def __str__(self):
        return "FC Layer type size = " + str(self.weights.shape) + " with activation = " + self.activation_name
class conv2D():
    """2-D convolution layer with zero padding ('full'/'same'/else valid),
    per-axis strides, and a manual backward pass.

    A coordinate cache maps every (kernel-position, input-position) pair to
    the output position it contributes to; it is built once at construction
    (the geometry is identical for every kernel/channel) and reused by
    ``compute_gradients`` and ``compute_J_LY``.
    """
    def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes, weight_init_range, activation, debug):
        self.type = "conv2D"
        self.input_shape = input_shape
        self.activation_name = activation
        #Kernel stack shape for the layer (N, I, K_x, K_y)
        self.kernel_shape = (n_kernels, input_shape[0], kernel_shape[0], kernel_shape[1])
        self.activation = activations.get_activation_function(activation)
        self.d_activation = activations.get_activation_derivative(activation)
        self.strides = strides
        self.modes = modes
        self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)
        self.weights_grads = np.zeros(self.weights.shape)
        self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = self.calculate_padding()
        self.output_shape = self.calculate_output_shape()
        self.cached_calculation = {}
        self.cache_weights_input_output_triplet_locations()
        self.cached_output = None
        self.debug = debug
        '''print("###########################")
        a = np.random.randint(1,4,(6,6))
        print(a)
        padded_a = self.apply_zero_padding(a)
        print(padded_a)
        print("kernel shape", (self.kernel_shape[2], self.kernel_shape[3]))
        print("input shape", a.shape)
        print("padded shape", padded_a.shape)
        print("###########################")'''
    def cache_weights_input_output_triplet_locations(self):
        """Dry-run one convolution over a zero placeholder input and record the
        (weight coord, input coord) -> output coord mapping used by backprop.

        NOTE(review): the placeholder uses the UNPADDED input_shape while
        ``forward`` convolves over the padded input -- confirm cache coverage
        when padding is non-zero."""
        placeholder_input = np.zeros(self.input_shape)
        array = placeholder_input[0]
        kernel = self.weights[0][0]
        stride_x_pointer = 0
        while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
            stride_y_pointer = 0
            #while the kernel does not go over the x-axis of the array
            while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):
                #while the kernel does not go over the y-axis of the array
                #cache all touched weights and input for each kernel (output or Coordinates??)
                for row in range(kernel.shape[0]):
                    for column in range(kernel.shape[1]):
                        # Cache coordinate only: (weight, input) --> output
                        #format: key ((weight_x_pos, weight_y_pos), (input_x_pos, input_y_pos)) ---> (output_x_pos, output_y_pos)
                        conv_output_coordinate = (stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])
                        self.cached_calculation[((row, column), (row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
                        #Cache weight coordinate and input/output values
                # Update the stride along the y-axis
                stride_y_pointer += self.strides[1]
            #update the stride along the x-axis
            stride_x_pointer += self.strides[0]
        #End of convolution
    def forward(self, input_feature_maps):
        """Zero-pad the input, slide every kernel stack over all input
        channels accumulating into the output maps, cache input/output, and
        return activation(output)."""
        #reset the cached calculations from the previous forward pass
        #self.cached_calculation = {}
        output = np.zeros(self.output_shape)
        #Apply padding
        input_feature_maps = self.apply_zero_padding(input_feature_maps)
        for i in range(0, self.kernel_shape[0]):
            #for each kernel stack
            kernel_stack = self.weights[i]
            for j in range(0, self.kernel_shape[1]):
                #for each kernel in the kernel stack (or input channel)
                kernel = kernel_stack[j]
                array = input_feature_maps[j]
                stride_x_pointer = 0
                conv_counter = 1
                if self.debug:
                    print("**** NEW CONVOLUTION ****")
                while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
                    stride_y_pointer = 0
                    #while the kernel does not go over the x-axis of the array
                    while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):
                        #while the kernel does not go over the y-axis of the array
                        #Get the snip of the array to apply convolution on
                        array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0], stride_y_pointer: stride_y_pointer + kernel.shape[1]]
                        #apply convolution and get the result
                        result = np.sum(np.multiply(array_snip, kernel))
                        #update the output tensor
                        conv_output_coordinate = (i, stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])
                        output[conv_output_coordinate] += result
                        '''#cache all the results, touched weights and input for each kernel (output or Coordinates??)
                        for row in range(kernel.shape[0]):
                            for column in range(kernel.shape[1]):
                                # Cache coordinate only: (weight, input) --> output
                                #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)
                                self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
                                #Cache weight coordinate and input/output values
                                #ALTERNATIVE
                                # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val
                                #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result'''
                        if self.debug:
                            print("convolution nr ", conv_counter )
                            print("\narray_snip: \n", array_snip)
                            print("\nkernel: \n", kernel)
                            print("\nelementwise multiplication: \n", np.multiply(array_snip, kernel))
                            print("\nresult: ", result)
                        # Update the stride along the y-axis
                        stride_y_pointer += self.strides[1]
                        conv_counter+=1
                    #update the stride along the x-axis
                    stride_x_pointer += self.strides[0]
                #End of convolution
                if self.debug:
                    print("\n----REVIEW----\n")
                    print("Total convolutions: ", conv_counter)
                    print("\ninput_feature_map:\n ", array)
                    print("\napplied kernel:\n ", kernel)
                    print("\nconvolution result:\n ", output[i])
                    print("***********************************")
        #Cache input and output
        self.cached_output = output
        self.cached_input = input_feature_maps
        #Apply activation
        output = self.activation(self, output)
        return output
    def backward(self, jacobian_L_Z):
        """Accumulate weight gradients from J_L_Z (reshaped back from a
        flattened downstream layer) and return J_L_Y for the upstream layer."""
        #Reshape J_LZ from FC to Conv2D and pass through activation layer
        jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
        #print("JLZ before relu\n", jacobian_L_Z)
        #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)
        #print("cached out after activation\n", self.cached_output)
        jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)
        #print("JLZ after relu\n", jacobian_L_Z)
        # J_L_Z * f'(cached_output)
        #Calculate J_LW
        jacobian_L_W = self.compute_gradients(jacobian_L_Z)
        self.weights_grads += jacobian_L_W
        #Calculate J_LX
        jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
        #Pass Jacobian L Y upstream
        return jacobian_L_Y
    def update_gradients(self, learning_rate):
        """Apply the accumulated kernel gradients and reset the accumulator."""
        self.weights -= learning_rate * self.weights_grads
        self.weights_grads = np.zeros(self.weights.shape)
    def compute_gradients(self, jacobian_L_Z):
        """Loss gradient w.r.t. every kernel weight, assembled by replaying the
        cached (weight, input) -> output coordinate triplets."""
        grads = np.zeros(self.weights.shape)
        #Iterate through all the weights (4 dimension)
        #Iterate through the kernel stacks
        for i in range(self.weights.shape[0]):
            #Iterate throught each kernel/input channel
            for j in range(self.weights.shape[1]):
                #iterate through the x-axis of the kernel
                for k in range(self.weights.shape[2]):
                    #iterate through the y-axis of the kernel
                    for l in range(self.weights.shape[3]):
                        #cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}
                        for key in self.cached_calculation.keys():
                            if key[0] == (k,l):
                                grads[(i,j,k,l)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]
        return grads
    def compute_J_LY(self, jacobian_L_Z):
        """Loss gradient w.r.t. the layer input, summed over all kernel stacks
        via the cached coordinate triplets."""
        jacobian_L_Y = np.zeros(self.input_shape)
        #Iterate through all the inputs (3 dimension)
        #iterate through all channels/kernel of a kernel stack
        for i in range(self.input_shape[0]):
            #iterate through x-akses of 2d input
            for j in range(self.input_shape[1]):
                #iterate through y-axes of 2d input
                for k in range(self.input_shape[2]):
                    #cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}
                    for key in self.cached_calculation.keys():
                        if key[1] == (j,k):
                            #for each kernel-stack
                            for l in range(self.weights.shape[0]):
                                jacobian_L_Y[(i,j,k)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]
        return jacobian_L_Y
    def calculate_output_shape(self):
        """Standard conv output size: floor((I - K + P) / S) + 1 per axis."""
        width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.strides[0] + 1)
        height = math.floor((self.input_shape[2] - self.kernel_shape[3] + self.p_y_start + self.p_y_stop)/self.strides[1] + 1 )
        return (self.kernel_shape[0], width, height)
    def calculate_padding(self):
        """Per-axis (start, stop) zero padding: 'full' pads K-1 on both sides,
        'same' preserves the spatial size, anything else means no padding."""
        #Calculate padding along the x axis
        s = self.strides[0]
        f = self.kernel_shape[2]
        i = self.input_shape[1]
        if self.modes[0] == "full":
            #Every pixel must experience every weight of the kernel
            p_x_start = f - 1
            p_x_stop = f - 1
        elif self.modes[0] == "same":
            #Every pixel must experience the middle weight of the kernel
            p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
            p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
        else:
            p_x_start = 0
            p_x_stop = 0
        #Calculate padding along y axis
        s = self.strides[1]
        f = self.kernel_shape[3]
        i = self.input_shape[2]
        if self.modes[1] == "full":
            #Every pixel must experience every weight of the kernel
            p_y_start = f - 1
            p_y_stop = f - 1
        elif self.modes[1] == "same":
            #Every pixel must experience the middle weight of the kernel
            p_y_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
            p_y_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
        else:
            p_y_start = 0
            p_y_stop = 0
        return p_x_start, p_x_stop, p_y_start, p_y_stop
    def apply_zero_padding(self, input_feature_maps):
        """Return a copy of the input with per-channel zero padding applied
        according to the precomputed per-axis padding amounts."""
        # Apply zero padding to the input feature maps according to the modes, strides and kernel size
        padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, input_feature_maps.shape[2] + self.p_y_start + self.p_y_stop ))
        for channel in range(input_feature_maps.shape[0]):
            array = input_feature_maps[channel]
            #Create the background zero array
            padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))
            #Copy the array in the middle of the zero background
            padded_array[self.p_x_start:array.shape[0]+ self.p_x_start, self.p_y_start:array.shape[1]+ self.p_y_start] = array
            #Save the array
            padded_input_feature_maps[channel] = padded_array
        return padded_input_feature_maps
    def __str__(self):
        return "Conv 2D Layer type with "+ str(self.kernel_shape[0]) +" kernels of shape = " + str(self.kernel_shape[1:]) +"input/output of shape" + str(self.input_shape)+"/" + str(self.output_shape) + " strides= s" + str(self.strides) + " modes= " + str(self.modes) +" with activation = " + self.activation_name
class conv1D():
    """1-D convolution layer: zero padding ('full'/'same'/else valid), a single
    stride, and a manual backward pass.

    Mirrors ``conv2D`` with one spatial axis; a coordinate cache built at
    construction maps (kernel position, input position) pairs to the output
    position they contribute to, reused by ``compute_gradients`` and
    ``compute_J_LY``.
    """
    def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode, weight_init_range, activation, debug):
        self.type = "conv1D"
        self.input_shape = input_shape
        self.activation_name = activation
        #Kernel stack shape for the layer (Num_kernel_stacks, Channels, Kernel_x)'
        self.kernel_shape = (n_kernels, input_shape[0], kernel_shape)
        self.activation = activations.get_activation_function(activation)
        self.d_activation = activations.get_activation_derivative(activation)
        self.stride = stride
        self.mode = mode
        self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)
        self.weights_grads = np.zeros(self.weights.shape)
        self.p_x_start, self.p_x_stop = self.calculate_padding()
        self.output_shape = self.calculate_output_shape()
        self.cached_calculation = {}
        self.cache_weights_input_output_triplet_locations()
        self.cached_output = None
        self.debug = debug
    def cache_weights_input_output_triplet_locations(self):
        """Dry-run one convolution over a zero placeholder input and record the
        (weight pos, input pos) -> output pos mapping used by backprop."""
        #Performe an empty convolution and cache all the position of the kernel, input and output triplet
        placeholder_input = np.zeros(self.input_shape)
        array = placeholder_input[0]
        kernel = self.weights[0][0]
        stride_x_pointer = 0
        while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
            #while the kernel does not go over the x-axis of the array
            #cache all touched weights and input for each kernel
            for column in range(kernel.shape[0]):
                # Cache coordinate only: (weight, input) --> output
                #format: key ((weight_x_pos), (input_x_pos)) ---> (output_x_pos)
                conv_output_coordinate = (stride_x_pointer // self.stride)
                self.cached_calculation[(column, column + stride_x_pointer)] = conv_output_coordinate
                #Cache weight coordinate and input/output values
            #update the stride along the x-axis
            stride_x_pointer += self.stride
        #End of convolution
    def forward(self, input_feature_maps):
        """Zero-pad the input, slide every kernel stack over all input
        channels accumulating into the output maps, cache input/output, and
        return activation(output)."""
        output = np.zeros(self.output_shape)
        #Apply padding
        input_feature_maps = self.apply_zero_padding(input_feature_maps)
        for i in range(0, self.kernel_shape[0]):
            #for each kernel stack
            kernel_stack = self.weights[i]
            for j in range(0, self.kernel_shape[1]):
                #for each kernel in the kernel stack (or input channel)
                kernel = kernel_stack[j]
                array = input_feature_maps[j]
                stride_x_pointer = 0
                conv_counter = 1
                if self.debug:
                    print("**** NEW CONVOLUTION ****")
                while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
                    #while the kernel does not go over the x-axis of the array
                    #Get the snip of the array to apply convolution on
                    array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0]]
                    #apply convolution and get the result
                    result = np.sum(np.multiply(array_snip, kernel))
                    #update the output tensor
                    conv_output_coordinate = (i, stride_x_pointer // self.stride)
                    output[conv_output_coordinate] += result
                    if self.debug:
                        print("convolution nr ", conv_counter )
                        print("\narray_snip: \n", array_snip)
                        print("\nkernel: \n", kernel)
                        print("\nelementwise multiplication: \n", np.multiply(array_snip, kernel))
                        print("\nresult: ", result)
                    conv_counter+=1
                    #update the stride along the x-axis
                    stride_x_pointer += self.stride
                #End of convolution
                if self.debug:
                    print("\n----REVIEW----\n")
                    print("Total convolutions: ", conv_counter)
                    print("\ninput_feature_map:\n ", array)
                    print("\napplied kernel:\n ", kernel)
                    print("\nconvolution result:\n ", output[i])
                    print("***********************************")
        #Cache input and output
        self.cached_output = output
        self.cached_input = input_feature_maps
        #Apply activation
        output = self.activation(self, output)
        return output
    def backward(self, jacobian_L_Z):
        """Accumulate weight gradients from J_L_Z (reshaped back from a
        flattened downstream layer) and return J_L_Y for the upstream layer."""
        #Reshape J_LZ from FC to Conv2D and pass through activation layer
        jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
        #print("JLZ before relu\n", jacobian_L_Z)
        #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)
        #print("cached out after activation\n", self.cached_output)
        jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)
        #print("JLZ after relu\n", jacobian_L_Z)
        # J_L_Z * f'(cached_output)
        #Calculate J_LW
        jacobian_L_W = self.compute_gradients(jacobian_L_Z)
        self.weights_grads += jacobian_L_W
        #Calculate J_LX
        jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
        #Pass Jacobian L Y upstream
        return jacobian_L_Y
    def update_gradients(self, learning_rate):
        """Apply the accumulated kernel gradients and reset the accumulator."""
        self.weights -= learning_rate * self.weights_grads
        self.weights_grads = np.zeros(self.weights.shape)
    def compute_gradients(self, jacobian_L_Z):
        """Loss gradient w.r.t. every kernel weight, assembled by replaying the
        cached (weight, input) -> output position pairs."""
        grads = np.zeros(self.weights.shape)
        #Iterate through all the weights (3 dimension)
        for i in range(self.weights.shape[0]):
            for j in range(self.weights.shape[1]):
                for k in range(self.weights.shape[2]):
                    for key in self.cached_calculation.keys():
                        if key[0] == k:
                            grads[(i,j,k)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]
        return grads
    def compute_J_LY(self, jacobian_L_Z):
        """Loss gradient w.r.t. the layer input, summed over all kernel stacks
        via the cached position pairs."""
        jacobian_L_Y = np.zeros(self.input_shape)
        #Iterate through all the inputs (3 dimension)
        #iterate through all channels/kernel of a kernel stack
        for i in range(self.input_shape[0]):
            #iterate through x-akses of 1d input
            for j in range(self.input_shape[1]):
                for key in self.cached_calculation.keys():
                    if key[1] == j:
                        #for each kernel-stack
                        for l in range(self.weights.shape[0]):
                            jacobian_L_Y[(i,j)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]
        return jacobian_L_Y
    def calculate_output_shape(self):
        """Standard conv output size: floor((I - K + P) / S) + 1."""
        width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.stride + 1)
        return (self.kernel_shape[0], width)
    def calculate_padding(self):
        """(start, stop) zero padding: 'full' pads K-1 on both sides, 'same'
        preserves the spatial size, anything else means no padding."""
        #Calculate padding along the x axis
        s = self.stride
        f = self.kernel_shape[2]
        i = self.input_shape[1]
        if self.mode == "full":
            #Every pixel must experience every weight of the kernel
            p_x_start = f - 1
            p_x_stop = f - 1
        elif self.mode == "same":
            #Every pixel must experience the middle weight of the kernel
            p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
            p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
        else:
            p_x_start = 0
            p_x_stop = 0
        return p_x_start, p_x_stop
    def apply_zero_padding(self, input_feature_maps):
        """Return a copy of the input with per-channel zero padding applied
        according to the precomputed (start, stop) padding amounts."""
        # Apply zero padding to the input feature maps according to the modes, strides and kernel size
        #if self.p_x_start == 0 and self.p_x_stop == 0:
        #    return input_feature_maps
        padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))
        for channel in range(input_feature_maps.shape[0]):
            array = input_feature_maps[channel]
            #Create the background zero array
            padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop))
            #Copy the array in the middle of the zero background
            padded_array[self.p_x_start:array.shape[0]+ self.p_x_start] = array
            #Save the array
            padded_input_feature_maps[channel] = padded_array
        return padded_input_feature_maps
    def __str__(self):
        return "Conv 1D Layer type with "+ str(self.kernel_shape[0]) +" kernels of shape = " + str(self.kernel_shape[1:]) +"input/output of shape" + str(self.input_shape)+"/" + str(self.output_shape) + " stride= " + str(self.stride) + " mode= " + str(self.mode) +" with activation = " + self.activation_name
class softmax():
    """Softmax output layer: maps network logits to a probability distribution."""

    def __init__(self, size):
        # size: number of classes; shape mirrors a (1, size) row vector.
        self.size = size
        self.shape = (1, size)
        self.type = "softmax"
        self.activation_function = activations.softmax

    def forward(self, input_data):
        """Apply the softmax activation to the incoming logits."""
        return self.activation_function(self, input_data)

    def backward(self, jacobian_L_S, softmaxed_network_output):
        """Propagate the loss jacobian through the softmax layer.

        Builds the softmax jacobian at the cached (already softmaxed) output
        and chains it with the incoming loss jacobian.
        """
        jacobian_soft = self.compute_j_soft(softmaxed_network_output)
        return np.dot(jacobian_L_S, jacobian_soft)

    def compute_j_soft(self, S):
        """Jacobian of softmax at S: S_i - S_i^2 on the diagonal, -S_i*S_j elsewhere."""
        S = np.squeeze(S)
        n = len(S)
        j_soft = np.zeros((n, n))
        for row in range(n):
            for col in range(n):
                j_soft[row][col] = (S[row] - S[row] ** 2) if row == col else (-S[row] * S[col])
        return j_soft

    def __str__(self):
        return f"Softmax Layer of size = {self.size}"
|
normal
|
{
"blob_id": "ff99b5fd168d7987e488d7f6d0455619e988f15a",
"index": 3574,
"step-1": "<mask token>\n\n\nclass conv2D:\n <mask token>\n <mask token>\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', 
np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, 
mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] 
+= result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def 
calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for 
i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n",
"step-2": "<mask token>\n\n\nclass conv2D:\n <mask token>\n <mask token>\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', 
np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n <mask token>\n <mask token>\n <mask token>\n\n def 
__str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = 
kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * 
jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, 
size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n",
"step-3": "<mask token>\n\n\nclass conv2D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes,\n weight_init_range, activation, debug):\n self.type = 'conv2D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape[0\n ], kernel_shape[1]\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = (self\n .calculate_padding())\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n \"\"\"print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n print(\"###########################\")\"\"\"\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1] - 1 <= array.shape[1] - 1:\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n conv_output_coordinate = (stride_x_pointer // self.\n strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[(row, column), (row +\n stride_x_pointer, column + stride_y_pointer)\n ] = conv_output_coordinate\n stride_y_pointer 
+= self.strides[1]\n stride_x_pointer += self.strides[0]\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n 
multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = 
math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] +\n self.p_y_start + self.p_y_stop) / self.strides[1] + 1)\n return self.kernel_shape[0], width, height\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = 
conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def 
compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' 
+ str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n",
"step-4": "<mask token>\n\n\nclass FC_layer:\n <mask token>\n <mask token>\n\n def backward(self, jacobian_L_Z):\n Y = self.input\n jacobian_Z_sum = self.create_jacobian_Z_sum()\n simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())\n jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W\n jacobian_Z_Y = np.dot(jacobian_Z_sum, self.weights.T)\n jacobian_L_Y = np.dot(jacobian_L_Z, jacobian_Z_Y)\n jacobian_L_B = jacobian_L_Z\n self.weights_grads = self.weights_grads + jacobian_L_W\n self.bias_grads = self.bias_grads + jacobian_L_B\n return jacobian_L_Y\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'FC Layer type size = ' + str(self.weights.shape\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv2D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes,\n weight_init_range, activation, debug):\n self.type = 'conv2D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape[0\n ], kernel_shape[1]\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = (self\n .calculate_padding())\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n \"\"\"print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n 
print(\"###########################\")\"\"\"\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1] - 1 <= array.shape[1] - 1:\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n conv_output_coordinate = (stride_x_pointer // self.\n strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[(row, column), (row +\n stride_x_pointer, column + stride_y_pointer)\n ] = conv_output_coordinate\n stride_y_pointer += self.strides[1]\n stride_x_pointer += self.strides[0]\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 
2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in 
range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] +\n self.p_y_start + self.p_y_stop) / self.strides[1] + 1)\n return self.kernel_shape[0], width, height\n\n def calculate_padding(self):\n s = self.strides[0]\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.modes[0] == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.modes[0] == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n s = self.strides[1]\n f = self.kernel_shape[3]\n i = self.input_shape[2]\n if self.modes[1] == 'full':\n p_y_start = f - 1\n p_y_stop = f - 1\n elif self.modes[1] == 'same':\n p_y_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_y_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_y_start = 0\n p_y_stop = 0\n return p_x_start, p_x_stop, p_y_start, p_y_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, \n input_feature_maps.shape[2] + self.p_y_start + 
self.p_y_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self\n .p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start,\n self.p_y_start:array.shape[1] + self.p_y_start] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n 
conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n 
self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n 
def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n",
"step-5": "import numpy as np\nimport math\nimport activations\n\nclass FC_layer():\n def __init__(self, input_size, output_size, weight_init_range, activation, debug):\n self.type = \"FC\"\n self.activation_name = activation\n self.shape = (input_size, output_size)\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.input = None\n self.output = None\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size=(input_size, output_size))\n self.bias = np.random.rand(1,output_size)\n self.weights_grads = np.zeros(self.weights.shape)\n self.bias_grads = np.zeros(self.bias.shape)\n self.debug = debug\n\n def forward(self, input_activations):\n # Dot product of input with W plus bias. Cache, activate and return\n output = np.dot(input_activations, self.weights) + self.bias\n # Cache the weighted outputs and inputs\n #self.output = output\n self.input = input_activations\n # Pass the output throug the activation function\n output = self.activation(self, output)\n self.output = output\n return output\n \n def backward(self, jacobian_L_Z):\n # Get the jacobian linking the loss with respect of this layer output from the previous layer.\n # PURPOSE: Calculate the weights gradients, the bias gradient and the input_loss\n # that will be passed to the previous activation layer and so on, up to layer previous input\n Y = self.input\n # Create the jacobian J_Z_sum with the layer cached outputs and the derivative of activation function\n jacobian_Z_sum = self.create_jacobian_Z_sum()\n\n # Find the Weights gradients jacobian_L_W\n # Compute the simple jacobian linking the outputs and the weights\n simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())\n # Then compute the jacobian linking the loss to the weights\n jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W\n\n # Calculate the input layer loss jacobian_L_Y\n # by doing dot product of output layer 
loss and the weigths matrix transposed (so to invert M N to N M, where M < N, we go the other way around)\n jacobian_Z_Y = np.dot(jacobian_Z_sum ,self.weights.T)\n jacobian_L_Y = np.dot( jacobian_L_Z, jacobian_Z_Y)\n \n\n # Bias loss is the as the output loss --> the bias influence on the loss == layer activation output influence on the loss\n jacobian_L_B = jacobian_L_Z\n\n # Now save the bias loss and weight loss (representing the calculated gradiants).\n # This will be updated at the end of the batch, or SGD\n self.weights_grads =self.weights_grads + jacobian_L_W\n self.bias_grads = self.bias_grads + jacobian_L_B\n \n #Finally return the calculated input loss --> this will be the output loss of the next layer\n return jacobian_L_Y\n\n def create_jacobian_Z_sum(self):\n return np.identity(self.output[0].size) * self.d_activation(self, self.output)\n\n def update_gradients(self, learning_rate, gradient_avg_factor = 1):\n #Update gradients, usefull when doing batch learning\n # Get the avg of the gradients (for SGD divide by 1, else divide by batchsize)\n ## UPDATE: removed the division by batchsize: Implemented this factor in the learning rate\n #self.weights_grads = self.weights_grads / gradient_avg_factor\n #self.bias_grads = self.bias_grads / gradient_avg_factor\n\n # Update weights and biases\n self.weights -= learning_rate * self.weights_grads\n self.bias -= learning_rate * self.bias_grads\n self.weights_grads = np.zeros(self.weights.shape)\n self.bias_grads = np.zeros(self.bias.shape)\n\n\n def __str__(self):\n return \"FC Layer type size = \" + str(self.weights.shape) + \" with activation = \" + self.activation_name\n\nclass conv2D():\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes, weight_init_range, activation, debug):\n self.type = \"conv2D\"\n self.input_shape = input_shape\n self.activation_name = activation\n #Kernel stack shape for the layer (N, I, K_x, K_y)\n self.kernel_shape = (n_kernels, input_shape[0], kernel_shape[0], 
kernel_shape[1])\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n \n \n '''print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n print(\"###########################\")'''\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n stride_y_pointer = 0\n #while the kernel does not go over the x-akse of the array\n while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):\n #while the kernel does not go over the x-akse of the array\n #cache all touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((weight_x_pos, weight_y_pos), (input_x_pos, input_y_pos)) ---> (output_x_pos, output_y_pos)\n conv_output_coordinate = (stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[((row, column), (row + stride_x_pointer , column + stride_y_pointer))] = 
conv_output_coordinate\n #Cache weight coordinate and input/output values\n # Update the stride long the y-axis\n stride_y_pointer += self.strides[1]\n #update the stride long the x-axis\n stride_x_pointer += self.strides[0]\n #End of convolution\n \n\n def forward(self, input_feature_maps):\n #reset the cached calculations from the previous forward pass\n #self.cached_calculation = {}\n output = np.zeros(self.output_shape)\n #Apply padding\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n #for each kernel stack\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n #for each kernel in the kernel stack (or input channel)\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print(\"**** NEW CONVOLUTION ****\")\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n stride_y_pointer = 0\n #while the kernel does not go over the x-akse of the array\n while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):\n #while the kernel does not go over the x-akse of the array\n #Get the snip of the array to apply convolution on\n array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0], stride_y_pointer: stride_y_pointer + kernel.shape[1]]\n #apply convolution and get the result \n result = np.sum(np.multiply(array_snip, kernel)) \n #update the output tensor\n conv_output_coordinate = (i, stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])\n output[conv_output_coordinate] += result\n '''#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, 
output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result'''\n if self.debug:\n print(\"convolution nr \", conv_counter )\n print(\"\\narray_snip: \\n\", array_snip)\n print(\"\\nkernel: \\n\", kernel)\n print(\"\\nelementwise multiplication: \\n\", np.multiply(array_snip, kernel))\n print(\"\\nresult: \", result)\n # Update the stride long the y-axis\n stride_y_pointer += self.strides[1]\n conv_counter+=1\n #update the stride long the x-axis\n stride_x_pointer += self.strides[0]\n #End of convolution\n if self.debug:\n print(\"\\n----REVIEW----\\n\")\n print(\"Total convolutions: \", conv_counter)\n print(\"\\ninput_feature_map:\\n \", array)\n print(\"\\napplied kernel:\\n \", kernel)\n print(\"\\nconvolution result:\\n \", output[i])\n print(\"***********************************\")\n #Cache input and output\n self.cached_output = output\n self.cached_input = input_feature_maps\n #Apply activation\n output = self.activation(self, output)\n return output\n \n \n def backward(self, jacobian_L_Z):\n #Reshape J_LZ from FC to Conv2D and pass through activation layer\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n #print(\"JLZ før relu\\n\", jacobian_L_Z)\n #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)\n #print(\"cached out after activation\\n\", self.cached_output)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)\n #print(\"JLZ etter relu\\n\", jacobian_L_Z)\n # J_L_Z * f'(cached_output)\n\n #Calculate J_LW\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n\n #Calculate J_LX\n jacobian_L_Y = 
self.compute_J_LY(jacobian_L_Z)\n\n #Pass Jacobian L Y upstream\n return jacobian_L_Y\n \n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n #Iterate through all the weights (4 dimension)\n #Iterate through the kernel stacks\n for i in range(self.weights.shape[0]):\n #Iterate throught each kernel/input channel\n for j in range(self.weights.shape[1]):\n #iterate through the x-axis of the kernel\n for k in range(self.weights.shape[2]):\n #iterate through the y-axis of the kernel\n for l in range(self.weights.shape[3]):\n #cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}\n for key in self.cached_calculation.keys():\n if key[0] == (k,l):\n grads[(i,j,k,l)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n #Iterate through all the inputs (3 dimension)\n #iterate through all channels/kernel of a kernel stack\n for i in range(self.input_shape[0]):\n #iterate through x-akses of 2d input\n for j in range(self.input_shape[1]):\n #iterate through y-axes of 2d input\n for k in range(self.input_shape[2]):\n #cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}\n for key in self.cached_calculation.keys():\n if key[1] == (j,k):\n #for each kernel-stack\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[(i,j,k)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]\n return jacobian_L_Y\n \n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] + self.p_y_start + self.p_y_stop)/self.strides[1] + 1 )\n return 
(self.kernel_shape[0], width, height)\n\n def calculate_padding(self):\n #Calculate padding long the x axis\n s = self.strides[0]\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.modes[0] == \"full\":\n #Every pixel must experience every weight of the kernel\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.modes[0] == \"same\":\n #Every pixel must experience the middle weight of the kernel\n p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_x_start = 0\n p_x_stop = 0\n\n\n #Calculate padding long y axis\n s = self.strides[1]\n f = self.kernel_shape[3]\n i = self.input_shape[2]\n if self.modes[1] == \"full\":\n #Every pixel must experience every weight of the kernel\n p_y_start = f - 1\n p_y_stop = f - 1\n elif self.modes[1] == \"same\":\n #Every pixel must experience the middle weight of the kernel\n p_y_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_y_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_y_start = 0\n p_y_stop = 0\n\n\n return p_x_start, p_x_stop, p_y_start, p_y_stop\n \n def apply_zero_padding(self, input_feature_maps):\n # Apply zero padding to the input feature maps according to the modes, strides and kernel size\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, input_feature_maps.shape[2] + self.p_y_start + self.p_y_stop ))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n #Create the background zero array\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))\n #Copy the array in the middle of the zero background\n padded_array[self.p_x_start:array.shape[0]+ self.p_x_start, self.p_y_start:array.shape[1]+ self.p_y_start] = array \n #Save the array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 
\"Conv 2D Layer type with \"+ str(self.kernel_shape[0]) +\" kernels of shape = \" + str(self.kernel_shape[1:]) +\"input/output of shape\" + str(self.input_shape)+\"/\" + str(self.output_shape) + \" strides= s\" + str(self.strides) + \" modes= \" + str(self.modes) +\" with activation = \" + self.activation_name\n\nclass conv1D():\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode, weight_init_range, activation, debug):\n self.type = \"conv1D\"\n self.input_shape = input_shape\n self.activation_name = activation\n #Kernel stack shape for the layer (Num_kernel_stacks, Channels, Kernel_x)'\n self.kernel_shape = (n_kernels, input_shape[0], kernel_shape)\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n #Performe an empty convolution and cache all the position of the kernel, input and output triplet\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n #while the kernel does not go over the x-akse of the array\n #cache all touched weights and input for each kernel\n for column in range(kernel.shape[0]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((weight_x_pos), (input_x_pos)) ---> (output_x_pos)\n conv_output_coordinate = (stride_x_pointer // self.stride)\n 
self.cached_calculation[(column, column + stride_x_pointer)] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #update the stride long the x-axis\n stride_x_pointer += self.stride\n #End of convolution\n \n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n #Apply padding\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n #for each kernel stack\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n #for each kernel in the kernel stack (or input channel)\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print(\"**** NEW CONVOLUTION ****\")\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n #while the kernel does not go over the x-akse of the array\n #Get the snip of the array to apply convolution on\n array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0]]\n #apply convolution and get the result \n result = np.sum(np.multiply(array_snip, kernel)) \n #update the output tensor\n conv_output_coordinate = (i, stride_x_pointer // self.stride)\n output[conv_output_coordinate] += result\n if self.debug:\n print(\"convolution nr \", conv_counter )\n print(\"\\narray_snip: \\n\", array_snip)\n print(\"\\nkernel: \\n\", kernel)\n print(\"\\nelementwise multiplication: \\n\", np.multiply(array_snip, kernel))\n print(\"\\nresult: \", result)\n conv_counter+=1\n #update the stride long the x-axis\n stride_x_pointer += self.stride\n #End of convolution\n if self.debug:\n print(\"\\n----REVIEW----\\n\")\n print(\"Total convolutions: \", conv_counter)\n print(\"\\ninput_feature_map:\\n \", array)\n print(\"\\napplied kernel:\\n \", kernel)\n print(\"\\nconvolution result:\\n \", output[i])\n print(\"***********************************\")\n #Cache input and output\n self.cached_output = output\n self.cached_input = input_feature_maps\n 
#Apply activation\n output = self.activation(self, output)\n return output\n \n \n def backward(self, jacobian_L_Z):\n #Reshape J_LZ from FC to Conv2D and pass through activation layer\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n #print(\"JLZ før relu\\n\", jacobian_L_Z)\n #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)\n #print(\"cached out after activation\\n\", self.cached_output)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)\n #print(\"JLZ etter relu\\n\", jacobian_L_Z)\n # J_L_Z * f'(cached_output)\n\n #Calculate J_LW\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n\n #Calculate J_LX\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n\n #Pass Jacobian L Y upstream\n return jacobian_L_Y\n \n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n #Iterate through all the weights (3 dimension)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[(i,j,k)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n #Iterate through all the inputs (3 dimension)\n #iterate through all channels/kernel of a kernel stack\n for i in range(self.input_shape[0]):\n #iterate through x-akses of 1d input\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n #for each kernel-stack\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[(i,j)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = 
math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.stride + 1)\n return (self.kernel_shape[0], width)\n\n def calculate_padding(self):\n #Calculate padding long the x axis\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == \"full\":\n #Every pixel must experience every weight of the kernel\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == \"same\":\n\n #Every pixel must experience the middle weight of the kernel\n p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n \n def apply_zero_padding(self, input_feature_maps):\n # Apply zero padding to the input feature maps according to the modes, strides and kernel size\n #if self.p_x_start == 0 and self.p_x_stop == 0:\n # return input_feature_maps\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n #Create the background zero array\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop))\n #Copy the array in the middle of the zero background\n padded_array[self.p_x_start:array.shape[0]+ self.p_x_start] = array \n #Save the array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return \"Conv 1D Layer type with \"+ str(self.kernel_shape[0]) +\" kernels of shape = \" + str(self.kernel_shape[1:]) +\"input/output of shape\" + str(self.input_shape)+\"/\" + str(self.output_shape) + \" stride= \" + str(self.stride) + \" mode= \" + str(self.mode) +\" with activation = \" + self.activation_name\n\nclass softmax():\n def __init__(self, size):\n self.size = size\n self.shape = (1, size)\n self.type = \"softmax\"\n self.activation_function = activations.softmax\n\n def 
forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n # Create jacobian of derivate of softmax\n jacobian_soft = self.compute_j_soft(softmaxed_network_output) \n # Compute jacobian linking Loss to output \n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i]**2\n else:\n j_soft[i][j] = -S[i]*S[j]\n return j_soft\n\n def __str__(self):\n return \"Softmax Layer of size = \" + str(self.size)\n\n",
"step-ids": [
24,
25,
28,
33,
39
]
}
|
[
24,
25,
28,
33,
39
] |
# Python 3.6. Written by Alex Clarke
# Breakup a large fits image into smaller ones, with overlap, and save to disk.
# Sourcefinding is run on each cutout, and catalogues are sifted to remove duplicates from the overlap.
import glob
import itertools
import multiprocessing
import os
import pickle

import bdsf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from matplotlib.pyplot import cm
from memory_profiler import profile
# list of functions
# load/save pickle objects
# save_cutout
# do_image_chopping
# make_image_cubes
# do_sourcefinding
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
#Loading/saving python data objects
def save_obj(obj, name):
    """Pickle `obj` to the file '<name>.pkl' using the highest protocol."""
    target = name + '.pkl'
    with open(target, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Return the object previously pickled to '<name>.pkl'."""
    with open(name + '.pkl', 'rb') as handle:
        payload = pickle.load(handle)
    return payload
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def update_header_from_cutout2D(hdu, cutout):
    """Copy a Cutout2D's data and WCS back into `hdu` and return it.

    The cutout's 2D array is re-wrapped as a (stokes, freq, y, x) float32
    array with degenerate leading axes, and the celestial WCS cards are
    written into the header one by one (cutout.wcs.to_header() is not used
    because it only handles 2D images, not the freq/stokes axes).
    """
    # Rebuild the data array with the degenerate leading axes restored.
    reshaped = np.zeros((1, 1, cutout.data.shape[0], cutout.data.shape[1]),
                        dtype=np.float32)
    reshaped[0, 0, :, :] = cutout.data
    hdu.data = reshaped
    # Header cards derived from the cutout's celestial WCS.
    cut_wcs = cutout.wcs.wcs
    keywords = {
        'CRVAL1': cut_wcs.crval[0],
        'CRVAL2': cut_wcs.crval[1],
        'CRPIX1': cut_wcs.crpix[0],
        'CRPIX2': cut_wcs.crpix[1],
        'CDELT1': cut_wcs.cdelt[0],
        'CDELT2': cut_wcs.cdelt[1],
        'NAXIS1': cutout.wcs.pixel_shape[0],
        'NAXIS2': cutout.wcs.pixel_shape[1],
    }
    for key, value in keywords.items():
        hdu.header.set(key, value)
    return hdu
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_primarybeam_correction(pbname, imagename):
    """Divide `imagename` by a regridded cutout of the primary beam `pbname`.

    Writes three files next to the inputs:
      * <pbname minus '.fits'>_cutout.fits        - PB cut to the image field of view
      * <pbname minus '.fits'>_cutout_regrid.fits - PB regridded to the image pixel grid
      * <imagename minus '.fits'>_PBCOR.fits      - the primary-beam-corrected image

    NOTE(review): this function uses `u` (astropy.units), `SkyCoord`
    (astropy.coordinates), `montage` (a Montage wrapper) and `os`, none of
    which are imported at the top of this file -- confirm the imports are
    present before running.
    """
    print(' Preparing to apply the primary beam correction to {0}'.format(imagename))
    hdu = fits.open(imagename)[0]
    pb = fits.open(pbname)[0]
    wcs = WCS(pb.header)
    # cutout pb field of view to match image field of view
    x_size = hdu.header['NAXIS1']
    x_pixel_deg = hdu.header['CDELT2'] # CDELT1 is negative, so take positive one
    # assumes a square image (NAXIS1 used for both axes) -- TODO confirm
    size = (x_size*x_pixel_deg*u.degree, x_size*x_pixel_deg*u.degree) # angular size of cutout, using astropy coord. approx 32768*0.6 arcseconds.
    position = SkyCoord(pb.header['CRVAL1']*u.degree, pb.header['CRVAL2']*u.degree) # RA and DEC of beam PB pointing
    print(' Cutting out image FOV from primary beam image...')
    cutout = Cutout2D(pb.data[0,0,:,:], position=position, size=size, mode='trim', wcs=wcs.celestial, copy=True)
    # Update the FITS header with the cutout WCS by hand using my own function
    # don't use cutout.wcs.to_header() because it doesn't account for the freq and stokes axes. is only compatible with 2D fits images.
    #pb.header.update(cutout.wcs.to_header()) #
    pb = update_header_from_cutout2D(pb, cutout)
    # write updated fits file to disk
    pb.writeto(pbname[:-5]+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
    # regrid PB image cutout to match pixel scale of the image FOV
    print(' Regridding image...')
    # get header of image to match PB to
    montage.mGetHdr(imagename, 'hdu_tmp.hdr')
    # regrid pb image (270 pixels) to size of ref image (32k pixels)
    montage.reproject(in_images=pbname[:-5]+'_cutout.fits', out_images=pbname[:-5]+'_cutout_regrid.fits', header='hdu_tmp.hdr', exact_size=True)
    os.remove('hdu_tmp.hdr') # get rid of header text file saved to disk
    # update montage output to float32 and restore the degenerate stokes/freq axes
    pb = fits.open(pbname[:-5]+'_cutout_regrid.fits', mode='update')
    newdata = np.zeros((1,1,pb[0].data.shape[0], pb[0].data.shape[1]), dtype=np.float32)
    newdata[0,0,:,:] = pb[0].data
    pb[0].data = newdata # naxis will automatically update to 4 in the header
    # fix nans introduced in primary beam by montage at edges and write to new file
    print(' A small buffer of NaNs is introduced around the image by Montage when regridding to match the size, \n these have been set to the value of their nearest neighbours to maintain the same image dimensions')
    mask = np.isnan(pb[0].data)
    # 1D interpolation over the flattened array fills each NaN from its
    # nearest non-NaN neighbours in flattened index order
    pb[0].data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), pb[0].data[~mask])
    pb.flush()
    pb.close()
    # apply primary beam correction (element-wise division by the PB response)
    pb = fits.open(pbname[:-5]+'_cutout_regrid.fits')[0]
    hdu.data = hdu.data / pb.data
    hdu.writeto(imagename[:-5]+'_PBCOR.fits', overwrite=True)
    print(' Primary beam correction applied to {0}'.format(imagename[:-5]+'_PBCOR.fits') )
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_image_chopping(input_image, split_into):
    """Chop a square FITS image into split_into**2 overlapping cutouts.

    Each cutout is 5% larger than an exact grid cell so neighbouring cutouts
    overlap (duplicate sources are sifted out later). Cutouts are written as
    '<input minus .fits>_<i>_cutout.fits' and an annotated overview PNG is
    saved as '<input_image>_cutout_annotation.png'.

    Parameters
    ----------
    input_image : str
        Filename of a square 4-axis (stokes, freq, y, x) FITS image.
    split_into : int
        Number of divisions per axis; produces split_into**2 cutouts.
    """
    hdu = fits.open(input_image)[0]
    wcs = WCS(hdu.header)
    # currently hard coded to only accept square images
    im_width = hdu.header['NAXIS1']  # get image width
    print(' Input fits image dimensions: {0}'.format(im_width))
    print(' Cutting into {0} images of dimensions {1}'.format(split_into**2, im_width/split_into))
    # Keep a reference to the full-size 2D pixel array. Fixed: the original
    # cut from hdu.data inside the loop, but update_header_from_cutout2D
    # replaces hdu.data with each cutout, so every cutout after the first
    # was chopped out of the previous cutout instead of the full image.
    data = hdu.data[0, 0, :, :]
    # get centre positions for each new fits image. assuming x=y. divide image width by split_into*2
    positions = np.array(range(1, (split_into*2), 2)) * (im_width/(split_into*2))
    # round to integer as in pixel coordinates. this approximation shouldn't matter since we include a buffer later
    positions = positions.astype(int)
    positions_x = positions  # copies to append to in the loop below
    positions_y = positions
    # Make a 2D array of all centre positions. length = split_into**2.
    for i in range(split_into-1):
        # stack x coords repeating split_into times, e.g. [x1..x4, x1..x4, ...]
        positions_x = np.hstack((positions_x, positions))
        # stack y coords; np.roll shifts indices to get every combination
        positions_y = np.hstack((positions_y, np.roll(positions, i+1)))
    # 2D array of centre coordinates: [[x1,y1], [x2,y2], ...]
    position_coords_inpixels = np.array([positions_x, positions_y]).T
    # 5% buffer so images overlap; only needs to cover sources cut by an edge
    size = (im_width/split_into) * 1.05
    # size array needs the same shape as position_coords_inpixels
    size_inpixels = np.array([[size, size]]*(split_into**2)).astype(int)
    # Plot the original image and overlay the cutout boundaries.
    plt.figure()
    # Clip a display-only copy to a sensible brightness range. Fixed: the
    # original clipped an undefined name `data` and then displayed the
    # unclipped array anyway.
    display = np.clip(data, 1e-7, 1e-5)
    plt.imshow(display, origin='lower')
    colourlist = iter(cm.rainbow(np.linspace(0, 1, split_into**2)))  # one colour per cutout
    for i in range(split_into**2):
        print(' Cutting out image {0} of {1}'.format(i+1, split_into**2))
        # Fixed: the original was missing the closing parenthesis on tuple(...),
        # a syntax error.
        cutout = Cutout2D(data, position=tuple(position_coords_inpixels[i]),
                          size=tuple(size_inpixels[i]), mode='trim',
                          wcs=wcs.celestial, copy=True)
        cutout.plot_on_original(color=next(colourlist))
        # Update the FITS header by hand (see update_header_from_cutout2D)
        hdu = update_header_from_cutout2D(hdu, cutout)
        hdu.writeto(input_image[:-5]+'_'+str(i)+'_cutout.fits', overwrite=True)
    # Fixed: the printed filename now matches the file actually saved.
    print(' Saving cutout arrangement as {0}'.format(input_image+'_cutout_annotation.png'))
    plt.savefig(input_image+'_cutout_annotation.png')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
# make image cube for pybdsf spectral index mode, looping over all cutouts
def make_image_cubes_for_cutouts():
    """Stack matching 560 MHz and 1400 MHz cutouts into 2-channel cubes.

    Cutout filenames are matched by sorted order, so both globs must produce
    the same number of files in the same spatial order. Each cube is written
    as 'cube_cutout_<i>.fits' for PyBDSF's spectral index mode.
    """
    # get cutout file names; sorting keeps the two bands matched up
    images_560 = sorted(glob.glob('560*_cutout.fits'))
    images_1400 = sorted(glob.glob('1400*_cutout.fits'))
    for i, (file560, file1400) in enumerate(zip(images_560, images_1400)):
        print(' Making cube {0} of {1}'.format(i, len(images_560)-1))
        hdu560 = fits.open(file560)[0]
        hdu1400 = fits.open(file1400)[0]
        # Cutout data is stored as (1, 1, y, x). Fixed: the original sized
        # the cube from shape[0]/shape[1] (the degenerate stokes/freq axes,
        # both 1), producing a useless (2, 1, 1) cube instead of (2, y, x).
        ny, nx = hdu560.data.shape[2], hdu560.data.shape[3]
        cube = np.zeros((2, ny, nx))
        cube[0, :, :] = hdu560.data[0, 0, :, :]  # 560 MHz channel
        cube[1, :, :] = hdu1400.data[0, 0, :, :]  # 1400 MHz channel
        hdu_new = fits.PrimaryHDU(data=cube, header=hdu560.header)
        # Header puts 560MHz as ch0 but assigns the wrong interval to the
        # next channel; set it explicitly: 1400 MHz - 560 MHz = 840 MHz.
        hdu_new.header.set('CDELT3', 840000000)
        # overwrite=True so re-running the pipeline does not crash
        hdu_new.writeto('cube_cutout_'+str(i)+'.fits', overwrite=True)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_sourcefinding(imagename, si=True):
    """Run PyBDSF source finding on `imagename`.

    Parameters
    ----------
    imagename : str
        FITS image (or a two-channel frequency cube when si=True).
    si : bool
        True  -> spectral-index mode over [560e6, 1400e6] Hz.
        False -> single-band finding with an adaptive rms box.
    """
    # Get beam info manually; the SKA image seems to cause PyBDSF issues
    # finding this itself. A context manager guarantees the file handle is
    # closed even if a header card is missing (the original leaked it then).
    with fits.open(imagename) as f:
        beam_maj = f[0].header['BMAJ']
        beam_min = f[0].header['BMIN']
        # BPA is not in the SKA fits header, but we know the beam is circular
        beam_pa = 0
    # Sensible and thorough hyper-parameters. psf_vary and adaptive_rms_box
    # are more computationally intensive, but needed.
    # Fixed: the original used two independent `if si==True` / `if si==False`
    # tests, which silently did nothing for truthy non-bool values of si.
    if si:
        img = bdsf.process_image(imagename, adaptive_rms_box=False, spectralindex_do=True, advanced_opts=True,
            atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),
            blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0,
            collapse_mode='average', collapse_wt='unity', frequency_sp=[560e6, 1400e6])
    else:
        img = bdsf.process_image(imagename, adaptive_rms_box=True, advanced_opts=True,
            atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),
            blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, psf_snrtop=0.30)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
if __name__ == '__main__':
    # Apply the primary beam correction to both frequency bands.
    do_primarybeam_correction('560mhz_primarybeam.fits', '560mhz1000hours.fits')
    do_primarybeam_correction('1400mhz_primarybeam.fits', '1400mhz1000hours.fits')
    # Divide x and y axes by split_into, giving split_into**2 output images.
    # A 3x3 grid lets pybdsf run efficiently (it fails on the 4GB 32k x 32k
    # pixel image) whilst avoiding cutting through the centre of the image.
    split_into = 3
    # NOTE(review): the chopping below uses the uncorrected input images,
    # not the *_PBCOR.fits files produced above -- confirm this is intended.
    input_image_560 = '560mhz1000hours.fits'
    input_image_1400 = '1400mhz1000hours.fits'
    # Cut up images and save cutouts to disk.
    do_image_chopping(input_image_560, split_into)
    do_image_chopping(input_image_1400, split_into)
    # Make a per-cutout frequency cube so pybdsf can use spectral index mode.
    # Fixed: the original called make_image_cubes(), a name that does not
    # exist; the function defined above is make_image_cubes_for_cutouts().
    make_image_cubes_for_cutouts()
    # Source finding on the individual frequency-band cutouts.
    imagenames = glob.glob('*_cutout.fits')
    for image in imagenames:
        # NOTE(review): si defaults to True (spectral-index mode); for
        # single-band cutouts si=False is probably intended -- confirm.
        do_sourcefinding(image)
    # Source finding on cubes to get spectral indices (si=True) is currently
    # disabled: images must be chopped to the same field of view before
    # making cubes. Use code from pipeline.py if needed?
    #imagenames = sorted(glob.glob('cube_cutout_*.fits'))
    #for image in imagenames:
    #    do_sourcefinding(image, si=True)
|
normal
|
{
"blob_id": "a22aa66bd65033750f23f47481ee84449fa80dbc",
"index": 8995,
"step-1": "# Python 3.6. Written by Alex Clarke\n# Breakup a large fits image into smaller ones, with overlap, and save to disk.\n# Sourecfinding is run on each cutout, and catalogues are sifted to remove duplicates from the overlap.\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport multiprocessing\nimport itertools\nimport bdsf\nimport glob\nimport pickle\n\nfrom matplotlib.pyplot import cm\nfrom astropy.io import fits\nfrom astropy.nddata import Cutout2D\nfrom astropy.wcs import WCS\nfrom memory_profiler import profile\n\n# list of functions\n# load/save pickle objects\n# save_cutout\n# do_image_chopping\n# make_image_cubes\n# do_sourcefinding\n\n\n\n # ------ ------ ------ ------ ------ ------ ------ ------ ------ ------\n\t\n\t\n\n#Loading/saving python data objects\ndef save_obj(obj, name ):\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name ):\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\n\n\n # ------ ------ ------ ------ ------ ------ ------ ------ ------ ------\n\n\n\ndef update_header_from_cutout2D(hdu, cutout):\n # update data\n newdata = np.zeros((1,1,cutout.data.shape[0], cutout.data.shape[1]), dtype=np.float32)\n newdata[0,0,:,:] = cutout.data\n hdu.data = newdata\n # update header cards returned from cutout2D wcs:\n hdu.header.set('CRVAL1', cutout.wcs.wcs.crval[0])\n hdu.header.set('CRVAL2', cutout.wcs.wcs.crval[1])\n hdu.header.set('CRPIX1', cutout.wcs.wcs.crpix[0])\n hdu.header.set('CRPIX2', cutout.wcs.wcs.crpix[1])\n hdu.header.set('CDELT1', cutout.wcs.wcs.cdelt[0])\n hdu.header.set('CDELT2', cutout.wcs.wcs.cdelt[1])\n hdu.header.set('NAXIS1', cutout.wcs.pixel_shape[0])\n hdu.header.set('NAXIS2', cutout.wcs.pixel_shape[1])\n return hdu\n\n\n\n # ------ ------ ------ ------ ------ ------ ------ ------ ------ ------\n\n\t\n\t\ndef do_primarybeam_correction(pbname, imagename):\n print(' Preparing to apply the primary beam correction to 
{0}'.format(imagename))\n hdu = fits.open(imagename)[0]\n pb = fits.open(pbname)[0]\n wcs = WCS(pb.header)\n\n # cutout pb field of view to match image field of view\n x_size = hdu.header['NAXIS1']\n x_pixel_deg = hdu.header['CDELT2'] # CDELT1 is negative, so take positive one\n size = (x_size*x_pixel_deg*u.degree, x_size*x_pixel_deg*u.degree) # angular size of cutout, using astropy coord. approx 32768*0.6 arcseconds.\n position = SkyCoord(pb.header['CRVAL1']*u.degree, pb.header['CRVAL2']*u.degree) # RA and DEC of beam PB pointing\n print(' Cutting out image FOV from primary beam image...')\n cutout = Cutout2D(pb.data[0,0,:,:], position=position, size=size, mode='trim', wcs=wcs.celestial, copy=True)\n\n # Update the FITS header with the cutout WCS by hand using my own function\n # don't use cutout.wcs.to_header() because it doesn't account for the freq and stokes axes. is only compatible with 2D fits images.\n #pb.header.update(cutout.wcs.to_header()) #\n pb = update_header_from_cutout2D(pb, cutout)\n # write updated fits file to disk\n pb.writeto(pbname[:-5]+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file\n\n # regrid PB image cutout to match pixel scale of the image FOV\n print(' Regridding image...')\n # get header of image to match PB to\n montage.mGetHdr(imagename, 'hdu_tmp.hdr')\n # regrid pb image (270 pixels) to size of ref image (32k pixels)\n montage.reproject(in_images=pbname[:-5]+'_cutout.fits', out_images=pbname[:-5]+'_cutout_regrid.fits', header='hdu_tmp.hdr', exact_size=True)\n os.remove('hdu_tmp.hdr') # get rid of header text file saved to disk\n\n # update montage output to float32\n pb = fits.open(pbname[:-5]+'_cutout_regrid.fits', mode='update')\n newdata = np.zeros((1,1,pb[0].data.shape[0], pb[0].data.shape[1]), dtype=np.float32)\n newdata[0,0,:,:] = pb[0].data\n pb[0].data = newdata # naxis will automatically update to 4 in the header\n\n # fix nans introduced in primary beam by montage at edges and write to new file\n 
print(' A small buffer of NaNs is introduced around the image by Montage when regridding to match the size, \\n these have been set to the value of their nearest neighbours to maintain the same image dimensions')\n mask = np.isnan(pb[0].data)\n pb[0].data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), pb[0].data[~mask])\n pb.flush()\n pb.close()\n\n # apply primary beam correction\n pb = fits.open(pbname[:-5]+'_cutout_regrid.fits')[0]\n hdu.data = hdu.data / pb.data\n hdu.writeto(imagename[:-5]+'_PBCOR.fits', overwrite=True)\n print(' Primary beam correction applied to {0}'.format(imagename[:-5]+'_PBCOR.fits') )\n\n\n\t\n # ------ ------ ------ ------ ------ ------ ------ ------ ------ ------\n\n\n\ndef do_image_chopping(input_image, split_into):\n hdu = fits.open(input_image)[0]\n wcs = WCS(hdu.header)\n # currently hard coded to only accept square images\n im_width = hdu.header['NAXIS1'] # get image width\n print(' Input fits image dimensions: {0}'.format(im_width))\n print(' Cutting into {0} images of dimensions {1}'.format(split_into**2, im_width/split_into))\n \n # get centre positions for each new fits image. assuming x=y. divide image width by split_into*2\n positions = np.array(range(1,(split_into*2),2))*(im_width/(split_into*2))\n # round to integer as in pixel coordinates. this approximation shouldn't matter since we include a buffer later\n positions = positions.astype(int) # keep as original\n positions_x = positions # make copy to append to in loop\n positions_y = positions # make copy to append to in loop\n \n # Make a 2D array of all centre positions. length = split_into**2.\n for i in range(split_into-1):\n # stack x coords repeating split_into times.\n positions_x = np.hstack(( positions_x, positions )) # e.g. [ x1, x2, x3, x4, x1, x2, x3, x4, repeat split_into times] \n # stack y coords, but np.roll shifts array indices by 1 to get different combinations\n positions_y = np.hstack(( positions_y, np.roll(positions,i+1) )) # e.g. 
[ (y1, y2, y3, y4), (y2, y3, y4, y1), (y3, y4, y1, y2), ... ]\n \n # create 2D array with coordinates: [ [x1,y1], [x2,y2], [x3,y3]... ]\n position_coords_inpixels = np.array([positions_x,positions_y]).T\n # create buffer of 5% so images overlap. This can be small... only needs to account for image edge cutting through \n size = (im_width/split_into) * 1.05 # e.g. 4000 pixel image becomes 4200. sifting to remove duplicates later\n # size array needs to be same shape as position_coords_inpixels\n size_inpixels = np.array([[size,size]]*(split_into**2)).astype(int)\n \n # loop over images to be cut out\n plt.figure() # plot original image and overlay cutout boundaries at the end.\n data[data<1e-7]=1e-7 # min pixel brightness to display\n data[data>1e-5]=1e-5 # max pixel brightness to display\n plt.imshow(hdu.data[0,0,:,:], origin='lower')\n colourlist=iter(cm.rainbow(np.linspace(0,1,split_into**2))) # each cutout a different colour\n for i in range(split_into**2):\n print(' Cutting out image {0} of {1}'.format(i+1, split_into**2))\n\tcutout = Cutout2D(hdu.data[0,0,:,:], position=tuple(position_coords_inpixels[i], size=tuple(size_inpixels[i]), mode='trim', wcs=wcs.celestial, copy=True)\n\tcutout.plot_on_original(color=next(colourlist))\n \t# Update the FITS header with the cutout WCS by hand using my own function\n \thdu = update_header_from_cutout2D(hdu, cutout)\n \thdu.writeto(input_image[:-5]+'_'+str(i)+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file\n print(' Saving cutout arrangement as {0}'.format(input_image+'_cutouts.png'))\n plt.savefig(input_image+'_cutout_annotation.png')\n \n\n\n # ------ ------ ------ ------ ------ ------ ------ ------ ------ ------\n\n\n \n# make image cube for pybdsf spectral index mode, looping over all cutouts\ndef make_image_cubes_for_cutouts():\n # get cutout file names, must be in same order so they are matched correctly\n images_560 = sorted(glob.glob('560*_cutout.fits'))\n images_1400 = 
sorted(glob.glob('1400*_cutout.fits'))\n # loop over image cutouts to make cube for each of them\n for file560, file1400, i in zip(images_560, images_1400, range(len(images_560))):\n print(' Making cube {0} of {1}'.format(i, len(images_560)-1))\n hdu560 = fits.open(file560)[0]\n hdu1400 = fits.open(file1400)[0]\n # make cube from the input files along freq axis\n cube = np.zeros((2,hdu560.data.shape[0],hdu560.data.shape[1]))\n cube[0,:,:] = hdu560.data[0,0,:,:] # add 560 Mhz data\n cube[1,:,:] = hdu1400.data[0,0,:,:] # add 1400 Mhz data\n hdu_new = fits.PrimaryHDU(data=cube, header=hdu560.header)\n # update frequency info in the header. It puts 560MHz as ch0, but incorrectly assigns the interval to the next freq channel\n hdu_new.header.set('CDELT3', 840000000) # 1400 MHz - 560 MHz = 840 MHz.\n hdu_new.writeto('cube_cutout_'+str(i)+'.fits')\n \n \n \n # ------ ------ ------ ------ ------ ------ ------ ------ ------ ------\n \n \n \ndef do_sourcefinding(imagename, si=True):\n # get beam info manually. SKA image seems to cause PyBDSF issues finding this info.\n f = fits.open(imagename)\n beam_maj = f[0].header['BMAJ']\n beam_min = f[0].header['BMIN']\n #beam_pa = f[0].header['BPA'] # not in SKA fits header, but we know it's circular\n beam_pa = 0\n f.close()\n # using some sensible and thorough hyper-parameters. 
PSF_vary and adaptive_rms_box is more computationally intensive, but needed.\n if si==True:\n img = bdsf.process_image(imagename, adaptive_rms_box=False, spectralindex_do=True, advanced_opts=True,\\\n atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\\\n blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, \\\n collapse_mode='average', collapse_wt='unity', frequency_sp=[560e6, 1400e6])\n\t\t\t\t\t\t\t\t\t\n if si==False: \n img = bdsf.process_image(imagename, adaptive_rms_box=True, advanced_opts=True,\\\n atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\\\n blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, psf_snrtop=0.30)\n\n\n\n # ------ ------ ------ ------ ------ ------ ------ ------ ------ ------\n \n \n\nif __name__ == '__main__':\n\t\t\t \t\t \n # Applying primary beam correction\n do_primarybeam_correction('560mhz_primarybeam.fits', '560mhz1000hours.fits')\n do_primarybeam_correction('1400mhz_primarybeam.fits', '1400mhz1000hours.fits')\n\t\t\t \n # divide x and y axes by split_into. 
This gives split_into**2 output images.\n # a 3 by 3 grid allows pybdsf to run efficiently (fails on the 4GB 32k x 32k pixel image) whilst avoiding cutting through the centre of the image\n split_into = 3\n \n # load image to get properties\n input_image_560 = '560mhz1000hours.fits'\n input_image_1400 = '1400mhz1000hours.fits'\n \n # cut up images and save to disk\n do_image_chopping(input_image_560, split_into)\n do_image_chopping(input_image_1400, split_into)\n \n # make image cube of the frequencies per cutout and save to disk, so pybdsf can use spectral index mode\n # currently not working since don't need this part at the moment.\n make_image_cubes() \n \n # sourcefinding on individual frequency bands\n imagenames = glob.glob('*_cutout.fits')\n for image in imagenames:\n do_sourcefinding(image)\n\t\t\t \n # sourcefinding on cube to get spectral indcies (si=True)\n # currently not working since need to chop images to same field of view before making cubes.\n # use code from pipeline.py if needed?\n #imagenames = sorted(glob.glob('cube_cutout_*.fits'))\n #for image in imagenames:\n # do_sourcefinding(image, si=True)\n\n \n \n \n #\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import matplotlib.pyplot as plt

# Two data series sharing the same x values.
xs = [1, 2, 3, 4, 5]
linear = [1, 2, 3, 4, 5]
squares = [1, 4, 9, 16, 25]

# Green circles joined by a solid line, then red squares with a dashed line.
plt.plot(xs, linear, 'go-', label='line 1', linewidth=2)
plt.plot(xs, squares, 'rs--', label='line 2', linewidth=4)

# Fix the viewport so both series fit with a small margin, add the legend.
plt.axis([0, 6, 0, 26])
plt.legend(loc="upper right")
plt.show()
|
normal
|
{
"blob_id": "7eeba06e78bd1e7139b1706574c4d040465d4566",
"index": 4178,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2',\n linewidth=4)\nplt.axis([0, 6, 0, 26])\nplt.legend(loc='upper right')\nplt.show()\n",
"step-3": "import matplotlib.pyplot as plt\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2',\n linewidth=4)\nplt.axis([0, 6, 0, 26])\nplt.legend(loc='upper right')\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\n\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5],\n 'go-', label='line 1', linewidth=2)\n\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25],\n 'rs--', label='line 2', linewidth=4)\n\nplt.axis([0, 6, 0, 26])\nplt.legend(loc=\"upper right\")\nplt.show()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
def digit_sum(x):
    """Return the sum of the decimal digits of a non-negative integer x."""
    total = 0
    while x != 0:
        # Peel off the last digit and shrink x in one step.
        x, digit = divmod(x, 10)
        total += digit
    return total
# Read test values until a terminating 0. For each value, find the smallest
# multiplier (starting from 11) whose product preserves the digit sum.
for line in sys.stdin:
    value = int(line)
    if value == 0:
        break
    multiplier = 11
    while digit_sum(value) != digit_sum(multiplier * value):
        multiplier += 1
    print('{}'.format(multiplier))
|
normal
|
{
"blob_id": "0d37b6f0ea8854f9d4d4cd2ff235fa39bab7cc12",
"index": 6549,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\nfor i in sys.stdin:\n test_num = int(i)\n if test_num == 0:\n break\n count = 11\n while digit_sum(test_num) != digit_sum(count * test_num):\n count = count + 1\n print('{}'.format(count))\n",
"step-4": "import sys\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\nfor i in sys.stdin:\n test_num = int(i)\n if test_num == 0:\n break\n count = 11\n while digit_sum(test_num) != digit_sum(count * test_num):\n count = count + 1\n print('{}'.format(count))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sqlite3

# Open (creating it if absent) the local SQLite database file.
connection = sqlite3.connect('database.db')
cursor = connection.cursor()

# Fixed: the CREATE TABLE statement was commented out, so the INSERTs below
# failed with OperationalError on a fresh database. IF NOT EXISTS keeps the
# script re-runnable against an existing database.
cursor.execute('CREATE TABLE IF NOT EXISTS users (id int, username text, password text)')

# Parameterized inserts avoid quoting/injection issues.
cursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))

users = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]
cursor.executemany('INSERT INTO users VALUES(?,?,?)', users)

# Echo the current table contents.
for row in cursor.execute('SELECT * FROM users'):
    print(row)

connection.commit()
connection.close()
|
normal
|
{
"blob_id": "d6b49533573dfefba6286ac2bffc2bd7a4075063",
"index": 1731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\n<mask token>\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-3": "<mask token>\nconnection = sqlite3.connect('database.db')\ncursor = connection.cursor()\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\nusers = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-4": "import sqlite3\nconnection = sqlite3.connect('database.db')\ncursor = connection.cursor()\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\nusers = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-5": "import sqlite3\n\nconnection = sqlite3.connect('database.db')\n\ncursor = connection.cursor()\n\n# cursor.execute('CREATE TABLE users (id int, username text, password text)')\n\ncursor.execute('INSERT INTO users VALUES(?,?,?)',(1,'ilia','qwerty'))\n\nusers = [(2,'nika','asdf'),(3,'nino','sdfg')]\n\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\n\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\n\nconnection.commit()\n\nconnection.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import queue
from enum import IntEnum
from time import sleep
import keyboard
# I know, I copy pasted this horrobly written class
# again...
# and again.. I should really write a proper intcode computer
class IntCodeComputer:
    """Resumable Intcode virtual machine (Advent of Code 2019).

    Memory auto-expands with zeros on out-of-range access, and the machine
    supports position (0), immediate (1) and relative (2) parameter modes.
    Run() pauses and returns a value at every output instruction, so it can
    be called repeatedly to stream outputs.
    """
    def __init__(self, code):
        # defaultCode is kept untouched so Run(..., reset=True) can restart
        # from a clean memory image.
        self.defaultCode = code
        self.runningCode = self.defaultCode.copy()
        self.instructionPointer = 0
        # NOTE(review): outputQueue is initialized but never used; outputs
        # are returned directly from Run() instead.
        self.outputQueue = queue.Queue()
        self.relativeBase = 0
    def AccessLocation(self, index):
        """Read memory at index, growing memory with zeros if needed."""
        if index >= len(self.runningCode):
            self.runningCode.extend([0 for i in range(0, index - len(self.runningCode) + 1)])
        return self.runningCode[index]
    def StoreLocation(self, index, value):
        """Write value at index, growing memory with zeros if needed."""
        if index >= len(self.runningCode):
            self.runningCode.extend([0 for i in range(0, index - len(self.runningCode) + 1)])
        self.runningCode[index] = value
    def Run(self, inputArray, reset):
        """Execute until the next output, halt, or end of memory.

        Returns the value of the next output instruction (opcode 4), or
        None once the program halts (opcode 99), hits an unknown opcode,
        or the instruction pointer runs past the end of memory.
        inputArray supplies the values consumed by opcode 3, in order;
        reset=True restores the initial program state before executing.
        """
        if reset == True:
            self.runningCode = self.defaultCode.copy()
            self.instructionPointer = 0
            self.outputQueue = queue.Queue()
            self.relativeBase = 0
        inputIndex = 0
        while self.instructionPointer < len(self.runningCode):
            # Opcode is the two low decimal digits; the next three digits
            # are the parameter modes for operands a, b and c.
            instruction = self.runningCode[self.instructionPointer] % 100;
            aMode = (self.runningCode[self.instructionPointer] // 100) % 10
            bMode = (self.runningCode[self.instructionPointer] // 1000) % 10
            cMode = (self.runningCode[self.instructionPointer] // 10000) % 10
            a = b = c = 0
            # Three-operand instructions: add (1), mul (2), less-than (7), equals (8).
            if instruction == 1 or instruction == 2 or instruction == 7 or instruction == 8:
                a = self.AccessLocation(self.instructionPointer + 1)
                b = self.AccessLocation(self.instructionPointer + 2)
                c = self.AccessLocation(self.instructionPointer + 3)
                if aMode == 0:
                    a = self.AccessLocation(a)
                if aMode == 2:
                    a = self.AccessLocation(a + self.relativeBase)
                if bMode == 0:
                    b = self.AccessLocation(b)
                if bMode == 2:
                    b = self.AccessLocation(b + self.relativeBase)
                # c is a destination address: adjusted for relative mode,
                # never dereferenced.
                if cMode == 2:
                    c = c + self.relativeBase
            # Two-operand instructions: jump-if-true (5), jump-if-false (6).
            if instruction == 5 or instruction == 6:
                a = self.AccessLocation(self.instructionPointer + 1)
                b = self.AccessLocation(self.instructionPointer + 2)
                if aMode == 0:
                    a = self.AccessLocation(a)
                if aMode == 2:
                    a = self.AccessLocation(a + self.relativeBase)
                if bMode == 0:
                    b = self.AccessLocation(b)
                if bMode == 2:
                    b = self.AccessLocation(b + self.relativeBase)
            if instruction == 1:
                self.StoreLocation(c, a + b)
                self.instructionPointer += 4
            elif instruction == 2:
                self.StoreLocation(c, a * b)
                self.instructionPointer += 4
            elif instruction == 3:
                # Input: store the next value from inputArray at operand a.
                a = self.AccessLocation(self.instructionPointer + 1)
                if aMode == 2:
                    a = a + self.relativeBase
                self.StoreLocation(a, inputArray[inputIndex])
                inputIndex += 1
                self.instructionPointer += 2
            elif instruction == 4:
                # Output: resolve the operand and suspend by returning it.
                a = self.AccessLocation(self.instructionPointer + 1)
                if aMode == 0:
                    a = self.AccessLocation(a)
                if aMode == 2:
                    a = self.AccessLocation(a + self.relativeBase)
                self.instructionPointer += 2
                return a
            elif instruction == 5:
                if a != 0:
                    self.instructionPointer = b
                else:
                    self.instructionPointer += 3
            elif instruction == 6:
                if a == 0:
                    self.instructionPointer = b
                else:
                    self.instructionPointer += 3
            elif instruction == 7:
                if a < b:
                    self.StoreLocation(c, 1)
                else:
                    self.StoreLocation(c, 0)
                self.instructionPointer += 4
            elif instruction == 8:
                if a == b:
                    self.StoreLocation(c, 1)
                else:
                    self.StoreLocation(c, 0)
                self.instructionPointer += 4
            elif instruction == 9:
                # Adjust the relative base by the resolved operand.
                a = self.AccessLocation(self.instructionPointer + 1)
                if aMode == 0:
                    a = self.AccessLocation(a)
                if aMode == 2:
                    a = self.AccessLocation(a + self.relativeBase)
                self.relativeBase += a
                self.instructionPointer += 2
            elif instruction == 99:
                # Halt: park the pointer past the end so the loop exits
                # on any subsequent Run() call too.
                self.instructionPointer = len(self.runningCode) + 1
                return None
            else:
                # Unknown opcode — bail out.
                print ("WTF")
                return None
        return None
def Render(screenMatrix):
    """Print the whole screen matrix as one string, one row per line.

    Bug fix: the inner range previously used the leaked loop variable ``i``
    (``len(screenMatrix[i])``) instead of the current ``row``, which raises
    NameError when no global ``i`` exists and reads the wrong row otherwise.
    """
    finalString = ""
    for row in range(0, len(screenMatrix)):
        for column in range(0, len(screenMatrix[row])):
            finalString += str(screenMatrix[row][column])
        finalString += "\n"
    # Carriage return so successive frames overwrite each other in place.
    print (finalString, end = "\r")
def GetBallX(screenMatrix):
    """Return the column of the first ball tile (value 4), or 0 if none.

    Bug fix: the inner range previously used the leaked loop variable ``i``
    (``len(screenMatrix[i])``) instead of the current ``row``, which raises
    NameError when no global ``i`` exists and scans the wrong row otherwise.
    """
    for row in range(0, len(screenMatrix)):
        for column in range(0, len(screenMatrix[row])):
            if screenMatrix[row][column] == 4:
                return column
    return 0
def GetPadX(screenMatrix):
    """Return the column of the first paddle tile (value 3), or 0 if none.

    Bug fix: the inner range previously used the leaked loop variable ``i``
    (``len(screenMatrix[i])``) instead of the current ``row``, which raises
    NameError when no global ``i`` exists and scans the wrong row otherwise.
    """
    for row in range(0, len(screenMatrix)):
        for column in range(0, len(screenMatrix[row])):
            if screenMatrix[row][column] == 3:
                return column
    return 0
# Part 1: run the arcade program once and record the initial screen.
inputFile = open("input.txt", "r")
code = [int(x) for x in inputFile.read().split(",")]
computer = IntCodeComputer(code)

# 24-row by 42-column tile grid.
screenMatrix = [0] * 24
for i in range(0, len(screenMatrix)):
    screenMatrix[i] = [0] * 42

cond = True
while cond:
    # Outputs arrive in triples: x column, y row, tile id.
    result1 = computer.Run([], False)
    if result1 != None:
        result2 = computer.Run([], False)
        result3 = computer.Run([], False)

        screenMatrix[result2][result1] = result3

    else:
        cond = False

counter = 0;

# Count block tiles (id 2) for the part 1 answer.
for i in range(0, len(screenMatrix)):
    for j in range(0, len(screenMatrix[i])):
        if screenMatrix[i][j] == 2:
            counter += 1

print (counter)

# Part 2: memory[0] = 2 switches the machine to "play for free" mode,
# then a trivial AI keeps the paddle under the ball until game over.
code[0] = 2

computer = IntCodeComputer(code)

screenMatrix = [0] * 24
for i in range(0, len(screenMatrix)):
    screenMatrix[i] = [0] * 42

cond = True
iter = 0
score = 0
while cond:

    cond2 = True
    exec = 0

    # Once the initial screen has been fully drawn, slow each frame down
    # slightly so the Render() animation is watchable.
    if iter >= len(screenMatrix) * len(screenMatrix[0]):
        sleep(0.001)

    while cond2 or iter < len(screenMatrix) * len(screenMatrix[0]):
        cond2 = True
        exec += 1

        # Joystick input: steer the paddle toward the ball's column.
        inp = 0
        ballX = GetBallX(screenMatrix)
        padX = GetPadX(screenMatrix)
        if padX == ballX:
            inp = 0
        elif padX > ballX:
            inp = -1
        else:
            inp = 1

        result1 = computer.Run([inp], False)
        if result1 != None:
            result2 = computer.Run([inp], False)
            result3 = computer.Run([inp], False)

            # The (-1, 0) coordinate carries the score instead of a tile.
            if result1 == -1 and result2 == 0:
                score = result3
            else:
                screenMatrix[result2][result1] = result3
            
            # Stop batching screen updates once the ball (tile 4) moved
            # or after 10 updates, so a frame gets rendered.
            if result3 == 4 or exec >= 10:
                cond2 = False

        else:
            # Program halted: game over.
            cond = False
            break

    Render(screenMatrix)

    iter += 1

print(score)
inputFile.close()
|
normal
|
{
"blob_id": "6eac04bc10ef712ab4e2cde4730950ddcbe42585",
"index": 8983,
"step-1": "<mask token>\n\n\nclass IntCodeComputer:\n\n def __init__(self, code):\n self.defaultCode = code\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n\n def AccessLocation(self, index):\n if index >= len(self.runningCode):\n self.runningCode.extend([(0) for i in range(0, index - len(self\n .runningCode) + 1)])\n return self.runningCode[index]\n\n def StoreLocation(self, index, value):\n if index >= len(self.runningCode):\n self.runningCode.extend([(0) for i in range(0, index - len(self\n .runningCode) + 1)])\n self.runningCode[index] = value\n\n def Run(self, inputArray, reset):\n if reset == True:\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n inputIndex = 0\n while self.instructionPointer < len(self.runningCode):\n instruction = self.runningCode[self.instructionPointer] % 100\n aMode = self.runningCode[self.instructionPointer] // 100 % 10\n bMode = self.runningCode[self.instructionPointer] // 1000 % 10\n cMode = self.runningCode[self.instructionPointer] // 10000 % 10\n a = b = c = 0\n if (instruction == 1 or instruction == 2 or instruction == 7 or\n instruction == 8):\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n c = self.AccessLocation(self.instructionPointer + 3)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n if bMode == 0:\n b = self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n if cMode == 2:\n c = c + self.relativeBase\n if instruction == 5 or instruction == 6:\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n if bMode == 0:\n b = 
self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n if instruction == 1:\n self.StoreLocation(c, a + b)\n self.instructionPointer += 4\n elif instruction == 2:\n self.StoreLocation(c, a * b)\n self.instructionPointer += 4\n elif instruction == 3:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 2:\n a = a + self.relativeBase\n self.StoreLocation(a, inputArray[inputIndex])\n inputIndex += 1\n self.instructionPointer += 2\n elif instruction == 4:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n self.instructionPointer += 2\n return a\n elif instruction == 5:\n if a != 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n elif instruction == 6:\n if a == 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n elif instruction == 7:\n if a < b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n elif instruction == 8:\n if a == b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n elif instruction == 9:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n self.relativeBase += a\n self.instructionPointer += 2\n elif instruction == 99:\n self.instructionPointer = len(self.runningCode) + 1\n return None\n else:\n print('WTF')\n return None\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass IntCodeComputer:\n\n def __init__(self, code):\n self.defaultCode = code\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n\n def AccessLocation(self, index):\n if index >= len(self.runningCode):\n self.runningCode.extend([(0) for i in range(0, index - len(self\n .runningCode) + 1)])\n return self.runningCode[index]\n\n def StoreLocation(self, index, value):\n if index >= len(self.runningCode):\n self.runningCode.extend([(0) for i in range(0, index - len(self\n .runningCode) + 1)])\n self.runningCode[index] = value\n\n def Run(self, inputArray, reset):\n if reset == True:\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n inputIndex = 0\n while self.instructionPointer < len(self.runningCode):\n instruction = self.runningCode[self.instructionPointer] % 100\n aMode = self.runningCode[self.instructionPointer] // 100 % 10\n bMode = self.runningCode[self.instructionPointer] // 1000 % 10\n cMode = self.runningCode[self.instructionPointer] // 10000 % 10\n a = b = c = 0\n if (instruction == 1 or instruction == 2 or instruction == 7 or\n instruction == 8):\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n c = self.AccessLocation(self.instructionPointer + 3)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n if bMode == 0:\n b = self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n if cMode == 2:\n c = c + self.relativeBase\n if instruction == 5 or instruction == 6:\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n if bMode == 0:\n b = 
self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n if instruction == 1:\n self.StoreLocation(c, a + b)\n self.instructionPointer += 4\n elif instruction == 2:\n self.StoreLocation(c, a * b)\n self.instructionPointer += 4\n elif instruction == 3:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 2:\n a = a + self.relativeBase\n self.StoreLocation(a, inputArray[inputIndex])\n inputIndex += 1\n self.instructionPointer += 2\n elif instruction == 4:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n self.instructionPointer += 2\n return a\n elif instruction == 5:\n if a != 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n elif instruction == 6:\n if a == 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n elif instruction == 7:\n if a < b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n elif instruction == 8:\n if a == b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n elif instruction == 9:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n self.relativeBase += a\n self.instructionPointer += 2\n elif instruction == 99:\n self.instructionPointer = len(self.runningCode) + 1\n return None\n else:\n print('WTF')\n return None\n return None\n\n\ndef Render(screenMatrix):\n finalString = ''\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n finalString += str(screenMatrix[row][column])\n finalString += '\\n'\n print(finalString, end='\\r')\n\n\ndef GetBallX(screenMatrix):\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n if screenMatrix[row][column] == 4:\n return 
column\n return 0\n\n\ndef GetPadX(screenMatrix):\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n if screenMatrix[row][column] == 3:\n return column\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass IntCodeComputer:\n\n def __init__(self, code):\n self.defaultCode = code\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n\n def AccessLocation(self, index):\n if index >= len(self.runningCode):\n self.runningCode.extend([(0) for i in range(0, index - len(self\n .runningCode) + 1)])\n return self.runningCode[index]\n\n def StoreLocation(self, index, value):\n if index >= len(self.runningCode):\n self.runningCode.extend([(0) for i in range(0, index - len(self\n .runningCode) + 1)])\n self.runningCode[index] = value\n\n def Run(self, inputArray, reset):\n if reset == True:\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n inputIndex = 0\n while self.instructionPointer < len(self.runningCode):\n instruction = self.runningCode[self.instructionPointer] % 100\n aMode = self.runningCode[self.instructionPointer] // 100 % 10\n bMode = self.runningCode[self.instructionPointer] // 1000 % 10\n cMode = self.runningCode[self.instructionPointer] // 10000 % 10\n a = b = c = 0\n if (instruction == 1 or instruction == 2 or instruction == 7 or\n instruction == 8):\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n c = self.AccessLocation(self.instructionPointer + 3)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n if bMode == 0:\n b = self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n if cMode == 2:\n c = c + self.relativeBase\n if instruction == 5 or instruction == 6:\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n if bMode == 0:\n b = 
self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n if instruction == 1:\n self.StoreLocation(c, a + b)\n self.instructionPointer += 4\n elif instruction == 2:\n self.StoreLocation(c, a * b)\n self.instructionPointer += 4\n elif instruction == 3:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 2:\n a = a + self.relativeBase\n self.StoreLocation(a, inputArray[inputIndex])\n inputIndex += 1\n self.instructionPointer += 2\n elif instruction == 4:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n self.instructionPointer += 2\n return a\n elif instruction == 5:\n if a != 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n elif instruction == 6:\n if a == 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n elif instruction == 7:\n if a < b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n elif instruction == 8:\n if a == b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n elif instruction == 9:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n self.relativeBase += a\n self.instructionPointer += 2\n elif instruction == 99:\n self.instructionPointer = len(self.runningCode) + 1\n return None\n else:\n print('WTF')\n return None\n return None\n\n\ndef Render(screenMatrix):\n finalString = ''\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n finalString += str(screenMatrix[row][column])\n finalString += '\\n'\n print(finalString, end='\\r')\n\n\ndef GetBallX(screenMatrix):\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n if screenMatrix[row][column] == 4:\n return 
column\n return 0\n\n\ndef GetPadX(screenMatrix):\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n if screenMatrix[row][column] == 3:\n return column\n return 0\n\n\n<mask token>\nfor i in range(0, len(screenMatrix)):\n screenMatrix[i] = [0] * 42\n<mask token>\nwhile cond:\n result1 = computer.Run([], False)\n if result1 != None:\n result2 = computer.Run([], False)\n result3 = computer.Run([], False)\n screenMatrix[result2][result1] = result3\n else:\n cond = False\n<mask token>\nfor i in range(0, len(screenMatrix)):\n for j in range(0, len(screenMatrix[i])):\n if screenMatrix[i][j] == 2:\n counter += 1\nprint(counter)\n<mask token>\nfor i in range(0, len(screenMatrix)):\n screenMatrix[i] = [0] * 42\n<mask token>\nwhile cond:\n cond2 = True\n exec = 0\n if iter >= len(screenMatrix) * len(screenMatrix[0]):\n sleep(0.001)\n while cond2 or iter < len(screenMatrix) * len(screenMatrix[0]):\n cond2 = True\n exec += 1\n inp = 0\n ballX = GetBallX(screenMatrix)\n padX = GetPadX(screenMatrix)\n if padX == ballX:\n inp = 0\n elif padX > ballX:\n inp = -1\n else:\n inp = 1\n result1 = computer.Run([inp], False)\n if result1 != None:\n result2 = computer.Run([inp], False)\n result3 = computer.Run([inp], False)\n if result1 == -1 and result2 == 0:\n score = result3\n else:\n screenMatrix[result2][result1] = result3\n if result3 == 4 or exec >= 10:\n cond2 = False\n else:\n cond = False\n break\n Render(screenMatrix)\n iter += 1\nprint(score)\ninputFile.close()\n",
"step-4": "<mask token>\n\n\nclass IntCodeComputer:\n\n def __init__(self, code):\n self.defaultCode = code\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n\n def AccessLocation(self, index):\n if index >= len(self.runningCode):\n self.runningCode.extend([(0) for i in range(0, index - len(self\n .runningCode) + 1)])\n return self.runningCode[index]\n\n def StoreLocation(self, index, value):\n if index >= len(self.runningCode):\n self.runningCode.extend([(0) for i in range(0, index - len(self\n .runningCode) + 1)])\n self.runningCode[index] = value\n\n def Run(self, inputArray, reset):\n if reset == True:\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n inputIndex = 0\n while self.instructionPointer < len(self.runningCode):\n instruction = self.runningCode[self.instructionPointer] % 100\n aMode = self.runningCode[self.instructionPointer] // 100 % 10\n bMode = self.runningCode[self.instructionPointer] // 1000 % 10\n cMode = self.runningCode[self.instructionPointer] // 10000 % 10\n a = b = c = 0\n if (instruction == 1 or instruction == 2 or instruction == 7 or\n instruction == 8):\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n c = self.AccessLocation(self.instructionPointer + 3)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n if bMode == 0:\n b = self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n if cMode == 2:\n c = c + self.relativeBase\n if instruction == 5 or instruction == 6:\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n if bMode == 0:\n b = 
self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n if instruction == 1:\n self.StoreLocation(c, a + b)\n self.instructionPointer += 4\n elif instruction == 2:\n self.StoreLocation(c, a * b)\n self.instructionPointer += 4\n elif instruction == 3:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 2:\n a = a + self.relativeBase\n self.StoreLocation(a, inputArray[inputIndex])\n inputIndex += 1\n self.instructionPointer += 2\n elif instruction == 4:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n self.instructionPointer += 2\n return a\n elif instruction == 5:\n if a != 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n elif instruction == 6:\n if a == 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n elif instruction == 7:\n if a < b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n elif instruction == 8:\n if a == b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n elif instruction == 9:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n self.relativeBase += a\n self.instructionPointer += 2\n elif instruction == 99:\n self.instructionPointer = len(self.runningCode) + 1\n return None\n else:\n print('WTF')\n return None\n return None\n\n\ndef Render(screenMatrix):\n finalString = ''\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n finalString += str(screenMatrix[row][column])\n finalString += '\\n'\n print(finalString, end='\\r')\n\n\ndef GetBallX(screenMatrix):\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n if screenMatrix[row][column] == 4:\n return 
column\n return 0\n\n\ndef GetPadX(screenMatrix):\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n if screenMatrix[row][column] == 3:\n return column\n return 0\n\n\ninputFile = open('input.txt', 'r')\ncode = [int(x) for x in inputFile.read().split(',')]\ncomputer = IntCodeComputer(code)\nscreenMatrix = [0] * 24\nfor i in range(0, len(screenMatrix)):\n screenMatrix[i] = [0] * 42\ncond = True\nwhile cond:\n result1 = computer.Run([], False)\n if result1 != None:\n result2 = computer.Run([], False)\n result3 = computer.Run([], False)\n screenMatrix[result2][result1] = result3\n else:\n cond = False\ncounter = 0\nfor i in range(0, len(screenMatrix)):\n for j in range(0, len(screenMatrix[i])):\n if screenMatrix[i][j] == 2:\n counter += 1\nprint(counter)\ncode[0] = 2\ncomputer = IntCodeComputer(code)\nscreenMatrix = [0] * 24\nfor i in range(0, len(screenMatrix)):\n screenMatrix[i] = [0] * 42\ncond = True\niter = 0\nscore = 0\nwhile cond:\n cond2 = True\n exec = 0\n if iter >= len(screenMatrix) * len(screenMatrix[0]):\n sleep(0.001)\n while cond2 or iter < len(screenMatrix) * len(screenMatrix[0]):\n cond2 = True\n exec += 1\n inp = 0\n ballX = GetBallX(screenMatrix)\n padX = GetPadX(screenMatrix)\n if padX == ballX:\n inp = 0\n elif padX > ballX:\n inp = -1\n else:\n inp = 1\n result1 = computer.Run([inp], False)\n if result1 != None:\n result2 = computer.Run([inp], False)\n result3 = computer.Run([inp], False)\n if result1 == -1 and result2 == 0:\n score = result3\n else:\n screenMatrix[result2][result1] = result3\n if result3 == 4 or exec >= 10:\n cond2 = False\n else:\n cond = False\n break\n Render(screenMatrix)\n iter += 1\nprint(score)\ninputFile.close()\n",
"step-5": "import queue\nfrom enum import IntEnum\nfrom time import sleep\nimport keyboard\n\n# I know, I copy pasted this horrobly written class\n# again...\n# and again.. I should really write a proper intcode computer\nclass IntCodeComputer:\n\n def __init__(self, code):\n self.defaultCode = code\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n\n def AccessLocation(self, index):\n if index >= len(self.runningCode):\n self.runningCode.extend([0 for i in range(0, index - len(self.runningCode) + 1)])\n return self.runningCode[index]\n\n def StoreLocation(self, index, value):\n if index >= len(self.runningCode):\n self.runningCode.extend([0 for i in range(0, index - len(self.runningCode) + 1)])\n self.runningCode[index] = value\n\n def Run(self, inputArray, reset):\n\n if reset == True:\n self.runningCode = self.defaultCode.copy()\n self.instructionPointer = 0\n self.outputQueue = queue.Queue()\n self.relativeBase = 0\n\n inputIndex = 0\n \n while self.instructionPointer < len(self.runningCode):\n instruction = self.runningCode[self.instructionPointer] % 100;\n\n aMode = (self.runningCode[self.instructionPointer] // 100) % 10\n bMode = (self.runningCode[self.instructionPointer] // 1000) % 10\n cMode = (self.runningCode[self.instructionPointer] // 10000) % 10\n\n a = b = c = 0\n\n if instruction == 1 or instruction == 2 or instruction == 7 or instruction == 8:\n a = self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n c = self.AccessLocation(self.instructionPointer + 3)\n \n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n\n if bMode == 0:\n b = self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n\n if cMode == 2:\n c = c + self.relativeBase\n\n if instruction == 5 or instruction == 6:\n a = 
self.AccessLocation(self.instructionPointer + 1)\n b = self.AccessLocation(self.instructionPointer + 2)\n\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n\n if bMode == 0:\n b = self.AccessLocation(b)\n if bMode == 2:\n b = self.AccessLocation(b + self.relativeBase)\n\n if instruction == 1:\n self.StoreLocation(c, a + b)\n self.instructionPointer += 4\n\n elif instruction == 2:\n self.StoreLocation(c, a * b)\n self.instructionPointer += 4\n\n elif instruction == 3:\n\n a = self.AccessLocation(self.instructionPointer + 1)\n\n if aMode == 2:\n a = a + self.relativeBase\n\n self.StoreLocation(a, inputArray[inputIndex])\n inputIndex += 1\n self.instructionPointer += 2\n\n elif instruction == 4:\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n\n self.instructionPointer += 2\n return a\n\n elif instruction == 5:\n if a != 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n\n elif instruction == 6:\n if a == 0:\n self.instructionPointer = b\n else:\n self.instructionPointer += 3\n\n elif instruction == 7:\n if a < b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n\n elif instruction == 8:\n if a == b:\n self.StoreLocation(c, 1)\n else:\n self.StoreLocation(c, 0)\n self.instructionPointer += 4\n\n elif instruction == 9:\n\n a = self.AccessLocation(self.instructionPointer + 1)\n if aMode == 0:\n a = self.AccessLocation(a)\n if aMode == 2:\n a = self.AccessLocation(a + self.relativeBase)\n\n self.relativeBase += a\n self.instructionPointer += 2\n\n elif instruction == 99:\n self.instructionPointer = len(self.runningCode) + 1\n return None\n else:\n print (\"WTF\")\n return None\n\n return None\n\ndef Render(screenMatrix):\n finalString = \"\"\n for row in range(0, len(screenMatrix)):\n for column in range(0, 
len(screenMatrix[i])):\n finalString += str(screenMatrix[row][column])\n finalString += \"\\n\"\n print (finalString, end = \"\\r\")\n\ndef GetBallX(screenMatrix):\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n if screenMatrix[row][column] == 4:\n return column\n return 0\n\ndef GetPadX(screenMatrix):\n for row in range(0, len(screenMatrix)):\n for column in range(0, len(screenMatrix[i])):\n if screenMatrix[row][column] == 3:\n return column\n return 0\n\ninputFile = open(\"input.txt\", \"r\")\n\ncode = [int(x) for x in inputFile.read().split(\",\")]\ncomputer = IntCodeComputer(code)\n\nscreenMatrix = [0] * 24\nfor i in range(0, len(screenMatrix)):\n screenMatrix[i] = [0] * 42\n\ncond = True\nwhile cond:\n\n result1 = computer.Run([], False)\n if result1 != None:\n result2 = computer.Run([], False)\n result3 = computer.Run([], False)\n\n screenMatrix[result2][result1] = result3\n\n else:\n cond = False\n\ncounter = 0;\n\nfor i in range(0, len(screenMatrix)):\n for j in range(0, len(screenMatrix[i])):\n if screenMatrix[i][j] == 2:\n counter += 1\n\nprint (counter)\n\ncode[0] = 2\n\ncomputer = IntCodeComputer(code)\n\nscreenMatrix = [0] * 24\nfor i in range(0, len(screenMatrix)):\n screenMatrix[i] = [0] * 42\n\ncond = True\niter = 0\nscore = 0\nwhile cond:\n\n cond2 = True\n exec = 0\n\n if iter >= len(screenMatrix) * len(screenMatrix[0]):\n sleep(0.001)\n\n while cond2 or iter < len(screenMatrix) * len(screenMatrix[0]):\n cond2 = True\n exec += 1\n\n inp = 0\n ballX = GetBallX(screenMatrix)\n padX = GetPadX(screenMatrix)\n if padX == ballX:\n inp = 0\n elif padX > ballX:\n inp = -1\n else:\n inp = 1\n\n result1 = computer.Run([inp], False)\n if result1 != None:\n result2 = computer.Run([inp], False)\n result3 = computer.Run([inp], False)\n\n if result1 == -1 and result2 == 0:\n score = result3\n else:\n screenMatrix[result2][result1] = result3\n \n if result3 == 4 or exec >= 10:\n cond2 = False\n\n else:\n cond = False\n 
break\n\n Render(screenMatrix)\n\n iter += 1\n\nprint(score)\ninputFile.close()",
"step-ids": [
5,
8,
9,
10,
12
]
}
|
[
5,
8,
9,
10,
12
] |
class Handlers:
    """Namespace of bot/webhook endpoint paths.

    Each attribute is the URL path for one command handler; grouping them
    here keeps route strings out of the handler registration code.
    """

    change_store = '/change_store'
    change_status = '/change_status'
    mail = '/mail'
    get_status = '/get_status'
    create_order = '/create_order'
    ask_store = '/ask_store'
    check = '/check'
    test = '/test'
|
normal
|
{
"blob_id": "32e3eed2e279706bca2925d3d9d897a928243b4c",
"index": 4518,
"step-1": "<mask token>\n",
"step-2": "class Handlers:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "class Handlers:\n change_store = '/change_store'\n change_status = '/change_status'\n mail = '/mail'\n get_status = '/get_status'\n create_order = '/create_order'\n ask_store = '/ask_store'\n check = '/check'\n test = '/test'\n",
"step-4": "class Handlers():\n change_store = \"/change_store\"\n change_status = \"/change_status\"\n mail = \"/mail\"\n get_status = \"/get_status\"\n create_order = \"/create_order\"\n ask_store = \"/ask_store\"\n check = \"/check\"\n test = \"/test\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize('n, result', ASSERTIONS)
def test_flatten_me(n, result):
"""Test flatten_me() for proper output in test cases."""
from flatten_me import flatten_me
assert flatten_me(n) == result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ASSERTIONS = [[[1, [2, 3], 4], [1, 2, 3, 4]], [[['a', 'b'], 'c', ['d']], [
'a', 'b', 'c', 'd']], [['!', '?'], ['!', '?']], [[[True, False], ['!'],
['?'], [71, '@']], [True, False, '!', '?', 71, '@']]]
@pytest.mark.parametrize('n, result', ASSERTIONS)
def test_flatten_me(n, result):
"""Test flatten_me() for proper output in test cases."""
from flatten_me import flatten_me
assert flatten_me(n) == result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pytest
ASSERTIONS = [[[1, [2, 3], 4], [1, 2, 3, 4]], [[['a', 'b'], 'c', ['d']], [
'a', 'b', 'c', 'd']], [['!', '?'], ['!', '?']], [[[True, False], ['!'],
['?'], [71, '@']], [True, False, '!', '?', 71, '@']]]
@pytest.mark.parametrize('n, result', ASSERTIONS)
def test_flatten_me(n, result):
"""Test flatten_me() for proper output in test cases."""
from flatten_me import flatten_me
assert flatten_me(n) == result
<|reserved_special_token_1|>
"""Tests for flatten_me.flatten_me."""
import pytest
ASSERTIONS = [
[[1, [2, 3], 4], [1, 2, 3, 4]],
[[['a', 'b'], 'c', ['d']], ['a', 'b', 'c', 'd']],
[['!', '?'], ['!', '?']],
[[[True, False], ['!'], ['?'], [71, '@']], [True, False, '!', '?', 71, '@']]
]
@pytest.mark.parametrize("n, result", ASSERTIONS)
def test_flatten_me(n, result):
"""Test flatten_me() for proper output in test cases."""
from flatten_me import flatten_me
assert flatten_me(n) == result
|
flexible
|
{
"blob_id": "c233ce4e14e9a59a9fb0f29589ced947efeb73a9",
"index": 3120,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.parametrize('n, result', ASSERTIONS)\ndef test_flatten_me(n, result):\n \"\"\"Test flatten_me() for proper output in test cases.\"\"\"\n from flatten_me import flatten_me\n assert flatten_me(n) == result\n",
"step-3": "<mask token>\nASSERTIONS = [[[1, [2, 3], 4], [1, 2, 3, 4]], [[['a', 'b'], 'c', ['d']], [\n 'a', 'b', 'c', 'd']], [['!', '?'], ['!', '?']], [[[True, False], ['!'],\n ['?'], [71, '@']], [True, False, '!', '?', 71, '@']]]\n\n\n@pytest.mark.parametrize('n, result', ASSERTIONS)\ndef test_flatten_me(n, result):\n \"\"\"Test flatten_me() for proper output in test cases.\"\"\"\n from flatten_me import flatten_me\n assert flatten_me(n) == result\n",
"step-4": "<mask token>\nimport pytest\nASSERTIONS = [[[1, [2, 3], 4], [1, 2, 3, 4]], [[['a', 'b'], 'c', ['d']], [\n 'a', 'b', 'c', 'd']], [['!', '?'], ['!', '?']], [[[True, False], ['!'],\n ['?'], [71, '@']], [True, False, '!', '?', 71, '@']]]\n\n\n@pytest.mark.parametrize('n, result', ASSERTIONS)\ndef test_flatten_me(n, result):\n \"\"\"Test flatten_me() for proper output in test cases.\"\"\"\n from flatten_me import flatten_me\n assert flatten_me(n) == result\n",
"step-5": "\"\"\"Tests for flatten_me.flatten_me.\"\"\"\nimport pytest\n\n\nASSERTIONS = [\n [[1, [2, 3], 4], [1, 2, 3, 4]],\n [[['a', 'b'], 'c', ['d']], ['a', 'b', 'c', 'd']],\n [['!', '?'], ['!', '?']],\n [[[True, False], ['!'], ['?'], [71, '@']], [True, False, '!', '?', 71, '@']]\n]\n\n\n@pytest.mark.parametrize(\"n, result\", ASSERTIONS)\ndef test_flatten_me(n, result):\n \"\"\"Test flatten_me() for proper output in test cases.\"\"\"\n from flatten_me import flatten_me\n assert flatten_me(n) == result\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rotate_pdf_pages(filename, rotation, output_name):
pdf_reader = PdfFileReader('{}.pdf'.format(filename))
pdf_writer = PdfFileWriter()
for page in range(pdf_reader.getNumPages()):
if rotation == '1':
rotated_page = pdf_reader.getPage(page).rotateClockwise(90)
if rotation == '2':
rotated_page = pdf_reader.getPage(page).rotateClockwise(180)
if rotation == '3':
rotated_page = pdf_reader.getPage(page).rotateCounterClockwise(90)
pdf_writer.addPage(rotated_page)
with open('{}.pdf'.format(output_name), 'wb') as out:
pdf_writer.write(out)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rotate_pdf(working_dir, filename, rotation):
os.chdir(working_dir)
output_name = 'pages'
rotate_pdf_pages(filename, rotation, output_name)
memory_file = BytesIO()
with open('{}.pdf'.format(output_name), 'rb') as fin:
memory_file = BytesIO(fin.read())
memory_file.seek(0)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
return send_file(memory_file, attachment_filename='{}.pdf'.format(
output_name), as_attachment=True)
def rotate_pdf_pages(filename, rotation, output_name):
pdf_reader = PdfFileReader('{}.pdf'.format(filename))
pdf_writer = PdfFileWriter()
for page in range(pdf_reader.getNumPages()):
if rotation == '1':
rotated_page = pdf_reader.getPage(page).rotateClockwise(90)
if rotation == '2':
rotated_page = pdf_reader.getPage(page).rotateClockwise(180)
if rotation == '3':
rotated_page = pdf_reader.getPage(page).rotateCounterClockwise(90)
pdf_writer.addPage(rotated_page)
with open('{}.pdf'.format(output_name), 'wb') as out:
pdf_writer.write(out)
<|reserved_special_token_1|>
import os, sys
from io import BytesIO
from pathlib import Path
from flask_config import app
from flask import send_file
from PyPDF2 import PdfFileReader, PdfFileWriter
def rotate_pdf(working_dir, filename, rotation):
os.chdir(working_dir)
output_name = 'pages'
rotate_pdf_pages(filename, rotation, output_name)
memory_file = BytesIO()
with open('{}.pdf'.format(output_name), 'rb') as fin:
memory_file = BytesIO(fin.read())
memory_file.seek(0)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
return send_file(memory_file, attachment_filename='{}.pdf'.format(
output_name), as_attachment=True)
def rotate_pdf_pages(filename, rotation, output_name):
pdf_reader = PdfFileReader('{}.pdf'.format(filename))
pdf_writer = PdfFileWriter()
for page in range(pdf_reader.getNumPages()):
if rotation == '1':
rotated_page = pdf_reader.getPage(page).rotateClockwise(90)
if rotation == '2':
rotated_page = pdf_reader.getPage(page).rotateClockwise(180)
if rotation == '3':
rotated_page = pdf_reader.getPage(page).rotateCounterClockwise(90)
pdf_writer.addPage(rotated_page)
with open('{}.pdf'.format(output_name), 'wb') as out:
pdf_writer.write(out)
<|reserved_special_token_1|>
#!/use/bin/python
import os, sys
from io import BytesIO
from pathlib import Path
from flask_config import app
from flask import send_file
from PyPDF2 import PdfFileReader, PdfFileWriter
def rotate_pdf(working_dir, filename, rotation):
os.chdir(working_dir)
output_name = 'pages'
rotate_pdf_pages(filename, rotation, output_name)
memory_file = BytesIO()
with open('{}.pdf'.format(output_name), 'rb') as fin:
memory_file = BytesIO(fin.read())
memory_file.seek(0)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
return send_file(memory_file, attachment_filename='{}.pdf'.format(output_name), as_attachment=True)
def rotate_pdf_pages(filename, rotation, output_name):
pdf_reader = PdfFileReader('{}.pdf'.format(filename))
pdf_writer = PdfFileWriter()
for page in range(pdf_reader.getNumPages()):
if rotation == '1':
rotated_page = pdf_reader.getPage(page).rotateClockwise(90)
if rotation == '2':
rotated_page = pdf_reader.getPage(page).rotateClockwise(180)
if rotation == '3':
rotated_page = pdf_reader.getPage(page).rotateCounterClockwise(90)
pdf_writer.addPage(rotated_page)
with open('{}.pdf'.format(output_name), 'wb') as out:
pdf_writer.write(out)
|
flexible
|
{
"blob_id": "624027373f53f62ededc40bfc859f28b5a83ca04",
"index": 3266,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef rotate_pdf_pages(filename, rotation, output_name):\n pdf_reader = PdfFileReader('{}.pdf'.format(filename))\n pdf_writer = PdfFileWriter()\n for page in range(pdf_reader.getNumPages()):\n if rotation == '1':\n rotated_page = pdf_reader.getPage(page).rotateClockwise(90)\n if rotation == '2':\n rotated_page = pdf_reader.getPage(page).rotateClockwise(180)\n if rotation == '3':\n rotated_page = pdf_reader.getPage(page).rotateCounterClockwise(90)\n pdf_writer.addPage(rotated_page)\n with open('{}.pdf'.format(output_name), 'wb') as out:\n pdf_writer.write(out)\n",
"step-3": "<mask token>\n\n\ndef rotate_pdf(working_dir, filename, rotation):\n os.chdir(working_dir)\n output_name = 'pages'\n rotate_pdf_pages(filename, rotation, output_name)\n memory_file = BytesIO()\n with open('{}.pdf'.format(output_name), 'rb') as fin:\n memory_file = BytesIO(fin.read())\n memory_file.seek(0)\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n return send_file(memory_file, attachment_filename='{}.pdf'.format(\n output_name), as_attachment=True)\n\n\ndef rotate_pdf_pages(filename, rotation, output_name):\n pdf_reader = PdfFileReader('{}.pdf'.format(filename))\n pdf_writer = PdfFileWriter()\n for page in range(pdf_reader.getNumPages()):\n if rotation == '1':\n rotated_page = pdf_reader.getPage(page).rotateClockwise(90)\n if rotation == '2':\n rotated_page = pdf_reader.getPage(page).rotateClockwise(180)\n if rotation == '3':\n rotated_page = pdf_reader.getPage(page).rotateCounterClockwise(90)\n pdf_writer.addPage(rotated_page)\n with open('{}.pdf'.format(output_name), 'wb') as out:\n pdf_writer.write(out)\n",
"step-4": "import os, sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom flask_config import app\nfrom flask import send_file\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\n\ndef rotate_pdf(working_dir, filename, rotation):\n os.chdir(working_dir)\n output_name = 'pages'\n rotate_pdf_pages(filename, rotation, output_name)\n memory_file = BytesIO()\n with open('{}.pdf'.format(output_name), 'rb') as fin:\n memory_file = BytesIO(fin.read())\n memory_file.seek(0)\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n return send_file(memory_file, attachment_filename='{}.pdf'.format(\n output_name), as_attachment=True)\n\n\ndef rotate_pdf_pages(filename, rotation, output_name):\n pdf_reader = PdfFileReader('{}.pdf'.format(filename))\n pdf_writer = PdfFileWriter()\n for page in range(pdf_reader.getNumPages()):\n if rotation == '1':\n rotated_page = pdf_reader.getPage(page).rotateClockwise(90)\n if rotation == '2':\n rotated_page = pdf_reader.getPage(page).rotateClockwise(180)\n if rotation == '3':\n rotated_page = pdf_reader.getPage(page).rotateCounterClockwise(90)\n pdf_writer.addPage(rotated_page)\n with open('{}.pdf'.format(output_name), 'wb') as out:\n pdf_writer.write(out)\n",
"step-5": "#!/use/bin/python\n\nimport os, sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom flask_config import app\nfrom flask import send_file\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\ndef rotate_pdf(working_dir, filename, rotation):\n os.chdir(working_dir)\n output_name = 'pages'\n rotate_pdf_pages(filename, rotation, output_name)\n \n memory_file = BytesIO()\n with open('{}.pdf'.format(output_name), 'rb') as fin:\n memory_file = BytesIO(fin.read())\n memory_file.seek(0)\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n return send_file(memory_file, attachment_filename='{}.pdf'.format(output_name), as_attachment=True)\n\ndef rotate_pdf_pages(filename, rotation, output_name):\n pdf_reader = PdfFileReader('{}.pdf'.format(filename))\n pdf_writer = PdfFileWriter()\n \n for page in range(pdf_reader.getNumPages()):\n if rotation == '1':\n rotated_page = pdf_reader.getPage(page).rotateClockwise(90)\n if rotation == '2':\n rotated_page = pdf_reader.getPage(page).rotateClockwise(180)\n if rotation == '3':\n rotated_page = pdf_reader.getPage(page).rotateCounterClockwise(90)\n pdf_writer.addPage(rotated_page)\n \n with open('{}.pdf'.format(output_name), 'wb') as out:\n pdf_writer.write(out)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.conf import settings
from django.db import migrations, models
import django_otp.plugins.otp_totp.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TOTPDevice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='The human-readable name of this device.', max_length=64)),
('confirmed', models.BooleanField(default=True, help_text='Is this device ready for use?')),
('key', models.CharField(default=django_otp.plugins.otp_totp.models.default_key, help_text='A hex-encoded secret key of up to 40 bytes.', max_length=80, validators=[django_otp.plugins.otp_totp.models.key_validator])),
('step', models.PositiveSmallIntegerField(default=30, help_text='The time step in seconds.')),
('t0', models.BigIntegerField(default=0, help_text='The Unix time at which to begin counting steps.')),
('digits', models.PositiveSmallIntegerField(default=6, help_text='The number of digits to expect in a token.', choices=[(6, 6), (8, 8)])),
('tolerance', models.PositiveSmallIntegerField(default=1, help_text='The number of time steps in the past or future to allow.')),
('drift', models.SmallIntegerField(default=0, help_text='The number of time steps the prover is known to deviate from our clock.')),
('last_t', models.BigIntegerField(default=-1, help_text='The t value of the latest verified token. The next token must be at a higher time step.')),
('user', models.ForeignKey(help_text='The user that this device belongs to.', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'abstract': False,
'verbose_name': 'TOTP device',
},
bases=(models.Model,),
),
]
|
normal
|
{
"blob_id": "2e448176a755828e5c7c90e4224102a285098460",
"index": 4852,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='TOTPDevice', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('name', models.CharField(help_text=\n 'The human-readable name of this device.', max_length=64)), (\n 'confirmed', models.BooleanField(default=True, help_text=\n 'Is this device ready for use?')), ('key', models.CharField(default\n =django_otp.plugins.otp_totp.models.default_key, help_text=\n 'A hex-encoded secret key of up to 40 bytes.', max_length=80,\n validators=[django_otp.plugins.otp_totp.models.key_validator])), (\n 'step', models.PositiveSmallIntegerField(default=30, help_text=\n 'The time step in seconds.')), ('t0', models.BigIntegerField(\n default=0, help_text=\n 'The Unix time at which to begin counting steps.')), ('digits',\n models.PositiveSmallIntegerField(default=6, help_text=\n 'The number of digits to expect in a token.', choices=[(6, 6), (8, \n 8)])), ('tolerance', models.PositiveSmallIntegerField(default=1,\n help_text=\n 'The number of time steps in the past or future to allow.')), (\n 'drift', models.SmallIntegerField(default=0, help_text=\n 'The number of time steps the prover is known to deviate from our clock.'\n )), ('last_t', models.BigIntegerField(default=-1, help_text=\n 'The t value of the latest verified token. The next token must be at a higher time step.'\n )), ('user', models.ForeignKey(help_text=\n 'The user that this device belongs to.', to=settings.\n AUTH_USER_MODEL, on_delete=models.CASCADE))], options={'abstract': \n False, 'verbose_name': 'TOTP device'}, bases=(models.Model,))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django_otp.plugins.otp_totp.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='TOTPDevice', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('name', models.CharField(help_text=\n 'The human-readable name of this device.', max_length=64)), (\n 'confirmed', models.BooleanField(default=True, help_text=\n 'Is this device ready for use?')), ('key', models.CharField(default\n =django_otp.plugins.otp_totp.models.default_key, help_text=\n 'A hex-encoded secret key of up to 40 bytes.', max_length=80,\n validators=[django_otp.plugins.otp_totp.models.key_validator])), (\n 'step', models.PositiveSmallIntegerField(default=30, help_text=\n 'The time step in seconds.')), ('t0', models.BigIntegerField(\n default=0, help_text=\n 'The Unix time at which to begin counting steps.')), ('digits',\n models.PositiveSmallIntegerField(default=6, help_text=\n 'The number of digits to expect in a token.', choices=[(6, 6), (8, \n 8)])), ('tolerance', models.PositiveSmallIntegerField(default=1,\n help_text=\n 'The number of time steps in the past or future to allow.')), (\n 'drift', models.SmallIntegerField(default=0, help_text=\n 'The number of time steps the prover is known to deviate from our clock.'\n )), ('last_t', models.BigIntegerField(default=-1, help_text=\n 'The t value of the latest verified token. The next token must be at a higher time step.'\n )), ('user', models.ForeignKey(help_text=\n 'The user that this device belongs to.', to=settings.\n AUTH_USER_MODEL, on_delete=models.CASCADE))], options={'abstract': \n False, 'verbose_name': 'TOTP device'}, bases=(models.Model,))]\n",
"step-5": "from django.conf import settings\nfrom django.db import migrations, models\n\nimport django_otp.plugins.otp_totp.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TOTPDevice',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(help_text='The human-readable name of this device.', max_length=64)),\n ('confirmed', models.BooleanField(default=True, help_text='Is this device ready for use?')),\n ('key', models.CharField(default=django_otp.plugins.otp_totp.models.default_key, help_text='A hex-encoded secret key of up to 40 bytes.', max_length=80, validators=[django_otp.plugins.otp_totp.models.key_validator])),\n ('step', models.PositiveSmallIntegerField(default=30, help_text='The time step in seconds.')),\n ('t0', models.BigIntegerField(default=0, help_text='The Unix time at which to begin counting steps.')),\n ('digits', models.PositiveSmallIntegerField(default=6, help_text='The number of digits to expect in a token.', choices=[(6, 6), (8, 8)])),\n ('tolerance', models.PositiveSmallIntegerField(default=1, help_text='The number of time steps in the past or future to allow.')),\n ('drift', models.SmallIntegerField(default=0, help_text='The number of time steps the prover is known to deviate from our clock.')),\n ('last_t', models.BigIntegerField(default=-1, help_text='The t value of the latest verified token. The next token must be at a higher time step.')),\n ('user', models.ForeignKey(help_text='The user that this device belongs to.', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),\n ],\n options={\n 'abstract': False,\n 'verbose_name': 'TOTP device',\n },\n bases=(models.Model,),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(wikipedia.summary(input_))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
input_ = input('Type in your question ')
print(wikipedia.summary(input_))
<|reserved_special_token_1|>
import wikipedia
input_ = input('Type in your question ')
print(wikipedia.summary(input_))
<|reserved_special_token_1|>
import wikipedia
input_ = input("Type in your question ")
print(wikipedia.summary(input_))
|
flexible
|
{
"blob_id": "5eb5388ffe7a7c880d8fcfaa137c2c9a133a0636",
"index": 713,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(wikipedia.summary(input_))\n",
"step-3": "<mask token>\ninput_ = input('Type in your question ')\nprint(wikipedia.summary(input_))\n",
"step-4": "import wikipedia\ninput_ = input('Type in your question ')\nprint(wikipedia.summary(input_))\n",
"step-5": "import wikipedia\ninput_ = input(\"Type in your question \")\nprint(wikipedia.summary(input_))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
n = input('Right or left? ')
if n == 'right':
right(60)
forward(100)
elif n == 'left':
left(60)
forward(100)
<|reserved_special_token_1|>
from turtle import *
while True:
n = input('Right or left? ')
if n == 'right':
right(60)
forward(100)
elif n == 'left':
left(60)
forward(100)
<|reserved_special_token_1|>
from turtle import *
while True:
n=input("Right or left? ")
if n == 'right':
right(60)
forward(100)
elif n == 'left':
left(60)
forward(100)
|
flexible
|
{
"blob_id": "6f698196e9391d73bd99cda0a098a5bf7a3832ff",
"index": 963,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n n = input('Right or left? ')\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"step-3": "from turtle import *\nwhile True:\n n = input('Right or left? ')\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"step-4": "from turtle import *\nwhile True:\n n=input(\"Right or left? \")\n\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.shtiker.CogPageGlobals
COG_QUOTAS = ((30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10))
COG_UNSEEN = 1
COG_BATTLED = 2
COG_DEFEATED = 3
COG_COMPLETE1 = 4
COG_COMPLETE2 = 5
|
normal
|
{
"blob_id": "fdb680f12dfb4b29f25cfe4f7af80469dc4294cf",
"index": 2437,
"step-1": "<mask token>\n",
"step-2": "COG_QUOTAS = (30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10)\nCOG_UNSEEN = 1\nCOG_BATTLED = 2\nCOG_DEFEATED = 3\nCOG_COMPLETE1 = 4\nCOG_COMPLETE2 = 5\n",
"step-3": "# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.shtiker.CogPageGlobals\r\nCOG_QUOTAS = ((30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10))\r\nCOG_UNSEEN = 1\r\nCOG_BATTLED = 2\r\nCOG_DEFEATED = 3\r\nCOG_COMPLETE1 = 4\r\nCOG_COMPLETE2 = 5",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class First(BaseGame):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def has_won(self, draws):
return draws[0] in (1, 2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class First(BaseGame):
key = 'F'
code = 'FIRST'
short_description = 'Vinci se esce 1 o 2. x2.8'
long_description = (
'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai puntato.'
)
min_bet = 20
multiplier = 2.8
def has_won(self, draws):
return draws[0] in (1, 2)
<|reserved_special_token_1|>
from game import BaseGame
class First(BaseGame):
key = 'F'
code = 'FIRST'
short_description = 'Vinci se esce 1 o 2. x2.8'
long_description = (
'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai puntato.'
)
min_bet = 20
multiplier = 2.8
def has_won(self, draws):
return draws[0] in (1, 2)
<|reserved_special_token_1|>
from game import BaseGame
class First(BaseGame):
key = 'F'
code = 'FIRST'
short_description = 'Vinci se esce 1 o 2. x2.8'
long_description = (
'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai'
' puntato.')
min_bet = 20
multiplier = 2.8
def has_won(self, draws):
return draws[0] in (1, 2)
|
flexible
|
{
"blob_id": "81fa3129d971fe8296a89a7b772d61ff50a8b9f7",
"index": 9284,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass First(BaseGame):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def has_won(self, draws):\n return draws[0] in (1, 2)\n",
"step-3": "<mask token>\n\n\nclass First(BaseGame):\n key = 'F'\n code = 'FIRST'\n short_description = 'Vinci se esce 1 o 2. x2.8'\n long_description = (\n 'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai puntato.'\n )\n min_bet = 20\n multiplier = 2.8\n\n def has_won(self, draws):\n return draws[0] in (1, 2)\n",
"step-4": "from game import BaseGame\n\n\nclass First(BaseGame):\n key = 'F'\n code = 'FIRST'\n short_description = 'Vinci se esce 1 o 2. x2.8'\n long_description = (\n 'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai puntato.'\n )\n min_bet = 20\n multiplier = 2.8\n\n def has_won(self, draws):\n return draws[0] in (1, 2)\n",
"step-5": "from game import BaseGame\n\n\nclass First(BaseGame):\n key = 'F'\n code = 'FIRST'\n short_description = 'Vinci se esce 1 o 2. x2.8'\n long_description = (\n 'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai'\n ' puntato.')\n min_bet = 20\n multiplier = 2.8\n\n def has_won(self, draws):\n return draws[0] in (1, 2)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Piece(Source, PageNumbersMixin):
"""A piece (e.g., essay)."""
type = models.CharField(verbose_name=_('piece type'), max_length=
TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])
def __html__(self) ->str:
"""Return the piece's citation HTML string."""
components = [self.attributee_html, f'"{self.linked_title}"', self.
date.string if self.date else '']
return self.components_to_html(components)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TYPE_MAX_LENGTH: int = 10
class Piece(Source, PageNumbersMixin):
"""A piece (e.g., essay)."""
type = models.CharField(verbose_name=_('piece type'), max_length=
TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])
def __html__(self) ->str:
"""Return the piece's citation HTML string."""
components = [self.attributee_html, f'"{self.linked_title}"', self.
date.string if self.date else '']
return self.components_to_html(components)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PIECE_TYPES = ('essay', 'Essay'),
TYPE_MAX_LENGTH: int = 10
class Piece(Source, PageNumbersMixin):
"""A piece (e.g., essay)."""
type = models.CharField(verbose_name=_('piece type'), max_length=
TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])
def __html__(self) ->str:
"""Return the piece's citation HTML string."""
components = [self.attributee_html, f'"{self.linked_title}"', self.
date.string if self.date else '']
return self.components_to_html(components)
<|reserved_special_token_1|>
from django.db import models
from django.utils.translation import ugettext_lazy as _
from apps.sources.models.mixins.page_numbers import PageNumbersMixin
from apps.sources.models.source import Source
PIECE_TYPES = ('essay', 'Essay'),
TYPE_MAX_LENGTH: int = 10
class Piece(Source, PageNumbersMixin):
"""A piece (e.g., essay)."""
type = models.CharField(verbose_name=_('piece type'), max_length=
TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])
def __html__(self) ->str:
"""Return the piece's citation HTML string."""
components = [self.attributee_html, f'"{self.linked_title}"', self.
date.string if self.date else '']
return self.components_to_html(components)
<|reserved_special_token_1|>
from django.db import models
from django.utils.translation import ugettext_lazy as _
from apps.sources.models.mixins.page_numbers import PageNumbersMixin
from apps.sources.models.source import Source
PIECE_TYPES = (('essay', 'Essay'),)
TYPE_MAX_LENGTH: int = 10
class Piece(Source, PageNumbersMixin):
"""A piece (e.g., essay)."""
type = models.CharField(
verbose_name=_('piece type'),
max_length=TYPE_MAX_LENGTH,
choices=PIECE_TYPES,
default=PIECE_TYPES[0][0],
)
def __html__(self) -> str:
"""Return the piece's citation HTML string."""
components = [
self.attributee_html,
f'"{self.linked_title}"',
self.date.string if self.date else '',
]
return self.components_to_html(components)
|
flexible
|
{
"blob_id": "30c24b9a4738c1952fc5d36a4bc36d8d3576ed3b",
"index": 7201,
"step-1": "<mask token>\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-2": "<mask token>\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-3": "<mask token>\nPIECE_TYPES = ('essay', 'Essay'),\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-4": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom apps.sources.models.mixins.page_numbers import PageNumbersMixin\nfrom apps.sources.models.source import Source\nPIECE_TYPES = ('essay', 'Essay'),\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-5": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.sources.models.mixins.page_numbers import PageNumbersMixin\nfrom apps.sources.models.source import Source\n\nPIECE_TYPES = (('essay', 'Essay'),)\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n\n type = models.CharField(\n verbose_name=_('piece type'),\n max_length=TYPE_MAX_LENGTH,\n choices=PIECE_TYPES,\n default=PIECE_TYPES[0][0],\n )\n\n def __html__(self) -> str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [\n self.attributee_html,\n f'\"{self.linked_title}\"',\n self.date.string if self.date else '',\n ]\n return self.components_to_html(components)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import os
import random
import cv2
import numpy as np
from keras.preprocessing.image import img_to_array
import numpy as np
import keras
from scipy import ndimage, misc
def preprocess_image(img):
img = img.astype(np.uint8)
(channel_b, channel_g, channel_r) = cv2.split(img)
result = ndimage.maximum_filter(channel_g, size=5)
# ret3,result = cv2.threshold(result,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
ret,result = cv2.threshold(channel_g,120,255,cv2.THRESH_BINARY_INV)
clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(11, 11))
clahe_g = clahe.apply(channel_g)
image = np.zeros((img.shape[0], img.shape[1], img.shape[2]))
image[:,:,0] = channel_g
image[:,:,1] = clahe_g
image[:,:,2] = result
image = image.astype(np.uint8)
image = img_to_array(image)
return image
def preprocess_mask(img):
img = img.astype(np.uint8)
return img[:,:,0].reshape((256,256,1))
# img=cv2.imread("/home/team6/Project/MiMM_SBILab/patches/train/images/0/1015.jpg")
# img_result=preprocess_image(img)
# cv2.imwrite("preprocess.jpg",img_result)
|
normal
|
{
"blob_id": "586d39556d2922a288a2bef3bcffbc6f9e3dc39d",
"index": 6707,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef preprocess_image(img):\n img = img.astype(np.uint8)\n channel_b, channel_g, channel_r = cv2.split(img)\n result = ndimage.maximum_filter(channel_g, size=5)\n ret, result = cv2.threshold(channel_g, 120, 255, cv2.THRESH_BINARY_INV)\n clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(11, 11))\n clahe_g = clahe.apply(channel_g)\n image = np.zeros((img.shape[0], img.shape[1], img.shape[2]))\n image[:, :, 0] = channel_g\n image[:, :, 1] = clahe_g\n image[:, :, 2] = result\n image = image.astype(np.uint8)\n image = img_to_array(image)\n return image\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef preprocess_image(img):\n img = img.astype(np.uint8)\n channel_b, channel_g, channel_r = cv2.split(img)\n result = ndimage.maximum_filter(channel_g, size=5)\n ret, result = cv2.threshold(channel_g, 120, 255, cv2.THRESH_BINARY_INV)\n clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(11, 11))\n clahe_g = clahe.apply(channel_g)\n image = np.zeros((img.shape[0], img.shape[1], img.shape[2]))\n image[:, :, 0] = channel_g\n image[:, :, 1] = clahe_g\n image[:, :, 2] = result\n image = image.astype(np.uint8)\n image = img_to_array(image)\n return image\n\n\ndef preprocess_mask(img):\n img = img.astype(np.uint8)\n return img[:, :, 0].reshape((256, 256, 1))\n",
"step-4": "import os\nimport random\nimport cv2\nimport numpy as np\nfrom keras.preprocessing.image import img_to_array\nimport numpy as np\nimport keras\nfrom scipy import ndimage, misc\n\n\ndef preprocess_image(img):\n img = img.astype(np.uint8)\n channel_b, channel_g, channel_r = cv2.split(img)\n result = ndimage.maximum_filter(channel_g, size=5)\n ret, result = cv2.threshold(channel_g, 120, 255, cv2.THRESH_BINARY_INV)\n clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(11, 11))\n clahe_g = clahe.apply(channel_g)\n image = np.zeros((img.shape[0], img.shape[1], img.shape[2]))\n image[:, :, 0] = channel_g\n image[:, :, 1] = clahe_g\n image[:, :, 2] = result\n image = image.astype(np.uint8)\n image = img_to_array(image)\n return image\n\n\ndef preprocess_mask(img):\n img = img.astype(np.uint8)\n return img[:, :, 0].reshape((256, 256, 1))\n",
"step-5": "import os\nimport random\nimport cv2\nimport numpy as np\nfrom keras.preprocessing.image import img_to_array\nimport numpy as np\nimport keras\nfrom scipy import ndimage, misc\n\ndef preprocess_image(img):\n img = img.astype(np.uint8)\n (channel_b, channel_g, channel_r) = cv2.split(img)\n\n result = ndimage.maximum_filter(channel_g, size=5)\n # ret3,result = cv2.threshold(result,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n ret,result = cv2.threshold(channel_g,120,255,cv2.THRESH_BINARY_INV)\n\n clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(11, 11))\n clahe_g = clahe.apply(channel_g)\n\n image = np.zeros((img.shape[0], img.shape[1], img.shape[2]))\n\n image[:,:,0] = channel_g\n image[:,:,1] = clahe_g\n image[:,:,2] = result\n\n image = image.astype(np.uint8)\n\n image = img_to_array(image)\n\n return image\n\ndef preprocess_mask(img):\n img = img.astype(np.uint8)\n return img[:,:,0].reshape((256,256,1))\n\n\n# img=cv2.imread(\"/home/team6/Project/MiMM_SBILab/patches/train/images/0/1015.jpg\")\n# img_result=preprocess_image(img)\n# cv2.imwrite(\"preprocess.jpg\",img_result)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore',
'Your application has authenticated using end user credentials')
<|reserved_special_token_0|>
for exam in exams:
print('checking', exam)
exam_json = json.dumps(get_exam(exam=exam))
roster = get_roster(exam=exam)
flagged = set()
for email, _ in roster:
template_questions = extract_questions(json.loads(exam_json))
student_questions = list(extract_questions(scramble(email, json.
loads(exam_json), keep_data=True)))
student_question_lookup = {q['id']: q for q in student_questions}
for question in template_questions:
if question['id'] not in student_question_lookup:
continue
if question['type'] not in ['multiple_choice', 'select_all']:
continue
if question['id'] in flagged:
continue
for i, option in enumerate(question['options']):
option['index'] = i
s = lambda options: sorted(options, key=lambda q: q['text'])
for a, b in zip(s(question['options']), s(
student_question_lookup[question['id']]['options'])):
if a['index'] != b.get('index', a['index']):
flagged.add(question['id'])
continue
if flagged:
print(exam, flagged)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore',
'Your application has authenticated using end user credentials')
db = firestore.Client()
exams = [x.id for x in db.collection('exams').stream()]
for exam in exams:
print('checking', exam)
exam_json = json.dumps(get_exam(exam=exam))
roster = get_roster(exam=exam)
flagged = set()
for email, _ in roster:
template_questions = extract_questions(json.loads(exam_json))
student_questions = list(extract_questions(scramble(email, json.
loads(exam_json), keep_data=True)))
student_question_lookup = {q['id']: q for q in student_questions}
for question in template_questions:
if question['id'] not in student_question_lookup:
continue
if question['type'] not in ['multiple_choice', 'select_all']:
continue
if question['id'] in flagged:
continue
for i, option in enumerate(question['options']):
option['index'] = i
s = lambda options: sorted(options, key=lambda q: q['text'])
for a, b in zip(s(question['options']), s(
student_question_lookup[question['id']]['options'])):
if a['index'] != b.get('index', a['index']):
flagged.add(question['id'])
continue
if flagged:
print(exam, flagged)
<|reserved_special_token_1|>
import json
from examtool.api.database import get_exam, get_roster
from examtool.api.extract_questions import extract_questions
from examtool.api.scramble import scramble
from google.cloud import firestore
import warnings
warnings.filterwarnings('ignore',
'Your application has authenticated using end user credentials')
db = firestore.Client()
exams = [x.id for x in db.collection('exams').stream()]
for exam in exams:
print('checking', exam)
exam_json = json.dumps(get_exam(exam=exam))
roster = get_roster(exam=exam)
flagged = set()
for email, _ in roster:
template_questions = extract_questions(json.loads(exam_json))
student_questions = list(extract_questions(scramble(email, json.
loads(exam_json), keep_data=True)))
student_question_lookup = {q['id']: q for q in student_questions}
for question in template_questions:
if question['id'] not in student_question_lookup:
continue
if question['type'] not in ['multiple_choice', 'select_all']:
continue
if question['id'] in flagged:
continue
for i, option in enumerate(question['options']):
option['index'] = i
s = lambda options: sorted(options, key=lambda q: q['text'])
for a, b in zip(s(question['options']), s(
student_question_lookup[question['id']]['options'])):
if a['index'] != b.get('index', a['index']):
flagged.add(question['id'])
continue
if flagged:
print(exam, flagged)
<|reserved_special_token_1|>
import json
from examtool.api.database import get_exam, get_roster
from examtool.api.extract_questions import extract_questions
from examtool.api.scramble import scramble
from google.cloud import firestore
import warnings
warnings.filterwarnings("ignore", "Your application has authenticated using end user credentials")
db = firestore.Client()
exams = [x.id for x in db.collection("exams").stream()]
for exam in exams:
print("checking", exam)
exam_json = json.dumps(get_exam(exam=exam))
roster = get_roster(exam=exam)
flagged = set()
for email, _ in roster:
template_questions = extract_questions(json.loads(exam_json))
student_questions = list(
extract_questions(scramble(email, json.loads(exam_json), keep_data=True))
)
student_question_lookup = {q['id']: q for q in student_questions}
for question in template_questions:
if question["id"] not in student_question_lookup:
continue
if question["type"] not in ["multiple_choice", "select_all"]:
continue
if question["id"] in flagged:
continue
for i, option in enumerate(question["options"]):
option["index"] = i
s = lambda options: sorted(options, key=lambda q: q["text"])
for a, b in zip(s(question["options"]), s(student_question_lookup[question["id"]]["options"])):
if a["index"] != b.get("index", a["index"]):
flagged.add(question["id"])
continue
if flagged:
print(exam, flagged)
|
flexible
|
{
"blob_id": "b74c759b51fb6591477757e2ff54b545f225991c",
"index": 7470,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwarnings.filterwarnings('ignore',\n 'Your application has authenticated using end user credentials')\n<mask token>\nfor exam in exams:\n print('checking', exam)\n exam_json = json.dumps(get_exam(exam=exam))\n roster = get_roster(exam=exam)\n flagged = set()\n for email, _ in roster:\n template_questions = extract_questions(json.loads(exam_json))\n student_questions = list(extract_questions(scramble(email, json.\n loads(exam_json), keep_data=True)))\n student_question_lookup = {q['id']: q for q in student_questions}\n for question in template_questions:\n if question['id'] not in student_question_lookup:\n continue\n if question['type'] not in ['multiple_choice', 'select_all']:\n continue\n if question['id'] in flagged:\n continue\n for i, option in enumerate(question['options']):\n option['index'] = i\n s = lambda options: sorted(options, key=lambda q: q['text'])\n for a, b in zip(s(question['options']), s(\n student_question_lookup[question['id']]['options'])):\n if a['index'] != b.get('index', a['index']):\n flagged.add(question['id'])\n continue\n if flagged:\n print(exam, flagged)\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore',\n 'Your application has authenticated using end user credentials')\ndb = firestore.Client()\nexams = [x.id for x in db.collection('exams').stream()]\nfor exam in exams:\n print('checking', exam)\n exam_json = json.dumps(get_exam(exam=exam))\n roster = get_roster(exam=exam)\n flagged = set()\n for email, _ in roster:\n template_questions = extract_questions(json.loads(exam_json))\n student_questions = list(extract_questions(scramble(email, json.\n loads(exam_json), keep_data=True)))\n student_question_lookup = {q['id']: q for q in student_questions}\n for question in template_questions:\n if question['id'] not in student_question_lookup:\n continue\n if question['type'] not in ['multiple_choice', 'select_all']:\n continue\n if question['id'] in flagged:\n continue\n for i, option in enumerate(question['options']):\n option['index'] = i\n s = lambda options: sorted(options, key=lambda q: q['text'])\n for a, b in zip(s(question['options']), s(\n student_question_lookup[question['id']]['options'])):\n if a['index'] != b.get('index', a['index']):\n flagged.add(question['id'])\n continue\n if flagged:\n print(exam, flagged)\n",
"step-4": "import json\nfrom examtool.api.database import get_exam, get_roster\nfrom examtool.api.extract_questions import extract_questions\nfrom examtool.api.scramble import scramble\nfrom google.cloud import firestore\nimport warnings\nwarnings.filterwarnings('ignore',\n 'Your application has authenticated using end user credentials')\ndb = firestore.Client()\nexams = [x.id for x in db.collection('exams').stream()]\nfor exam in exams:\n print('checking', exam)\n exam_json = json.dumps(get_exam(exam=exam))\n roster = get_roster(exam=exam)\n flagged = set()\n for email, _ in roster:\n template_questions = extract_questions(json.loads(exam_json))\n student_questions = list(extract_questions(scramble(email, json.\n loads(exam_json), keep_data=True)))\n student_question_lookup = {q['id']: q for q in student_questions}\n for question in template_questions:\n if question['id'] not in student_question_lookup:\n continue\n if question['type'] not in ['multiple_choice', 'select_all']:\n continue\n if question['id'] in flagged:\n continue\n for i, option in enumerate(question['options']):\n option['index'] = i\n s = lambda options: sorted(options, key=lambda q: q['text'])\n for a, b in zip(s(question['options']), s(\n student_question_lookup[question['id']]['options'])):\n if a['index'] != b.get('index', a['index']):\n flagged.add(question['id'])\n continue\n if flagged:\n print(exam, flagged)\n",
"step-5": "import json\n\nfrom examtool.api.database import get_exam, get_roster\nfrom examtool.api.extract_questions import extract_questions\nfrom examtool.api.scramble import scramble\nfrom google.cloud import firestore\nimport warnings\nwarnings.filterwarnings(\"ignore\", \"Your application has authenticated using end user credentials\")\n\n\ndb = firestore.Client()\nexams = [x.id for x in db.collection(\"exams\").stream()]\n\nfor exam in exams:\n print(\"checking\", exam)\n exam_json = json.dumps(get_exam(exam=exam))\n roster = get_roster(exam=exam)\n\n flagged = set()\n\n for email, _ in roster:\n template_questions = extract_questions(json.loads(exam_json))\n student_questions = list(\n extract_questions(scramble(email, json.loads(exam_json), keep_data=True))\n )\n student_question_lookup = {q['id']: q for q in student_questions}\n for question in template_questions:\n if question[\"id\"] not in student_question_lookup:\n continue\n if question[\"type\"] not in [\"multiple_choice\", \"select_all\"]:\n continue\n if question[\"id\"] in flagged:\n continue\n\n for i, option in enumerate(question[\"options\"]):\n option[\"index\"] = i\n\n s = lambda options: sorted(options, key=lambda q: q[\"text\"])\n\n for a, b in zip(s(question[\"options\"]), s(student_question_lookup[question[\"id\"]][\"options\"])):\n if a[\"index\"] != b.get(\"index\", a[\"index\"]):\n flagged.add(question[\"id\"])\n continue\n\n if flagged:\n print(exam, flagged)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import numpy as np
import time
from dronekit import connect, VehicleMode
connection_string = "/dev/ttyACM0"
baud_rate = 115200
print(">>>> Connecting with the UAV <<<<")
vehicle = connect(connection_string, baud=baud_rate, wait_ready=True)
vehicle.wait_ready('autopilot_version')
print('ready')
cap = cv2.VideoCapture(0)
if (cap.isOpened() == False):
print("Unable to read camera feed")
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
t = str(time.time())
out = cv2.VideoWriter(t+'.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
while(True):
posdata = str(vehicle.location.global_relative_frame).split(':')
_, _, alt = posdata[1].split(',')
ret, frame = cap.read()
cv2.putText(frame, str(alt),(0,int(frame_height/2.1)),cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255,255,255), 1)
if ret == True:
print("record..")
out.write(frame)
#cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "8c11463e35fb32949abbb163a89f874040a33ad0",
"index": 5415,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('>>>> Connecting with the UAV <<<<')\n<mask token>\nvehicle.wait_ready('autopilot_version')\nprint('ready')\n<mask token>\nif cap.isOpened() == False:\n print('Unable to read camera feed')\n<mask token>\nwhile True:\n posdata = str(vehicle.location.global_relative_frame).split(':')\n _, _, alt = posdata[1].split(',')\n ret, frame = cap.read()\n cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.\n FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)\n if ret == True:\n print('record..')\n out.write(frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nconnection_string = '/dev/ttyACM0'\nbaud_rate = 115200\nprint('>>>> Connecting with the UAV <<<<')\nvehicle = connect(connection_string, baud=baud_rate, wait_ready=True)\nvehicle.wait_ready('autopilot_version')\nprint('ready')\ncap = cv2.VideoCapture(0)\nif cap.isOpened() == False:\n print('Unable to read camera feed')\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nt = str(time.time())\nout = cv2.VideoWriter(t + '.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'\n ), 10, (frame_width, frame_height))\nwhile True:\n posdata = str(vehicle.location.global_relative_frame).split(':')\n _, _, alt = posdata[1].split(',')\n ret, frame = cap.read()\n cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.\n FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)\n if ret == True:\n print('record..')\n out.write(frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimport time\nfrom dronekit import connect, VehicleMode\nconnection_string = '/dev/ttyACM0'\nbaud_rate = 115200\nprint('>>>> Connecting with the UAV <<<<')\nvehicle = connect(connection_string, baud=baud_rate, wait_ready=True)\nvehicle.wait_ready('autopilot_version')\nprint('ready')\ncap = cv2.VideoCapture(0)\nif cap.isOpened() == False:\n print('Unable to read camera feed')\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nt = str(time.time())\nout = cv2.VideoWriter(t + '.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'\n ), 10, (frame_width, frame_height))\nwhile True:\n posdata = str(vehicle.location.global_relative_frame).split(':')\n _, _, alt = posdata[1].split(',')\n ret, frame = cap.read()\n cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.\n FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)\n if ret == True:\n print('record..')\n out.write(frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimport time \nfrom dronekit import connect, VehicleMode\n\nconnection_string = \"/dev/ttyACM0\"\nbaud_rate = 115200\nprint(\">>>> Connecting with the UAV <<<<\")\nvehicle = connect(connection_string, baud=baud_rate, wait_ready=True)\nvehicle.wait_ready('autopilot_version')\nprint('ready')\n\ncap = cv2.VideoCapture(0)\n \nif (cap.isOpened() == False): \n print(\"Unable to read camera feed\")\n\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nt = str(time.time())\nout = cv2.VideoWriter(t+'.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))\n \nwhile(True):\n posdata = str(vehicle.location.global_relative_frame).split(':')\n _, _, alt = posdata[1].split(',')\n ret, frame = cap.read()\n cv2.putText(frame, str(alt),(0,int(frame_height/2.1)),cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255,255,255), 1)\n if ret == True: \n print(\"record..\")\n out.write(frame)\n #cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break \ncap.release()\nout.release()\ncv2.destroyAllWindows() \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def build_statements_features(df, vectorizer, train=True, tokenizer=
tokenizer_nltk):
filtered_statements_dic = {}
for index, row in df.iterrows():
filtered_statement = []
tokenized_statement = tokenizer(row['statement'].lower().decode(
'utf-8'))
for token in tokenized_statement:
if token not in stop_words:
filtered_statement.append(token)
filtered_statements_dic[index] = filtered_statement
filtered_statements = filtered_statements_dic.values()
if train:
statements_features = vectorizer.fit_transform([statement_to_dict(
statement) for statement in filtered_statements])
else:
statements_features = vectorizer.transform([statement_to_dict(
statement) for statement in filtered_statements])
return statements_features
<|reserved_special_token_0|>
def build_W_embeddings(df_embeddings_reduced):
W = df_embeddings_reduced.as_matrix()
return W
def build_statements_embeddings(df, df_vocab, tokenizer=tokenizer_nltk,
max_statement_len=None):
embedded_statements_dic = {}
max_statement_len_ = -1
for index, row in df.iterrows():
embedded_statement = []
tokenized_statement = tokenizer(row['statement'].lower().decode(
'utf-8'))
for token in tokenized_statement:
if token in df_vocab.index:
embedded_statement.append(np.array(df_vocab.loc[token]))
else:
embedded_statement.append(np.array(df_vocab.loc['OOV']))
embedded_statements_dic[index] = np.array(embedded_statement)
if len(embedded_statement) > max_statement_len_:
max_statement_len_ = len(embedded_statement)
if max_statement_len is not None:
max_statement_len_ = max_statement_len
for key in embedded_statements_dic:
embedded_statement = embedded_statements_dic[key]
embedded_statement_len = np.shape(embedded_statement)[0]
padded_embedded_statement = np.tile(np.array(df_vocab.loc['PAD']),
(max_statement_len_, 1))
if max_statement_len_ >= embedded_statement_len:
padded_embedded_statement[:embedded_statement_len, :
] = embedded_statement
else:
padded_embedded_statement[:, :] = embedded_statement[:
max_statement_len_, :]
embedded_statements_dic[key] = padded_embedded_statement
embedded_statements_matrix = np.squeeze(np.asarray(
embedded_statements_dic.values()))
return embedded_statements_matrix, max_statement_len_
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def build_statements_features(df, vectorizer, train=True, tokenizer=
tokenizer_nltk):
filtered_statements_dic = {}
for index, row in df.iterrows():
filtered_statement = []
tokenized_statement = tokenizer(row['statement'].lower().decode(
'utf-8'))
for token in tokenized_statement:
if token not in stop_words:
filtered_statement.append(token)
filtered_statements_dic[index] = filtered_statement
filtered_statements = filtered_statements_dic.values()
if train:
statements_features = vectorizer.fit_transform([statement_to_dict(
statement) for statement in filtered_statements])
else:
statements_features = vectorizer.transform([statement_to_dict(
statement) for statement in filtered_statements])
return statements_features
def extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):
path = '../saved_data/embeddings/'
lowercase = True if embeddings in ['glove_100d_6b', 'glove_300d_6b',
'facebook'] else False
if os.path.isfile(path + 'vocab.txt') and lowercase:
df_vocab = pd.read_table(path + 'vocab.txt', sep=' ', header=None,
index_col=0)
print('full vocab already exists - vocab loaded, tokens found: {:.0f}'
.format(len(df_vocab) - 2))
elif os.path.isfile(path + 'vocab_upper.txt') and not lowercase:
df_vocab = pd.read_table(path + 'vocab_upper.txt', sep=' ', header=
None, index_col=0)
print(
'full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'
.format(len(df_vocab) - 2))
else:
df_vocab = pd.DataFrame(columns=['1'])
df_vocab.loc['PAD'] = 0
df_vocab.loc['OOV'] = 1
if lowercase:
combined_statements = ' '.join(df['statement']).lower().decode(
'utf-8')
else:
combined_statements = ' '.join(df['statement']).decode('utf-8')
tokenized_combined_statements = tokenizer(combined_statements)
for token in tokenized_combined_statements:
token = token.encode('utf-8')
if token not in df_vocab.index:
df_vocab.loc[token] = len(df_vocab)
df_vocab = df_vocab.astype(int)
df_vocab.to_csv(path + 'vocab.txt', sep=' ', header=None, index_col=0)
print('full vocab built and saved, tokens found: {:.0f}'.format(len
(df_vocab) - 2))
return df_vocab
def load_embeddings(df, df_vocab, embeddings):
if embeddings == 'glove_300d_6b':
path = '../saved_data/embeddings/glove_300d_6b/'
if os.path.isfile(path + 'glove_300d_6b_reduced.txt'
) and os.path.isfile(path + 'vocab_reduced.txt'):
df_embeddings_reduced = pd.read_table(path +
'glove_300d_6b_reduced.txt', sep=' ', header=None, index_col=0)
df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',
sep=' ', header=None, index_col=0)
print(
'glove_300d_6b reduced embeddings and reduced vocab already exist - glove_300d_6b reduced embeddings loaded'
)
else:
df_embeddings = pd.read_table(path + 'glove_300d_6b.txt', sep=
' ', header=None, index_col=0)
df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)
df_embeddings_reduced.loc['PAD'] = 0
df_embeddings_reduced.loc['OOV'] = 1
print('full glove_300d_6b embeddings loaded')
for token in df_vocab.index:
if token in df_embeddings.index:
df_embeddings_reduced = df_embeddings_reduced.append(
df_embeddings.loc[token], ignore_index=False)
df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:
].mean(axis=0)
df_embeddings_reduced.to_csv(path + 'glove_300d_6b_reduced.txt',
sep=' ', header=None, index_col=0)
print(
'glove_300d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'
.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))
df_vocab_reduced = pd.DataFrame(columns=['1'])
for i, token in enumerate(df_embeddings_reduced.index):
df_vocab_reduced.loc[token] = i
df_vocab_reduced = df_vocab_reduced.astype(int)
df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',
header=None, index_col=0)
print('vocab reduced built and saved')
if embeddings == 'glove_100d_6b':
path = '../saved_data/embeddings/glove_100d_6b/'
if os.path.isfile(path + 'glove_100d_6b_reduced.txt'
) and os.path.isfile(path + 'vocab_reduced.txt'):
df_embeddings_reduced = pd.read_table(path +
'glove_100d_6b_reduced.txt', sep=' ', header=None, index_col=0)
df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',
sep=' ', header=None, index_col=0)
print(
'glove_100d_6b reduced embeddings and reduced vocab already exist - glove_100d_6b reduced embeddings loaded'
)
else:
df_embeddings = pd.read_table(path + 'glove_100d_6b.txt', sep=
' ', header=None, index_col=0)
df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)
df_embeddings_reduced.loc['PAD'] = 0
df_embeddings_reduced.loc['OOV'] = 1
print('full glove_100d_6b embeddings loaded')
for token in df_vocab.index:
if token in df_embeddings.index:
df_embeddings_reduced = df_embeddings_reduced.append(
df_embeddings.loc[token], ignore_index=False)
df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:
].mean(axis=0)
df_embeddings_reduced.to_csv(path + 'glove_100d_6b_reduced.txt',
sep=' ', header=None, index_col=0)
print(
'glove_100d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'
.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))
df_vocab_reduced = pd.DataFrame(columns=['1'])
for i, token in enumerate(df_embeddings_reduced.index):
df_vocab_reduced.loc[token] = i
df_vocab_reduced = df_vocab_reduced.astype(int)
df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',
header=None, index_col=0)
print('vocab reduced built and saved')
if embeddings == 'glove_300d_84b':
path = '../saved_data/embeddings/glove_300d_84b/'
if os.path.isfile(path + 'glove_300d_84b_reduced.txt'
) and os.path.isfile(path + 'vocab_upper_reduced.txt'):
df_embeddings_reduced = pd.read_table(path +
'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0
)
df_vocab_reduced = pd.read_table(path +
'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)
print(
'glove_300d_84b reduced embeddings and reduced upper vocab already exist - glove_300d_84b reduced embeddings loaded'
)
else:
df_embeddings = pd.read_table(path + 'glove_300d_84b.txt', sep=
' ', header=None, index_col=0)
df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)
df_embeddings_reduced.loc['PAD'] = 0
df_embeddings_reduced.loc['OOV'] = 1
print('full glove_300d_84b embeddings loaded')
for token in df_vocab.index:
if token in df_embeddings.index:
df_embeddings_reduced = df_embeddings_reduced.append(
df_embeddings.loc[token], ignore_index=False)
df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:
].mean(axis=0)
df_embeddings_reduced.to_csv(path +
'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0
)
print(
'glove_300d_84b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'
.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))
df_vocab_reduced = pd.DataFrame(columns=['1'])
for i, token in enumerate(df_embeddings_reduced.index):
df_vocab_reduced.loc[token] = i
df_vocab_reduced = df_vocab_reduced.astype(int)
df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=
' ', header=None, index_col=0)
print('vocab upper reduced built and saved')
if embeddings == 'google':
path = '../saved_data/embeddings/google/'
if os.path.isfile(path + 'google_word2vec_300d_reduced.txt'
) and os.path.isfile(path + 'vocab_upper_reduced.txt'):
df_embeddings_reduced = pd.read_table(path +
'google_word2vec_300d_reduced.txt', sep=' ', header=None,
index_col=0)
df_vocab_reduced = pd.read_table(path +
'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)
print(
'google_word2vec_300d reduced embeddings and reduced upper vocab already exist - google_word2vec_300d reduced embeddings loaded'
)
else:
df_embeddings = pd.read_table(path + 'google_word2vec_300d.txt',
sep=' ', header=None, index_col=0, nrows=1000000)
df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)
df_embeddings_reduced.loc['PAD'] = 0
df_embeddings_reduced.loc['OOV'] = 1
print('full google_word2vec_300d embeddings loaded')
for token in df_vocab.index:
if token in df_embeddings.index:
df_embeddings_reduced = df_embeddings_reduced.append(
df_embeddings.loc[token], ignore_index=False)
df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:
].mean(axis=0)
df_embeddings_reduced.to_csv(path +
'google_word2vec_300d_reduced.txt', sep=' ', header=None,
index_col=0)
print(
'google_word2vec_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'
.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))
df_vocab_reduced = pd.DataFrame(columns=['1'])
for i, token in enumerate(df_embeddings_reduced.index):
df_vocab_reduced.loc[token] = i
df_vocab_reduced = df_vocab_reduced.astype(int)
df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=
' ', header=None, index_col=0)
print('vocab upper reduced built and saved')
if embeddings == 'facebook':
path = '../saved_data/embeddings/facebook/'
if os.path.isfile(path + 'facebook_fastText_300d_reduced.txt'
) and os.path.isfile(path + 'vocab_reduced.txt'):
df_embeddings_reduced = pd.read_table(path +
'facebook_fastText_300d_reduced.txt', sep=' ', header=None,
index_col=0)
df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',
sep=' ', header=None, index_col=0)
print(
'facebook_fastText_300d reduced embeddings and reduced upper vocab already exist - facebook_fastText_300d reduced embeddings loaded'
)
else:
df_embeddings = pd.read_table(path + 'wiki.en.vec', sep=' ',
skiprows=1, header=None, index_col=0, usecols=range(301))
df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)
df_embeddings_reduced.loc['PAD'] = 0
df_embeddings_reduced.loc['OOV'] = 1
print('full facebook_fastText_300d embeddings loaded')
for token in df_vocab.index:
if token in df_embeddings.index:
df_embeddings_reduced = df_embeddings_reduced.append(
df_embeddings.loc[token], ignore_index=False)
df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:
].mean(axis=0)
df_embeddings_reduced.to_csv(path +
'facebook_fastText_300d_reduced.txt', sep=' ', header=None,
index_col=0)
print(
'facebook_fastText_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'
.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))
df_vocab_reduced = pd.DataFrame(columns=['1'])
for i, token in enumerate(df_embeddings_reduced.index):
df_vocab_reduced.loc[token] = i
df_vocab_reduced = df_vocab_reduced.astype(int)
df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',
header=None, index_col=0)
print('vocab reduced built and saved')
return df_embeddings_reduced, df_vocab_reduced
def build_W_embeddings(df_embeddings_reduced):
W = df_embeddings_reduced.as_matrix()
return W
def build_statements_embeddings(df, df_vocab, tokenizer=None,
    max_statement_len=None):
    """Turn each row's 'statement' into a padded vector of vocabulary ids.

    Parameters
    ----------
    df : DataFrame with a 'statement' column (str, or py2-era utf-8 bytes).
    df_vocab : one-column DataFrame mapping token -> integer id, with the
        reserved rows 'PAD' (id 0) and 'OOV' (id 1).
    tokenizer : callable splitting a string into tokens; defaults to
        ``tokenizer_nltk`` (resolved lazily so the module-level default
        need not exist at definition time).
    max_statement_len : optional fixed length; longer statements are
        truncated, shorter ones padded with the PAD id.

    Returns
    -------
    (matrix, max_len) where matrix stacks one padded id array per row of
    *df* (singleton axes squeezed away) and max_len is the padding length
    actually used.
    """
    if tokenizer is None:
        tokenizer = tokenizer_nltk
    embedded_statements_dic = {}
    max_statement_len_ = -1
    for index, row in df.iterrows():
        statement = row['statement'].lower()
        if isinstance(statement, bytes):
            # py2-era datasets stored statements as utf-8 bytes; on py3
            # plain str has no .decode, so only decode when needed
            statement = statement.decode('utf-8')
        embedded_statement = [
            np.array(df_vocab.loc[token]) if token in df_vocab.index
            else np.array(df_vocab.loc['OOV'])
            for token in tokenizer(statement)]
        embedded_statements_dic[index] = np.array(embedded_statement)
        max_statement_len_ = max(max_statement_len_, len(embedded_statement))
    if max_statement_len is not None:
        max_statement_len_ = max_statement_len
    for key in list(embedded_statements_dic):
        embedded_statement = embedded_statements_dic[key]
        embedded_statement_len = np.shape(embedded_statement)[0]
        # start from an all-PAD matrix, then overwrite the real prefix
        padded = np.tile(np.array(df_vocab.loc['PAD']),
            (max_statement_len_, 1))
        if max_statement_len_ >= embedded_statement_len:
            padded[:embedded_statement_len, :] = embedded_statement
        else:
            padded[:, :] = embedded_statement[:max_statement_len_, :]
        embedded_statements_dic[key] = padded
    # list(...) is required on Python 3, where .values() is a lazy view and
    # np.asarray would otherwise produce a useless 0-d object array
    embedded_statements_matrix = np.squeeze(
        np.asarray(list(embedded_statements_dic.values())))
    return embedded_statements_matrix, max_statement_len_
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def tokenizer_nltk(input):
    """Split *input* text into tokens with NLTK's default word tokenizer."""
    tokens = nltk.word_tokenize(input)
    return tokens
def statement_to_dict(statement):
    """Bag-of-words representation: map each token to its float count."""
    counts = defaultdict(float)
    for word in statement:
        counts[word] = counts[word] + 1.0
    return counts
def build_statements_features(df, vectorizer, train=True, tokenizer=tokenizer_nltk):
    """Vectorize the 'statement' column into sparse bag-of-words features.

    Tokenizes each lowercased statement, drops stop words, then feeds
    per-statement token-count dicts to *vectorizer*: fit_transform when
    train=True, plain transform otherwise.
    """
    filtered_statements_dic = {}
    for index, row in df.iterrows():
        tokens = tokenizer(row['statement'].lower().decode('utf-8'))
        filtered_statements_dic[index] = [t for t in tokens
                                          if t not in stop_words]
    feature_dicts = [statement_to_dict(s)
                     for s in filtered_statements_dic.values()]
    if train:
        return vectorizer.fit_transform(feature_dicts)
    return vectorizer.transform(feature_dicts)
def extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):
    """Build (or load from cache) the token -> integer-id vocabulary.

    The corpus is lowercased for embedding sets trained on lowercased text
    (6B GloVe, fastText) and kept cased otherwise; the two variants are
    cached under different filenames.  Ids 0 and 1 are reserved for the
    PAD and OOV pseudo-tokens.

    Returns a one-column DataFrame indexed by token.
    """
    path = '../saved_data/embeddings/'
    lowercase = embeddings in ['glove_100d_6b', 'glove_300d_6b', 'facebook']
    # cased and uncased vocabularies live in different cache files
    vocab_file = 'vocab.txt' if lowercase else 'vocab_upper.txt'
    if os.path.isfile(path + vocab_file):
        df_vocab = pd.read_table(path + vocab_file, sep=' ', header=None,
            index_col=0)
        if lowercase:
            print('full vocab already exists - vocab loaded, tokens found: {:.0f}'
                .format(len(df_vocab) - 2))
        else:
            print(
                'full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'
                .format(len(df_vocab) - 2))
    else:
        df_vocab = pd.DataFrame(columns=['1'])
        # reserve the two pseudo-token ids
        df_vocab.loc['PAD'] = 0
        df_vocab.loc['OOV'] = 1
        combined_statements = ' '.join(df['statement'])
        if lowercase:
            combined_statements = combined_statements.lower()
        if isinstance(combined_statements, bytes):
            # py2-era data arrives as utf-8 bytes; py3 str needs no decode
            combined_statements = combined_statements.decode('utf-8')
        for token in tokenizer(combined_statements):
            # NOTE(review): py2 convention — on py3 this makes bytes keys;
            # confirm downstream lookups use the same encoding
            token = token.encode('utf-8')
            if token not in df_vocab.index:
                df_vocab.loc[token] = len(df_vocab)
        df_vocab = df_vocab.astype(int)
        # BUG FIX: the uncased vocabulary used to be written to 'vocab.txt',
        # so its own existence check never found it and it shadowed the
        # lowercase cache; save to the variant-specific file instead.
        # (also dropped the invalid to_csv kwarg `index_col`)
        df_vocab.to_csv(path + vocab_file, sep=' ', header=None)
        print('full vocab built and saved, tokens found: {:.0f}'.format(
            len(df_vocab) - 2))
    return df_vocab
# One entry per supported embedding set: where the raw vectors live, how to
# read them, and which cache files the vocabulary-reduced versions use.
_EMBEDDING_CONFIGS = {
    'glove_300d_6b': {'folder': 'glove_300d_6b', 'label': 'glove_300d_6b',
        'source': 'glove_300d_6b.txt', 'read_kwargs': {},
        'reduced': 'glove_300d_6b_reduced.txt',
        'vocab': 'vocab_reduced.txt', 'vocab_desc': 'reduced vocab'},
    'glove_100d_6b': {'folder': 'glove_100d_6b', 'label': 'glove_100d_6b',
        'source': 'glove_100d_6b.txt', 'read_kwargs': {},
        'reduced': 'glove_100d_6b_reduced.txt',
        'vocab': 'vocab_reduced.txt', 'vocab_desc': 'reduced vocab'},
    'glove_300d_84b': {'folder': 'glove_300d_84b', 'label': 'glove_300d_84b',
        'source': 'glove_300d_84b.txt', 'read_kwargs': {},
        'reduced': 'glove_300d_84b_reduced.txt',
        'vocab': 'vocab_upper_reduced.txt',
        'vocab_desc': 'reduced upper vocab'},
    'google': {'folder': 'google', 'label': 'google_word2vec_300d',
        'source': 'google_word2vec_300d.txt',
        'read_kwargs': {'nrows': 1000000},
        'reduced': 'google_word2vec_300d_reduced.txt',
        'vocab': 'vocab_upper_reduced.txt',
        'vocab_desc': 'reduced upper vocab'},
    'facebook': {'folder': 'facebook', 'label': 'facebook_fastText_300d',
        'source': 'wiki.en.vec',
        'read_kwargs': {'skiprows': 1, 'usecols': range(301)},
        'reduced': 'facebook_fastText_300d_reduced.txt',
        # historical quirk kept: fastText uses the lowercase vocab file but
        # its log message always said 'reduced upper vocab'
        'vocab': 'vocab_reduced.txt',
        'vocab_desc': 'reduced upper vocab'},
}


def _reduce_embeddings(df_vocab, df_embeddings):
    """Keep only the vocabulary rows of *df_embeddings*, PAD/OOV first.

    OOV is then set to the mean of all retained real vectors.
    """
    reduced = pd.DataFrame(columns=df_embeddings.columns)
    reduced.loc['PAD'] = 0
    reduced.loc['OOV'] = 1
    found = [token for token in df_vocab.index
        if token in df_embeddings.index]
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.x
    reduced = pd.concat([reduced, df_embeddings.loc[found]])
    reduced.loc['OOV'] = reduced.iloc[2:].mean(axis=0)
    return reduced


def load_embeddings(df, df_vocab, embeddings):
    """Load (or build and cache) vocabulary-reduced pretrained embeddings.

    Parameters
    ----------
    df : unused; kept for interface compatibility with existing callers.
    df_vocab : full vocabulary DataFrame (token -> id) from extract_vocab.
    embeddings : one of the keys of ``_EMBEDDING_CONFIGS``.

    Returns
    -------
    (df_embeddings_reduced, df_vocab_reduced): the embedding rows for the
    vocabulary tokens (plus PAD/OOV) and the matching token -> row-index
    mapping.  Both are cached on disk after the first build.

    Raises
    ------
    ValueError for an unknown *embeddings* name (previously this fell
    through silently and died with UnboundLocalError at the return).
    """
    if embeddings not in _EMBEDDING_CONFIGS:
        raise ValueError('unknown embeddings: {!r}'.format(embeddings))
    cfg = _EMBEDDING_CONFIGS[embeddings]
    path = '../saved_data/embeddings/' + cfg['folder'] + '/'
    reduced_file = path + cfg['reduced']
    vocab_file = path + cfg['vocab']
    if os.path.isfile(reduced_file) and os.path.isfile(vocab_file):
        # both caches exist: just load them
        df_embeddings_reduced = pd.read_table(reduced_file, sep=' ',
            header=None, index_col=0)
        df_vocab_reduced = pd.read_table(vocab_file, sep=' ', header=None,
            index_col=0)
        print('{0} reduced embeddings and {1} already exist - {0} reduced embeddings loaded'
            .format(cfg['label'], cfg['vocab_desc']))
    else:
        df_embeddings = pd.read_table(path + cfg['source'], sep=' ',
            header=None, index_col=0, **cfg['read_kwargs'])
        print('full {} embeddings loaded'.format(cfg['label']))
        df_embeddings_reduced = _reduce_embeddings(df_vocab, df_embeddings)
        # (invalid to_csv kwarg `index_col` dropped; the index is written
        # by default and read back with read_table(..., index_col=0))
        df_embeddings_reduced.to_csv(reduced_file, sep=' ', header=None)
        print('{} reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'
            .format(cfg['label'], len(df_embeddings_reduced) - 2,
                len(df_vocab) - 2))
        df_vocab_reduced = pd.DataFrame(columns=['1'])
        for i, token in enumerate(df_embeddings_reduced.index):
            df_vocab_reduced.loc[token] = i
        df_vocab_reduced = df_vocab_reduced.astype(int)
        df_vocab_reduced.to_csv(vocab_file, sep=' ', header=None)
        if cfg['vocab'] == 'vocab_reduced.txt':
            print('vocab reduced built and saved')
        else:
            print('vocab upper reduced built and saved')
    return df_embeddings_reduced, df_vocab_reduced
def build_W_embeddings(df_embeddings_reduced):
    """Return the reduced embedding table as a plain 2-D numpy array.

    One row per vocabulary entry (PAD, OOV, then real tokens), one column
    per embedding dimension.

    `DataFrame.as_matrix()` was deprecated and later removed from pandas;
    `.values` is the long-supported equivalent and works on every version.
    """
    return df_embeddings_reduced.values
def build_statements_embeddings(df, df_vocab, tokenizer=None,
    max_statement_len=None):
    """Turn each row's 'statement' into a padded vector of vocabulary ids.

    Parameters
    ----------
    df : DataFrame with a 'statement' column (str, or py2-era utf-8 bytes).
    df_vocab : one-column DataFrame mapping token -> integer id, with the
        reserved rows 'PAD' (id 0) and 'OOV' (id 1).
    tokenizer : callable splitting a string into tokens; defaults to
        ``tokenizer_nltk`` (resolved lazily so the module-level default
        need not exist at definition time).
    max_statement_len : optional fixed length; longer statements are
        truncated, shorter ones padded with the PAD id.

    Returns
    -------
    (matrix, max_len) where matrix stacks one padded id array per row of
    *df* (singleton axes squeezed away) and max_len is the padding length
    actually used.
    """
    if tokenizer is None:
        tokenizer = tokenizer_nltk
    embedded_statements_dic = {}
    max_statement_len_ = -1
    for index, row in df.iterrows():
        statement = row['statement'].lower()
        if isinstance(statement, bytes):
            # py2-era datasets stored statements as utf-8 bytes; on py3
            # plain str has no .decode, so only decode when needed
            statement = statement.decode('utf-8')
        embedded_statement = [
            np.array(df_vocab.loc[token]) if token in df_vocab.index
            else np.array(df_vocab.loc['OOV'])
            for token in tokenizer(statement)]
        embedded_statements_dic[index] = np.array(embedded_statement)
        max_statement_len_ = max(max_statement_len_, len(embedded_statement))
    if max_statement_len is not None:
        max_statement_len_ = max_statement_len
    for key in list(embedded_statements_dic):
        embedded_statement = embedded_statements_dic[key]
        embedded_statement_len = np.shape(embedded_statement)[0]
        # start from an all-PAD matrix, then overwrite the real prefix
        padded = np.tile(np.array(df_vocab.loc['PAD']),
            (max_statement_len_, 1))
        if max_statement_len_ >= embedded_statement_len:
            padded[:embedded_statement_len, :] = embedded_statement
        else:
            padded[:, :] = embedded_statement[:max_statement_len_, :]
        embedded_statements_dic[key] = padded
    # list(...) is required on Python 3, where .values() is a lazy view and
    # np.asarray would otherwise produce a useless 0-d object array
    embedded_statements_matrix = np.squeeze(
        np.asarray(list(embedded_statements_dic.values())))
    return embedded_statements_matrix, max_statement_len_
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import nltk
from collections import defaultdict
import os.path
# Stop words and punctuation dropped before feature extraction; tokens are
# compared after lowercasing upstream, so this list is lowercase-only.
# NOTE(review): 'the' appears twice — harmless for membership tests.
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves',
    'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his',
    'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself',
    'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who',
    'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was',
    'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do',
    'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or',
    'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with',
    'about', 'against', 'between', 'into', 'through', 'during', 'before',
    'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out',
    'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once',
    'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both',
    'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',
    'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't',
    'can', 'will', 'just', 'don', 'should', 'now', '\n', 'the', '!', '"',
    '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':',
    ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|',
    '}', '~']
def tokenizer_nltk(input):
    """Split *input* text into tokens with NLTK's default word tokenizer."""
    tokens = nltk.word_tokenize(input)
    return tokens
def statement_to_dict(statement):
    """Bag-of-words representation: map each token to its float count."""
    counts = defaultdict(float)
    for word in statement:
        counts[word] = counts[word] + 1.0
    return counts
def build_statements_features(df, vectorizer, train=True, tokenizer=tokenizer_nltk):
    """Vectorize the 'statement' column into sparse bag-of-words features.

    Tokenizes each lowercased statement, drops stop words, then feeds
    per-statement token-count dicts to *vectorizer*: fit_transform when
    train=True, plain transform otherwise.
    """
    filtered_statements_dic = {}
    for index, row in df.iterrows():
        tokens = tokenizer(row['statement'].lower().decode('utf-8'))
        filtered_statements_dic[index] = [t for t in tokens
                                          if t not in stop_words]
    feature_dicts = [statement_to_dict(s)
                     for s in filtered_statements_dic.values()]
    if train:
        return vectorizer.fit_transform(feature_dicts)
    return vectorizer.transform(feature_dicts)
def extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):
    """Build (or load from cache) the token -> integer-id vocabulary.

    The corpus is lowercased for embedding sets trained on lowercased text
    (6B GloVe, fastText) and kept cased otherwise; the two variants are
    cached under different filenames.  Ids 0 and 1 are reserved for the
    PAD and OOV pseudo-tokens.

    Returns a one-column DataFrame indexed by token.
    """
    path = '../saved_data/embeddings/'
    lowercase = embeddings in ['glove_100d_6b', 'glove_300d_6b', 'facebook']
    # cased and uncased vocabularies live in different cache files
    vocab_file = 'vocab.txt' if lowercase else 'vocab_upper.txt'
    if os.path.isfile(path + vocab_file):
        df_vocab = pd.read_table(path + vocab_file, sep=' ', header=None,
            index_col=0)
        if lowercase:
            print('full vocab already exists - vocab loaded, tokens found: {:.0f}'
                .format(len(df_vocab) - 2))
        else:
            print(
                'full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'
                .format(len(df_vocab) - 2))
    else:
        df_vocab = pd.DataFrame(columns=['1'])
        # reserve the two pseudo-token ids
        df_vocab.loc['PAD'] = 0
        df_vocab.loc['OOV'] = 1
        combined_statements = ' '.join(df['statement'])
        if lowercase:
            combined_statements = combined_statements.lower()
        if isinstance(combined_statements, bytes):
            # py2-era data arrives as utf-8 bytes; py3 str needs no decode
            combined_statements = combined_statements.decode('utf-8')
        for token in tokenizer(combined_statements):
            # NOTE(review): py2 convention — on py3 this makes bytes keys;
            # confirm downstream lookups use the same encoding
            token = token.encode('utf-8')
            if token not in df_vocab.index:
                df_vocab.loc[token] = len(df_vocab)
        df_vocab = df_vocab.astype(int)
        # BUG FIX: the uncased vocabulary used to be written to 'vocab.txt',
        # so its own existence check never found it and it shadowed the
        # lowercase cache; save to the variant-specific file instead.
        # (also dropped the invalid to_csv kwarg `index_col`)
        df_vocab.to_csv(path + vocab_file, sep=' ', header=None)
        print('full vocab built and saved, tokens found: {:.0f}'.format(
            len(df_vocab) - 2))
    return df_vocab
# One entry per supported embedding set: where the raw vectors live, how to
# read them, and which cache files the vocabulary-reduced versions use.
_EMBEDDING_CONFIGS = {
    'glove_300d_6b': {'folder': 'glove_300d_6b', 'label': 'glove_300d_6b',
        'source': 'glove_300d_6b.txt', 'read_kwargs': {},
        'reduced': 'glove_300d_6b_reduced.txt',
        'vocab': 'vocab_reduced.txt', 'vocab_desc': 'reduced vocab'},
    'glove_100d_6b': {'folder': 'glove_100d_6b', 'label': 'glove_100d_6b',
        'source': 'glove_100d_6b.txt', 'read_kwargs': {},
        'reduced': 'glove_100d_6b_reduced.txt',
        'vocab': 'vocab_reduced.txt', 'vocab_desc': 'reduced vocab'},
    'glove_300d_84b': {'folder': 'glove_300d_84b', 'label': 'glove_300d_84b',
        'source': 'glove_300d_84b.txt', 'read_kwargs': {},
        'reduced': 'glove_300d_84b_reduced.txt',
        'vocab': 'vocab_upper_reduced.txt',
        'vocab_desc': 'reduced upper vocab'},
    'google': {'folder': 'google', 'label': 'google_word2vec_300d',
        'source': 'google_word2vec_300d.txt',
        'read_kwargs': {'nrows': 1000000},
        'reduced': 'google_word2vec_300d_reduced.txt',
        'vocab': 'vocab_upper_reduced.txt',
        'vocab_desc': 'reduced upper vocab'},
    'facebook': {'folder': 'facebook', 'label': 'facebook_fastText_300d',
        'source': 'wiki.en.vec',
        'read_kwargs': {'skiprows': 1, 'usecols': range(301)},
        'reduced': 'facebook_fastText_300d_reduced.txt',
        # historical quirk kept: fastText uses the lowercase vocab file but
        # its log message always said 'reduced upper vocab'
        'vocab': 'vocab_reduced.txt',
        'vocab_desc': 'reduced upper vocab'},
}


def _reduce_embeddings(df_vocab, df_embeddings):
    """Keep only the vocabulary rows of *df_embeddings*, PAD/OOV first.

    OOV is then set to the mean of all retained real vectors.
    """
    reduced = pd.DataFrame(columns=df_embeddings.columns)
    reduced.loc['PAD'] = 0
    reduced.loc['OOV'] = 1
    found = [token for token in df_vocab.index
        if token in df_embeddings.index]
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.x
    reduced = pd.concat([reduced, df_embeddings.loc[found]])
    reduced.loc['OOV'] = reduced.iloc[2:].mean(axis=0)
    return reduced


def load_embeddings(df, df_vocab, embeddings):
    """Load (or build and cache) vocabulary-reduced pretrained embeddings.

    Parameters
    ----------
    df : unused; kept for interface compatibility with existing callers.
    df_vocab : full vocabulary DataFrame (token -> id) from extract_vocab.
    embeddings : one of the keys of ``_EMBEDDING_CONFIGS``.

    Returns
    -------
    (df_embeddings_reduced, df_vocab_reduced): the embedding rows for the
    vocabulary tokens (plus PAD/OOV) and the matching token -> row-index
    mapping.  Both are cached on disk after the first build.

    Raises
    ------
    ValueError for an unknown *embeddings* name (previously this fell
    through silently and died with UnboundLocalError at the return).
    """
    if embeddings not in _EMBEDDING_CONFIGS:
        raise ValueError('unknown embeddings: {!r}'.format(embeddings))
    cfg = _EMBEDDING_CONFIGS[embeddings]
    path = '../saved_data/embeddings/' + cfg['folder'] + '/'
    reduced_file = path + cfg['reduced']
    vocab_file = path + cfg['vocab']
    if os.path.isfile(reduced_file) and os.path.isfile(vocab_file):
        # both caches exist: just load them
        df_embeddings_reduced = pd.read_table(reduced_file, sep=' ',
            header=None, index_col=0)
        df_vocab_reduced = pd.read_table(vocab_file, sep=' ', header=None,
            index_col=0)
        print('{0} reduced embeddings and {1} already exist - {0} reduced embeddings loaded'
            .format(cfg['label'], cfg['vocab_desc']))
    else:
        df_embeddings = pd.read_table(path + cfg['source'], sep=' ',
            header=None, index_col=0, **cfg['read_kwargs'])
        print('full {} embeddings loaded'.format(cfg['label']))
        df_embeddings_reduced = _reduce_embeddings(df_vocab, df_embeddings)
        # (invalid to_csv kwarg `index_col` dropped; the index is written
        # by default and read back with read_table(..., index_col=0))
        df_embeddings_reduced.to_csv(reduced_file, sep=' ', header=None)
        print('{} reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'
            .format(cfg['label'], len(df_embeddings_reduced) - 2,
                len(df_vocab) - 2))
        df_vocab_reduced = pd.DataFrame(columns=['1'])
        for i, token in enumerate(df_embeddings_reduced.index):
            df_vocab_reduced.loc[token] = i
        df_vocab_reduced = df_vocab_reduced.astype(int)
        df_vocab_reduced.to_csv(vocab_file, sep=' ', header=None)
        if cfg['vocab'] == 'vocab_reduced.txt':
            print('vocab reduced built and saved')
        else:
            print('vocab upper reduced built and saved')
    return df_embeddings_reduced, df_vocab_reduced
def build_W_embeddings(df_embeddings_reduced):
    """Return the reduced embedding table as a plain 2-D numpy array.

    One row per vocabulary entry (PAD, OOV, then real tokens), one column
    per embedding dimension.

    `DataFrame.as_matrix()` was deprecated and later removed from pandas;
    `.values` is the long-supported equivalent and works on every version.
    """
    return df_embeddings_reduced.values
def build_statements_embeddings(df, df_vocab, tokenizer=None,
    max_statement_len=None):
    """Turn each row's 'statement' into a padded vector of vocabulary ids.

    Parameters
    ----------
    df : DataFrame with a 'statement' column (str, or py2-era utf-8 bytes).
    df_vocab : one-column DataFrame mapping token -> integer id, with the
        reserved rows 'PAD' (id 0) and 'OOV' (id 1).
    tokenizer : callable splitting a string into tokens; defaults to
        ``tokenizer_nltk`` (resolved lazily so the module-level default
        need not exist at definition time).
    max_statement_len : optional fixed length; longer statements are
        truncated, shorter ones padded with the PAD id.

    Returns
    -------
    (matrix, max_len) where matrix stacks one padded id array per row of
    *df* (singleton axes squeezed away) and max_len is the padding length
    actually used.
    """
    if tokenizer is None:
        tokenizer = tokenizer_nltk
    embedded_statements_dic = {}
    max_statement_len_ = -1
    for index, row in df.iterrows():
        statement = row['statement'].lower()
        if isinstance(statement, bytes):
            # py2-era datasets stored statements as utf-8 bytes; on py3
            # plain str has no .decode, so only decode when needed
            statement = statement.decode('utf-8')
        embedded_statement = [
            np.array(df_vocab.loc[token]) if token in df_vocab.index
            else np.array(df_vocab.loc['OOV'])
            for token in tokenizer(statement)]
        embedded_statements_dic[index] = np.array(embedded_statement)
        max_statement_len_ = max(max_statement_len_, len(embedded_statement))
    if max_statement_len is not None:
        max_statement_len_ = max_statement_len
    for key in list(embedded_statements_dic):
        embedded_statement = embedded_statements_dic[key]
        embedded_statement_len = np.shape(embedded_statement)[0]
        # start from an all-PAD matrix, then overwrite the real prefix
        padded = np.tile(np.array(df_vocab.loc['PAD']),
            (max_statement_len_, 1))
        if max_statement_len_ >= embedded_statement_len:
            padded[:embedded_statement_len, :] = embedded_statement
        else:
            padded[:, :] = embedded_statement[:max_statement_len_, :]
        embedded_statements_dic[key] = padded
    # list(...) is required on Python 3, where .values() is a lazy view and
    # np.asarray would otherwise produce a useless 0-d object array
    embedded_statements_matrix = np.squeeze(
        np.asarray(list(embedded_statements_dic.values())))
    return embedded_statements_matrix, max_statement_len_
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import nltk
from collections import defaultdict
import os.path
# Stop words and punctuation dropped before feature extraction; tokens are
# compared after lowercasing upstream, so this list is lowercase-only.
# NOTE(review): 'the' appears twice — harmless for membership tests.
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours',
              'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers',
              'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
              'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
              'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',
              'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until',
              'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
              'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
              'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',
              'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',
              'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',
              'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now', '\n', 'the',
              '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=',
              '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~']
# stop words source: https://github.com/uclmr/stat-nlp-book/blob/python/chapters/doc_classify.ipynb
def tokenizer_nltk(input):
    """Split *input* text into tokens with NLTK's default word tokenizer."""
    tokens = nltk.word_tokenize(input)
    return tokens
# sklearn models
def statement_to_dict(statement):
    """Bag-of-words representation: map each token to its float count."""
    counts = defaultdict(float)
    for word in statement:
        counts[word] = counts[word] + 1.0
    return counts
def build_statements_features(df, vectorizer, train=True, tokenizer=tokenizer_nltk):
    """Vectorize the 'statement' column into sparse bag-of-words features.

    Tokenizes each lowercased statement, drops stop words, then feeds
    per-statement token-count dicts to *vectorizer*: fit_transform when
    train=True, plain transform otherwise.
    """
    filtered_statements_dic = {}
    for index, row in df.iterrows():
        tokens = tokenizer(row['statement'].lower().decode('utf-8'))
        filtered_statements_dic[index] = [t for t in tokens
                                          if t not in stop_words]
    feature_dicts = [statement_to_dict(s)
                     for s in filtered_statements_dic.values()]
    if train:
        return vectorizer.fit_transform(feature_dicts)
    return vectorizer.transform(feature_dicts)
# tensorflow models
def extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):
    """Build (or load from cache) the token -> integer-id vocabulary.

    The corpus is lowercased for embedding sets trained on lowercased text
    (6B GloVe, fastText) and kept cased otherwise; the two variants are
    cached under different filenames.  Ids 0 and 1 are reserved for the
    PAD and OOV pseudo-tokens.

    Returns a one-column DataFrame indexed by token.
    """
    path = '../saved_data/embeddings/'
    lowercase = embeddings in ['glove_100d_6b', 'glove_300d_6b', 'facebook']
    # cased and uncased vocabularies live in different cache files
    vocab_file = 'vocab.txt' if lowercase else 'vocab_upper.txt'
    if os.path.isfile(path + vocab_file):
        df_vocab = pd.read_table(path + vocab_file, sep=' ', header=None,
            index_col=0)
        if lowercase:
            print('full vocab already exists - vocab loaded, tokens found: {:.0f}'
                .format(len(df_vocab) - 2))
        else:
            print(
                'full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'
                .format(len(df_vocab) - 2))
    else:
        df_vocab = pd.DataFrame(columns=['1'])
        # reserve the two pseudo-token ids
        df_vocab.loc['PAD'] = 0
        df_vocab.loc['OOV'] = 1
        combined_statements = ' '.join(df['statement'])
        if lowercase:
            combined_statements = combined_statements.lower()
        if isinstance(combined_statements, bytes):
            # py2-era data arrives as utf-8 bytes; py3 str needs no decode
            combined_statements = combined_statements.decode('utf-8')
        for token in tokenizer(combined_statements):
            # NOTE(review): py2 convention — on py3 this makes bytes keys;
            # confirm downstream lookups use the same encoding
            token = token.encode('utf-8')
            if token not in df_vocab.index:
                df_vocab.loc[token] = len(df_vocab)
        df_vocab = df_vocab.astype(int)
        # BUG FIX: the uncased vocabulary used to be written to 'vocab.txt',
        # so its own existence check never found it and it shadowed the
        # lowercase cache; save to the variant-specific file instead.
        # (also dropped the invalid to_csv kwarg `index_col`)
        df_vocab.to_csv(path + vocab_file, sep=' ', header=None)
        print('full vocab built and saved, tokens found: {:.0f}'.format(
            len(df_vocab) - 2))
    return df_vocab
def _build_reduced_vocab(df_embeddings_reduced, path, vocab_file, vocab_label):
    """Rebuild the token -> row-index map from reduced embeddings and cache it."""
    df_vocab_reduced = pd.DataFrame(columns=['1'])
    for i, token in enumerate(df_embeddings_reduced.index):
        df_vocab_reduced.loc[token] = i
    df_vocab_reduced = df_vocab_reduced.astype(int)
    df_vocab_reduced.to_csv(path + vocab_file, sep=' ', header=False)
    print('{} reduced built and saved'.format(vocab_label))
    return df_vocab_reduced


def _load_reduced_embeddings(df_vocab, path, name, full_file, vocab_file,
                             vocab_label, read_kwargs):
    """Load cached reduced embeddings, or reduce the full table to df_vocab.

    Rows 0 and 1 of the reduced table are the PAD and OOV vectors; the OOV
    vector is the mean of all real token vectors.
    """
    reduced_file = name + '_reduced.txt'
    if os.path.isfile(path + reduced_file) and os.path.isfile(path + vocab_file):
        df_embeddings_reduced = pd.read_csv(path + reduced_file, sep=' ',
                                            header=None, index_col=0)
        df_vocab_reduced = pd.read_csv(path + vocab_file, sep=' ',
                                       header=None, index_col=0)
        print('{0} reduced embeddings and reduced {1} already exist - '
              '{0} reduced embeddings loaded'.format(name, vocab_label))
        return df_embeddings_reduced, df_vocab_reduced
    df_embeddings = pd.read_csv(path + full_file, sep=' ', header=None,
                                index_col=0, **read_kwargs)
    df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)
    df_embeddings_reduced.loc['PAD'] = 0
    df_embeddings_reduced.loc['OOV'] = 1
    print('full {} embeddings loaded'.format(name))
    # Keep only vocab tokens present in the embedding table, in vocab order.
    # (Replaces an O(n^2) row-by-row DataFrame.append loop; .append was
    # removed in pandas 2.0.)
    known = set(df_embeddings.index)
    found = [token for token in df_vocab.index if token in known]
    df_embeddings_reduced = pd.concat([df_embeddings_reduced,
                                       df_embeddings.loc[found]])
    # OOV vector = mean over all real (non PAD/OOV) token vectors.
    df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:].mean(axis=0)
    # BUG FIX: the original passed index_col=0 to to_csv, which to_csv does
    # not accept (TypeError); header=False replaces the ambiguous header=None.
    df_embeddings_reduced.to_csv(path + reduced_file, sep=' ', header=False)
    print('{} reduced embeddings built and saved, tokens found: {:.0f}, '
          'out of: {:.0f}'.format(name, len(df_embeddings_reduced) - 2,
                                  len(df_vocab) - 2))
    df_vocab_reduced = _build_reduced_vocab(df_embeddings_reduced, path,
                                            vocab_file, vocab_label)
    return df_embeddings_reduced, df_vocab_reduced


def load_embeddings(df, df_vocab, embeddings):
    """Return (reduced_embeddings, reduced_vocab) for the named embedding set.

    Parameters
    ----------
    df : pandas.DataFrame
        Unused; kept for backward compatibility with existing callers.
    df_vocab : pandas.DataFrame
        Full vocabulary as produced by ``extract_vocab`` (tokens as index).
    embeddings : str
        One of 'glove_300d_6b', 'glove_100d_6b', 'glove_300d_84b',
        'google', 'facebook'.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        Reduced embedding table (rows = PAD, OOV, then found tokens) and the
        matching token -> row-index map.

    Raises
    ------
    ValueError
        If ``embeddings`` is not a known set (the original fell through to
        an ``UnboundLocalError``).
    """
    base = '../saved_data/embeddings/'
    configs = {
        'glove_300d_6b': dict(path=base + 'glove_300d_6b/',
                              name='glove_300d_6b',
                              full_file='glove_300d_6b.txt',
                              vocab_file='vocab_reduced.txt',
                              vocab_label='vocab',
                              read_kwargs={}),
        'glove_100d_6b': dict(path=base + 'glove_100d_6b/',
                              name='glove_100d_6b',
                              full_file='glove_100d_6b.txt',
                              vocab_file='vocab_reduced.txt',
                              vocab_label='vocab',
                              read_kwargs={}),
        'glove_300d_84b': dict(path=base + 'glove_300d_84b/',
                               name='glove_300d_84b',
                               full_file='glove_300d_84b.txt',
                               vocab_file='vocab_upper_reduced.txt',
                               vocab_label='upper vocab',
                               read_kwargs={}),
        'google': dict(path=base + 'google/',
                       name='google_word2vec_300d',
                       full_file='google_word2vec_300d.txt',
                       vocab_file='vocab_upper_reduced.txt',
                       vocab_label='upper vocab',
                       # only the first 1M vectors fit in memory
                       read_kwargs={'nrows': 1000000}),
        'facebook': dict(path=base + 'facebook/',
                         name='facebook_fastText_300d',
                         full_file='wiki.en.vec',
                         vocab_file='vocab_reduced.txt',
                         vocab_label='vocab',
                         # fastText .vec files carry a count header line and
                         # occasionally trailing columns
                         read_kwargs={'skiprows': 1, 'usecols': range(301)}),
    }
    try:
        cfg = configs[embeddings]
    except KeyError:
        raise ValueError('unknown embeddings set: {!r}'.format(embeddings))
    return _load_reduced_embeddings(df_vocab, **cfg)
def build_W_embeddings(df_embeddings_reduced):
    """Return the embedding matrix (rows = vocab ids) as a NumPy array.

    ``DataFrame.as_matrix`` was deprecated in pandas 0.23 and removed in
    pandas 1.0; ``to_numpy`` is the supported replacement.
    """
    return df_embeddings_reduced.to_numpy()
def build_statements_embeddings(df, df_vocab, tokenizer=None,
                                max_statement_len=None):
    """Map each statement to a padded/truncated sequence of vocabulary ids.

    Each statement in ``df['statement']`` is lowercased, tokenized, and each
    token replaced by its id row from ``df_vocab`` (unknown tokens map to
    ``'OOV'``).  Sequences are padded with the ``'PAD'`` row, or truncated,
    to a common length.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a string column ``'statement'``.
    df_vocab : pandas.DataFrame
        Token -> id map with reserved ``'PAD'`` and ``'OOV'`` rows.
    tokenizer : callable, optional
        ``str -> list[str]``; defaults to ``tokenizer_nltk``.
    max_statement_len : int, optional
        Fixed output length; defaults to the longest statement seen.

    Returns
    -------
    (numpy.ndarray, int)
        Squeezed matrix of id sequences (one row per statement) and the
        sequence length used.
    """
    if tokenizer is None:
        tokenizer = tokenizer_nltk  # late-bound default keeps the module importable
    embedded_statements_dic = {}
    max_statement_len_ = -1
    oov_row = np.array(df_vocab.loc['OOV'])
    for index, row in df.iterrows():
        tokens = tokenizer(row['statement'].lower())
        embedded_statement = [
            np.array(df_vocab.loc[token]) if token in df_vocab.index else oov_row
            for token in tokens
        ]
        embedded_statements_dic[index] = np.array(embedded_statement)
        max_statement_len_ = max(max_statement_len_, len(embedded_statement))
    if max_statement_len is not None:
        max_statement_len_ = max_statement_len
    pad_row = np.array(df_vocab.loc['PAD'])
    for key, embedded_statement in embedded_statements_dic.items():
        embedded_statement_len = np.shape(embedded_statement)[0]
        padded = np.tile(pad_row, (max_statement_len_, 1))
        if max_statement_len_ >= embedded_statement_len:
            padded[:embedded_statement_len, :] = embedded_statement
        else:
            # Statement longer than the target length: truncate.
            padded[:, :] = embedded_statement[:max_statement_len_, :]
        embedded_statements_dic[key] = padded
    # BUG FIX: np.asarray on a Python 3 dict view yields a 0-d object array;
    # materialize the values as a list so a proper 3-D stack is built.
    embedded_statements_matrix = np.squeeze(
        np.asarray(list(embedded_statements_dic.values())))
    return embedded_statements_matrix, max_statement_len_
|
flexible
|
{
"blob_id": "0356b408624988100c10b20facecef14f1552203",
"index": 4537,
"step-1": "<mask token>\n\n\ndef build_statements_features(df, vectorizer, train=True, tokenizer=\n tokenizer_nltk):\n filtered_statements_dic = {}\n for index, row in df.iterrows():\n filtered_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode(\n 'utf-8'))\n for token in tokenized_statement:\n if token not in stop_words:\n filtered_statement.append(token)\n filtered_statements_dic[index] = filtered_statement\n filtered_statements = filtered_statements_dic.values()\n if train:\n statements_features = vectorizer.fit_transform([statement_to_dict(\n statement) for statement in filtered_statements])\n else:\n statements_features = vectorizer.transform([statement_to_dict(\n statement) for statement in filtered_statements])\n return statements_features\n\n\n<mask token>\n\n\ndef build_W_embeddings(df_embeddings_reduced):\n W = df_embeddings_reduced.as_matrix()\n return W\n\n\ndef build_statements_embeddings(df, df_vocab, tokenizer=tokenizer_nltk,\n max_statement_len=None):\n embedded_statements_dic = {}\n max_statement_len_ = -1\n for index, row in df.iterrows():\n embedded_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode(\n 'utf-8'))\n for token in tokenized_statement:\n if token in df_vocab.index:\n embedded_statement.append(np.array(df_vocab.loc[token]))\n else:\n embedded_statement.append(np.array(df_vocab.loc['OOV']))\n embedded_statements_dic[index] = np.array(embedded_statement)\n if len(embedded_statement) > max_statement_len_:\n max_statement_len_ = len(embedded_statement)\n if max_statement_len is not None:\n max_statement_len_ = max_statement_len\n for key in embedded_statements_dic:\n embedded_statement = embedded_statements_dic[key]\n embedded_statement_len = np.shape(embedded_statement)[0]\n padded_embedded_statement = np.tile(np.array(df_vocab.loc['PAD']),\n (max_statement_len_, 1))\n if max_statement_len_ >= embedded_statement_len:\n padded_embedded_statement[:embedded_statement_len, :\n ] = 
embedded_statement\n else:\n padded_embedded_statement[:, :] = embedded_statement[:\n max_statement_len_, :]\n embedded_statements_dic[key] = padded_embedded_statement\n embedded_statements_matrix = np.squeeze(np.asarray(\n embedded_statements_dic.values()))\n return embedded_statements_matrix, max_statement_len_\n",
"step-2": "<mask token>\n\n\ndef build_statements_features(df, vectorizer, train=True, tokenizer=\n tokenizer_nltk):\n filtered_statements_dic = {}\n for index, row in df.iterrows():\n filtered_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode(\n 'utf-8'))\n for token in tokenized_statement:\n if token not in stop_words:\n filtered_statement.append(token)\n filtered_statements_dic[index] = filtered_statement\n filtered_statements = filtered_statements_dic.values()\n if train:\n statements_features = vectorizer.fit_transform([statement_to_dict(\n statement) for statement in filtered_statements])\n else:\n statements_features = vectorizer.transform([statement_to_dict(\n statement) for statement in filtered_statements])\n return statements_features\n\n\ndef extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):\n path = '../saved_data/embeddings/'\n lowercase = True if embeddings in ['glove_100d_6b', 'glove_300d_6b',\n 'facebook'] else False\n if os.path.isfile(path + 'vocab.txt') and lowercase:\n df_vocab = pd.read_table(path + 'vocab.txt', sep=' ', header=None,\n index_col=0)\n print('full vocab already exists - vocab loaded, tokens found: {:.0f}'\n .format(len(df_vocab) - 2))\n elif os.path.isfile(path + 'vocab_upper.txt') and not lowercase:\n df_vocab = pd.read_table(path + 'vocab_upper.txt', sep=' ', header=\n None, index_col=0)\n print(\n 'full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'\n .format(len(df_vocab) - 2))\n else:\n df_vocab = pd.DataFrame(columns=['1'])\n df_vocab.loc['PAD'] = 0\n df_vocab.loc['OOV'] = 1\n if lowercase:\n combined_statements = ' '.join(df['statement']).lower().decode(\n 'utf-8')\n else:\n combined_statements = ' '.join(df['statement']).decode('utf-8')\n tokenized_combined_statements = tokenizer(combined_statements)\n for token in tokenized_combined_statements:\n token = token.encode('utf-8')\n if token not in df_vocab.index:\n df_vocab.loc[token] = len(df_vocab)\n df_vocab = 
df_vocab.astype(int)\n df_vocab.to_csv(path + 'vocab.txt', sep=' ', header=None, index_col=0)\n print('full vocab built and saved, tokens found: {:.0f}'.format(len\n (df_vocab) - 2))\n return df_vocab\n\n\ndef load_embeddings(df, df_vocab, embeddings):\n if embeddings == 'glove_300d_6b':\n path = '../saved_data/embeddings/glove_300d_6b/'\n if os.path.isfile(path + 'glove_300d_6b_reduced.txt'\n ) and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_300d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_6b reduced embeddings and reduced vocab already exist - glove_300d_6b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_300d_6b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_300d_6b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path + 'glove_300d_6b_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n if embeddings == 'glove_100d_6b':\n path = 
'../saved_data/embeddings/glove_100d_6b/'\n if os.path.isfile(path + 'glove_100d_6b_reduced.txt'\n ) and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_100d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_100d_6b reduced embeddings and reduced vocab already exist - glove_100d_6b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_100d_6b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_100d_6b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path + 'glove_100d_6b_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_100d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n if embeddings == 'glove_300d_84b':\n path = '../saved_data/embeddings/glove_300d_84b/'\n if os.path.isfile(path + 'glove_300d_84b_reduced.txt'\n ) and os.path.isfile(path + 'vocab_upper_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0\n )\n df_vocab_reduced = pd.read_table(path +\n 
'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_84b reduced embeddings and reduced upper vocab already exist - glove_300d_84b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_300d_84b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_300d_84b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0\n )\n print(\n 'glove_300d_84b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=\n ' ', header=None, index_col=0)\n print('vocab upper reduced built and saved')\n if embeddings == 'google':\n path = '../saved_data/embeddings/google/'\n if os.path.isfile(path + 'google_word2vec_300d_reduced.txt'\n ) and os.path.isfile(path + 'vocab_upper_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'google_word2vec_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n df_vocab_reduced = pd.read_table(path +\n 'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print(\n 'google_word2vec_300d reduced embeddings and reduced upper vocab already exist - google_word2vec_300d reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 
'google_word2vec_300d.txt',\n sep=' ', header=None, index_col=0, nrows=1000000)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full google_word2vec_300d embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'google_word2vec_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n print(\n 'google_word2vec_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=\n ' ', header=None, index_col=0)\n print('vocab upper reduced built and saved')\n if embeddings == 'facebook':\n path = '../saved_data/embeddings/facebook/'\n if os.path.isfile(path + 'facebook_fastText_300d_reduced.txt'\n ) and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'facebook_fastText_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'facebook_fastText_300d reduced embeddings and reduced upper vocab already exist - facebook_fastText_300d reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'wiki.en.vec', sep=' ',\n skiprows=1, header=None, index_col=0, usecols=range(301))\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 
1\n print('full facebook_fastText_300d embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'facebook_fastText_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n print(\n 'facebook_fastText_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n return df_embeddings_reduced, df_vocab_reduced\n\n\ndef build_W_embeddings(df_embeddings_reduced):\n W = df_embeddings_reduced.as_matrix()\n return W\n\n\ndef build_statements_embeddings(df, df_vocab, tokenizer=tokenizer_nltk,\n max_statement_len=None):\n embedded_statements_dic = {}\n max_statement_len_ = -1\n for index, row in df.iterrows():\n embedded_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode(\n 'utf-8'))\n for token in tokenized_statement:\n if token in df_vocab.index:\n embedded_statement.append(np.array(df_vocab.loc[token]))\n else:\n embedded_statement.append(np.array(df_vocab.loc['OOV']))\n embedded_statements_dic[index] = np.array(embedded_statement)\n if len(embedded_statement) > max_statement_len_:\n max_statement_len_ = len(embedded_statement)\n if max_statement_len is not None:\n max_statement_len_ = max_statement_len\n for key in embedded_statements_dic:\n embedded_statement = embedded_statements_dic[key]\n embedded_statement_len = np.shape(embedded_statement)[0]\n padded_embedded_statement = 
np.tile(np.array(df_vocab.loc['PAD']),\n (max_statement_len_, 1))\n if max_statement_len_ >= embedded_statement_len:\n padded_embedded_statement[:embedded_statement_len, :\n ] = embedded_statement\n else:\n padded_embedded_statement[:, :] = embedded_statement[:\n max_statement_len_, :]\n embedded_statements_dic[key] = padded_embedded_statement\n embedded_statements_matrix = np.squeeze(np.asarray(\n embedded_statements_dic.values()))\n return embedded_statements_matrix, max_statement_len_\n",
"step-3": "<mask token>\n\n\ndef tokenizer_nltk(input):\n return nltk.word_tokenize(input)\n\n\ndef statement_to_dict(statement):\n statement_features = defaultdict(float)\n for token in statement:\n statement_features[token] += 1.0\n return statement_features\n\n\ndef build_statements_features(df, vectorizer, train=True, tokenizer=\n tokenizer_nltk):\n filtered_statements_dic = {}\n for index, row in df.iterrows():\n filtered_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode(\n 'utf-8'))\n for token in tokenized_statement:\n if token not in stop_words:\n filtered_statement.append(token)\n filtered_statements_dic[index] = filtered_statement\n filtered_statements = filtered_statements_dic.values()\n if train:\n statements_features = vectorizer.fit_transform([statement_to_dict(\n statement) for statement in filtered_statements])\n else:\n statements_features = vectorizer.transform([statement_to_dict(\n statement) for statement in filtered_statements])\n return statements_features\n\n\ndef extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):\n path = '../saved_data/embeddings/'\n lowercase = True if embeddings in ['glove_100d_6b', 'glove_300d_6b',\n 'facebook'] else False\n if os.path.isfile(path + 'vocab.txt') and lowercase:\n df_vocab = pd.read_table(path + 'vocab.txt', sep=' ', header=None,\n index_col=0)\n print('full vocab already exists - vocab loaded, tokens found: {:.0f}'\n .format(len(df_vocab) - 2))\n elif os.path.isfile(path + 'vocab_upper.txt') and not lowercase:\n df_vocab = pd.read_table(path + 'vocab_upper.txt', sep=' ', header=\n None, index_col=0)\n print(\n 'full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'\n .format(len(df_vocab) - 2))\n else:\n df_vocab = pd.DataFrame(columns=['1'])\n df_vocab.loc['PAD'] = 0\n df_vocab.loc['OOV'] = 1\n if lowercase:\n combined_statements = ' '.join(df['statement']).lower().decode(\n 'utf-8')\n else:\n combined_statements = ' 
'.join(df['statement']).decode('utf-8')\n tokenized_combined_statements = tokenizer(combined_statements)\n for token in tokenized_combined_statements:\n token = token.encode('utf-8')\n if token not in df_vocab.index:\n df_vocab.loc[token] = len(df_vocab)\n df_vocab = df_vocab.astype(int)\n df_vocab.to_csv(path + 'vocab.txt', sep=' ', header=None, index_col=0)\n print('full vocab built and saved, tokens found: {:.0f}'.format(len\n (df_vocab) - 2))\n return df_vocab\n\n\ndef load_embeddings(df, df_vocab, embeddings):\n if embeddings == 'glove_300d_6b':\n path = '../saved_data/embeddings/glove_300d_6b/'\n if os.path.isfile(path + 'glove_300d_6b_reduced.txt'\n ) and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_300d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_6b reduced embeddings and reduced vocab already exist - glove_300d_6b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_300d_6b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_300d_6b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path + 'glove_300d_6b_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n 
df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n if embeddings == 'glove_100d_6b':\n path = '../saved_data/embeddings/glove_100d_6b/'\n if os.path.isfile(path + 'glove_100d_6b_reduced.txt'\n ) and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_100d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_100d_6b reduced embeddings and reduced vocab already exist - glove_100d_6b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_100d_6b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_100d_6b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path + 'glove_100d_6b_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_100d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n if embeddings == 'glove_300d_84b':\n path = '../saved_data/embeddings/glove_300d_84b/'\n if 
os.path.isfile(path + 'glove_300d_84b_reduced.txt'\n ) and os.path.isfile(path + 'vocab_upper_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0\n )\n df_vocab_reduced = pd.read_table(path +\n 'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_84b reduced embeddings and reduced upper vocab already exist - glove_300d_84b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_300d_84b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_300d_84b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0\n )\n print(\n 'glove_300d_84b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=\n ' ', header=None, index_col=0)\n print('vocab upper reduced built and saved')\n if embeddings == 'google':\n path = '../saved_data/embeddings/google/'\n if os.path.isfile(path + 'google_word2vec_300d_reduced.txt'\n ) and os.path.isfile(path + 'vocab_upper_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'google_word2vec_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n df_vocab_reduced = pd.read_table(path +\n 
'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print(\n 'google_word2vec_300d reduced embeddings and reduced upper vocab already exist - google_word2vec_300d reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'google_word2vec_300d.txt',\n sep=' ', header=None, index_col=0, nrows=1000000)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full google_word2vec_300d embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'google_word2vec_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n print(\n 'google_word2vec_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=\n ' ', header=None, index_col=0)\n print('vocab upper reduced built and saved')\n if embeddings == 'facebook':\n path = '../saved_data/embeddings/facebook/'\n if os.path.isfile(path + 'facebook_fastText_300d_reduced.txt'\n ) and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'facebook_fastText_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'facebook_fastText_300d reduced embeddings and reduced upper vocab already exist - facebook_fastText_300d reduced embeddings loaded'\n )\n else:\n df_embeddings = 
pd.read_table(path + 'wiki.en.vec', sep=' ',\n skiprows=1, header=None, index_col=0, usecols=range(301))\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full facebook_fastText_300d embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'facebook_fastText_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n print(\n 'facebook_fastText_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n return df_embeddings_reduced, df_vocab_reduced\n\n\ndef build_W_embeddings(df_embeddings_reduced):\n W = df_embeddings_reduced.as_matrix()\n return W\n\n\ndef build_statements_embeddings(df, df_vocab, tokenizer=tokenizer_nltk,\n max_statement_len=None):\n embedded_statements_dic = {}\n max_statement_len_ = -1\n for index, row in df.iterrows():\n embedded_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode(\n 'utf-8'))\n for token in tokenized_statement:\n if token in df_vocab.index:\n embedded_statement.append(np.array(df_vocab.loc[token]))\n else:\n embedded_statement.append(np.array(df_vocab.loc['OOV']))\n embedded_statements_dic[index] = np.array(embedded_statement)\n if len(embedded_statement) > max_statement_len_:\n max_statement_len_ = len(embedded_statement)\n if 
max_statement_len is not None:\n max_statement_len_ = max_statement_len\n for key in embedded_statements_dic:\n embedded_statement = embedded_statements_dic[key]\n embedded_statement_len = np.shape(embedded_statement)[0]\n padded_embedded_statement = np.tile(np.array(df_vocab.loc['PAD']),\n (max_statement_len_, 1))\n if max_statement_len_ >= embedded_statement_len:\n padded_embedded_statement[:embedded_statement_len, :\n ] = embedded_statement\n else:\n padded_embedded_statement[:, :] = embedded_statement[:\n max_statement_len_, :]\n embedded_statements_dic[key] = padded_embedded_statement\n embedded_statements_matrix = np.squeeze(np.asarray(\n embedded_statements_dic.values()))\n return embedded_statements_matrix, max_statement_len_\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport nltk\nfrom collections import defaultdict\nimport os.path\nstop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves',\n 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his',\n 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself',\n 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who',\n 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was',\n 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do',\n 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or',\n 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with',\n 'about', 'against', 'between', 'into', 'through', 'during', 'before',\n 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out',\n 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once',\n 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both',\n 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',\n 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't',\n 'can', 'will', 'just', 'don', 'should', 'now', '\\n', 'the', '!', '\"',\n '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':',\n ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`', '{', '|',\n '}', '~']\n\n\ndef tokenizer_nltk(input):\n return nltk.word_tokenize(input)\n\n\ndef statement_to_dict(statement):\n statement_features = defaultdict(float)\n for token in statement:\n statement_features[token] += 1.0\n return statement_features\n\n\ndef build_statements_features(df, vectorizer, train=True, tokenizer=\n tokenizer_nltk):\n filtered_statements_dic = {}\n for index, row in df.iterrows():\n filtered_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode(\n 'utf-8'))\n for token in tokenized_statement:\n if token not in stop_words:\n filtered_statement.append(token)\n filtered_statements_dic[index] = 
filtered_statement\n filtered_statements = filtered_statements_dic.values()\n if train:\n statements_features = vectorizer.fit_transform([statement_to_dict(\n statement) for statement in filtered_statements])\n else:\n statements_features = vectorizer.transform([statement_to_dict(\n statement) for statement in filtered_statements])\n return statements_features\n\n\ndef extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):\n path = '../saved_data/embeddings/'\n lowercase = True if embeddings in ['glove_100d_6b', 'glove_300d_6b',\n 'facebook'] else False\n if os.path.isfile(path + 'vocab.txt') and lowercase:\n df_vocab = pd.read_table(path + 'vocab.txt', sep=' ', header=None,\n index_col=0)\n print('full vocab already exists - vocab loaded, tokens found: {:.0f}'\n .format(len(df_vocab) - 2))\n elif os.path.isfile(path + 'vocab_upper.txt') and not lowercase:\n df_vocab = pd.read_table(path + 'vocab_upper.txt', sep=' ', header=\n None, index_col=0)\n print(\n 'full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'\n .format(len(df_vocab) - 2))\n else:\n df_vocab = pd.DataFrame(columns=['1'])\n df_vocab.loc['PAD'] = 0\n df_vocab.loc['OOV'] = 1\n if lowercase:\n combined_statements = ' '.join(df['statement']).lower().decode(\n 'utf-8')\n else:\n combined_statements = ' '.join(df['statement']).decode('utf-8')\n tokenized_combined_statements = tokenizer(combined_statements)\n for token in tokenized_combined_statements:\n token = token.encode('utf-8')\n if token not in df_vocab.index:\n df_vocab.loc[token] = len(df_vocab)\n df_vocab = df_vocab.astype(int)\n df_vocab.to_csv(path + 'vocab.txt', sep=' ', header=None, index_col=0)\n print('full vocab built and saved, tokens found: {:.0f}'.format(len\n (df_vocab) - 2))\n return df_vocab\n\n\ndef load_embeddings(df, df_vocab, embeddings):\n if embeddings == 'glove_300d_6b':\n path = '../saved_data/embeddings/glove_300d_6b/'\n if os.path.isfile(path + 'glove_300d_6b_reduced.txt'\n ) and os.path.isfile(path 
+ 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_300d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_6b reduced embeddings and reduced vocab already exist - glove_300d_6b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_300d_6b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_300d_6b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path + 'glove_300d_6b_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n if embeddings == 'glove_100d_6b':\n path = '../saved_data/embeddings/glove_100d_6b/'\n if os.path.isfile(path + 'glove_100d_6b_reduced.txt'\n ) and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_100d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_100d_6b reduced embeddings and reduced vocab already exist - 
glove_100d_6b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_100d_6b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_100d_6b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path + 'glove_100d_6b_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'glove_100d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n if embeddings == 'glove_300d_84b':\n path = '../saved_data/embeddings/glove_300d_84b/'\n if os.path.isfile(path + 'glove_300d_84b_reduced.txt'\n ) and os.path.isfile(path + 'vocab_upper_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0\n )\n df_vocab_reduced = pd.read_table(path +\n 'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print(\n 'glove_300d_84b reduced embeddings and reduced upper vocab already exist - glove_300d_84b reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'glove_300d_84b.txt', sep=\n ' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] 
= 1\n print('full glove_300d_84b embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0\n )\n print(\n 'glove_300d_84b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=\n ' ', header=None, index_col=0)\n print('vocab upper reduced built and saved')\n if embeddings == 'google':\n path = '../saved_data/embeddings/google/'\n if os.path.isfile(path + 'google_word2vec_300d_reduced.txt'\n ) and os.path.isfile(path + 'vocab_upper_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'google_word2vec_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n df_vocab_reduced = pd.read_table(path +\n 'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print(\n 'google_word2vec_300d reduced embeddings and reduced upper vocab already exist - google_word2vec_300d reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'google_word2vec_300d.txt',\n sep=' ', header=None, index_col=0, nrows=1000000)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full google_word2vec_300d embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n 
df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'google_word2vec_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n print(\n 'google_word2vec_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=\n ' ', header=None, index_col=0)\n print('vocab upper reduced built and saved')\n if embeddings == 'facebook':\n path = '../saved_data/embeddings/facebook/'\n if os.path.isfile(path + 'facebook_fastText_300d_reduced.txt'\n ) and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path +\n 'facebook_fastText_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt',\n sep=' ', header=None, index_col=0)\n print(\n 'facebook_fastText_300d reduced embeddings and reduced upper vocab already exist - facebook_fastText_300d reduced embeddings loaded'\n )\n else:\n df_embeddings = pd.read_table(path + 'wiki.en.vec', sep=' ',\n skiprows=1, header=None, index_col=0, usecols=range(301))\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full facebook_fastText_300d embeddings loaded')\n for token in df_vocab.index:\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(\n df_embeddings.loc[token], ignore_index=False)\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:\n ].mean(axis=0)\n df_embeddings_reduced.to_csv(path +\n 'facebook_fastText_300d_reduced.txt', sep=' ', header=None,\n index_col=0)\n print(\n 
'facebook_fastText_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'\n .format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ',\n header=None, index_col=0)\n print('vocab reduced built and saved')\n return df_embeddings_reduced, df_vocab_reduced\n\n\ndef build_W_embeddings(df_embeddings_reduced):\n W = df_embeddings_reduced.as_matrix()\n return W\n\n\ndef build_statements_embeddings(df, df_vocab, tokenizer=tokenizer_nltk,\n max_statement_len=None):\n embedded_statements_dic = {}\n max_statement_len_ = -1\n for index, row in df.iterrows():\n embedded_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode(\n 'utf-8'))\n for token in tokenized_statement:\n if token in df_vocab.index:\n embedded_statement.append(np.array(df_vocab.loc[token]))\n else:\n embedded_statement.append(np.array(df_vocab.loc['OOV']))\n embedded_statements_dic[index] = np.array(embedded_statement)\n if len(embedded_statement) > max_statement_len_:\n max_statement_len_ = len(embedded_statement)\n if max_statement_len is not None:\n max_statement_len_ = max_statement_len\n for key in embedded_statements_dic:\n embedded_statement = embedded_statements_dic[key]\n embedded_statement_len = np.shape(embedded_statement)[0]\n padded_embedded_statement = np.tile(np.array(df_vocab.loc['PAD']),\n (max_statement_len_, 1))\n if max_statement_len_ >= embedded_statement_len:\n padded_embedded_statement[:embedded_statement_len, :\n ] = embedded_statement\n else:\n padded_embedded_statement[:, :] = embedded_statement[:\n max_statement_len_, :]\n embedded_statements_dic[key] = padded_embedded_statement\n embedded_statements_matrix = np.squeeze(np.asarray(\n embedded_statements_dic.values()))\n return 
embedded_statements_matrix, max_statement_len_\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport nltk\nfrom collections import defaultdict\nimport os.path\n\n\nstop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours',\n 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers',\n 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',\n 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',\n 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',\n 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until',\n 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',\n 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',\n 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',\n 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\n 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',\n 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now', '\\n', 'the',\n '!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=',\n '>', '?', '@', '[', '\\\\', ']', '^', '_', '`', '{', '|', '}', '~']\n# stop words source: https://github.com/uclmr/stat-nlp-book/blob/python/chapters/doc_classify.ipynb\n\n\ndef tokenizer_nltk(input):\n return nltk.word_tokenize(input)\n\n\n# sklearn models\ndef statement_to_dict(statement):\n statement_features = defaultdict(float)\n for token in statement:\n statement_features[token] += 1.0\n return statement_features\n\n\ndef build_statements_features(df, vectorizer, train=True, tokenizer=tokenizer_nltk):\n filtered_statements_dic = {}\n\n for index, row in df.iterrows():\n filtered_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode('utf-8'))\n for token in tokenized_statement:\n 
if token not in stop_words:\n filtered_statement.append(token)\n\n filtered_statements_dic[index] = filtered_statement\n\n filtered_statements = filtered_statements_dic.values()\n\n if train:\n statements_features = vectorizer.fit_transform([statement_to_dict(statement) for statement in filtered_statements])\n else:\n statements_features = vectorizer.transform([statement_to_dict(statement) for statement in filtered_statements])\n\n return statements_features\n\n\n# tensorflow models\ndef extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):\n path = '../saved_data/embeddings/'\n\n lowercase = True if embeddings in ['glove_100d_6b', 'glove_300d_6b', 'facebook'] else False\n\n if os.path.isfile(path + 'vocab.txt') and lowercase:\n df_vocab = pd.read_table(path + 'vocab.txt', sep=' ', header=None, index_col=0)\n print('full vocab already exists - vocab loaded, tokens found: {:.0f}'.format(len(df_vocab) - 2))\n\n elif os.path.isfile(path + 'vocab_upper.txt') and not lowercase:\n df_vocab = pd.read_table(path + 'vocab_upper.txt', sep=' ', header=None, index_col=0)\n print('full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'.format(len(df_vocab) - 2))\n\n else:\n df_vocab = pd.DataFrame(columns=['1'])\n # add OOV and PAD tokens\n df_vocab.loc['PAD'] = 0\n df_vocab.loc['OOV'] = 1\n\n if lowercase:\n combined_statements = ' '.join(df['statement']).lower().decode('utf-8')\n else:\n combined_statements = ' '.join(df['statement']).decode('utf-8')\n\n tokenized_combined_statements = tokenizer(combined_statements)\n for token in tokenized_combined_statements:\n token = token.encode('utf-8')\n if token not in df_vocab.index:\n df_vocab.loc[token] = len(df_vocab)\n df_vocab = df_vocab.astype(int)\n df_vocab.to_csv(path + 'vocab.txt', sep=' ', header=None, index_col=0)\n print('full vocab built and saved, tokens found: {:.0f}'.format(len(df_vocab) - 2))\n\n return df_vocab\n\n\ndef load_embeddings(df, df_vocab, embeddings):\n # glove_300d_6b\n if 
embeddings == 'glove_300d_6b':\n path = '../saved_data/embeddings/glove_300d_6b/'\n\n # check if embeddings reduced exist already and if so return them\n if os.path.isfile(path + 'glove_300d_6b_reduced.txt') and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path + 'glove_300d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt', sep=' ', header=None, index_col=0)\n print('glove_300d_6b reduced embeddings and reduced vocab already exist - glove_300d_6b reduced embeddings loaded')\n # build the vocab from the combined statements and reduce the embeddings\n else:\n df_embeddings = pd.read_table(path + 'glove_300d_6b.txt', sep=' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_300d_6b embeddings loaded')\n\n for token in df_vocab.index:\n # token = token.encode('utf-8')\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(df_embeddings.loc[token], ignore_index=False)\n # else:\n # df_vocab.drop(df_vocab.loc[token])\n\n # change OOV token\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:].mean(axis=0)\n\n df_embeddings_reduced.to_csv(path + 'glove_300d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n print('glove_300d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ', header=None, index_col=0)\n print('vocab reduced built and saved')\n\n # glove_100d_6b\n if embeddings == 'glove_100d_6b':\n path = 
'../saved_data/embeddings/glove_100d_6b/'\n\n # check if embeddings reduced exist already and if so return them\n if os.path.isfile(path + 'glove_100d_6b_reduced.txt') and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path + 'glove_100d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt', sep=' ', header=None, index_col=0)\n print('glove_100d_6b reduced embeddings and reduced vocab already exist - glove_100d_6b reduced embeddings loaded')\n # build the vocab from the combined statements and reduce the embeddings\n else:\n df_embeddings = pd.read_table(path + 'glove_100d_6b.txt', sep=' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_100d_6b embeddings loaded')\n\n for token in df_vocab.index:\n # token = token.encode('utf-8')\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(df_embeddings.loc[token], ignore_index=False)\n # else:\n # df_vocab.drop(df_vocab.loc[token])\n\n # change OOV token\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:].mean(axis=0)\n\n df_embeddings_reduced.to_csv(path + 'glove_100d_6b_reduced.txt', sep=' ', header=None, index_col=0)\n print('glove_100d_6b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ', header=None, index_col=0)\n print('vocab reduced built and saved')\n\n # glove_300d_6b\n if embeddings == 'glove_300d_84b':\n path = '../saved_data/embeddings/glove_300d_84b/'\n\n # check if embeddings reduced 
exist already and if so return them\n if os.path.isfile(path + 'glove_300d_84b_reduced.txt') and os.path.isfile(path + 'vocab_upper_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path + 'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print('glove_300d_84b reduced embeddings and reduced upper vocab already exist - glove_300d_84b reduced embeddings loaded')\n # build the vocab from the combined statements and reduce the embeddings\n else:\n df_embeddings = pd.read_table(path + 'glove_300d_84b.txt', sep=' ', header=None, index_col=0)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full glove_300d_84b embeddings loaded')\n\n for token in df_vocab.index:\n # token = token.encode('utf-8')\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(df_embeddings.loc[token], ignore_index=False)\n # else:\n # df_vocab.drop(df_vocab.loc[token])\n\n # change OOV token\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:].mean(axis=0)\n\n df_embeddings_reduced.to_csv(path + 'glove_300d_84b_reduced.txt', sep=' ', header=None, index_col=0)\n print('glove_300d_84b reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print('vocab upper reduced built and saved')\n\n # google\n if embeddings == 'google':\n path = '../saved_data/embeddings/google/'\n\n # check if embeddings reduced exist already and if so return them\n if os.path.isfile(path 
+ 'google_word2vec_300d_reduced.txt') and os.path.isfile(path + 'vocab_upper_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path + 'google_word2vec_300d_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print('google_word2vec_300d reduced embeddings and reduced upper vocab already exist - google_word2vec_300d reduced embeddings loaded')\n # build the vocab from the combined statements and reduce the embeddings\n else:\n df_embeddings = pd.read_table(path + 'google_word2vec_300d.txt', sep=' ', header=None, index_col=0, nrows=1000000)\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full google_word2vec_300d embeddings loaded')\n\n for token in df_vocab.index:\n # token = token.encode('utf-8')\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(df_embeddings.loc[token], ignore_index=False)\n # else:\n # df_vocab.drop(df_vocab.loc[token])\n\n # change OOV token\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:].mean(axis=0)\n\n df_embeddings_reduced.to_csv(path + 'google_word2vec_300d_reduced.txt', sep=' ', header=None, index_col=0)\n print('google_word2vec_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_upper_reduced.txt', sep=' ', header=None, index_col=0)\n print('vocab upper reduced built and saved')\n\n # facebook\n if embeddings == 'facebook':\n path = '../saved_data/embeddings/facebook/'\n\n # check if embeddings reduced exist already and if so return them\n if 
os.path.isfile(path + 'facebook_fastText_300d_reduced.txt') and os.path.isfile(path + 'vocab_reduced.txt'):\n df_embeddings_reduced = pd.read_table(path + 'facebook_fastText_300d_reduced.txt', sep=' ', header=None, index_col=0)\n df_vocab_reduced = pd.read_table(path + 'vocab_reduced.txt', sep=' ', header=None, index_col=0)\n print('facebook_fastText_300d reduced embeddings and reduced upper vocab already exist - facebook_fastText_300d reduced embeddings loaded')\n # build the vocab from the combined statements and reduce the embeddings\n else:\n df_embeddings = pd.read_table(path + 'wiki.en.vec', sep=' ', skiprows=1, header=None, index_col=0, usecols=range(301))\n df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)\n df_embeddings_reduced.loc['PAD'] = 0\n df_embeddings_reduced.loc['OOV'] = 1\n print('full facebook_fastText_300d embeddings loaded')\n\n for token in df_vocab.index:\n # token = token.encode('utf-8')\n if token in df_embeddings.index:\n df_embeddings_reduced = df_embeddings_reduced.append(df_embeddings.loc[token], ignore_index=False)\n # else:\n # df_vocab.drop(df_vocab.loc[token])\n\n # change OOV token\n df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:].mean(axis=0)\n\n df_embeddings_reduced.to_csv(path + 'facebook_fastText_300d_reduced.txt', sep=' ', header=None, index_col=0)\n print('facebook_fastText_300d reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'.format(len(df_embeddings_reduced) - 2, len(df_vocab) - 2))\n\n df_vocab_reduced = pd.DataFrame(columns=['1'])\n for i, token in enumerate(df_embeddings_reduced.index):\n df_vocab_reduced.loc[token] = i\n df_vocab_reduced = df_vocab_reduced.astype(int)\n df_vocab_reduced.to_csv(path + 'vocab_reduced.txt', sep=' ', header=None, index_col=0)\n print('vocab reduced built and saved')\n\n return df_embeddings_reduced, df_vocab_reduced\n\n\ndef build_W_embeddings(df_embeddings_reduced):\n W = df_embeddings_reduced.as_matrix()\n return W\n\n\ndef 
build_statements_embeddings(df, df_vocab, tokenizer=tokenizer_nltk, max_statement_len=None):\n embedded_statements_dic = {}\n max_statement_len_ = -1\n\n for index, row in df.iterrows():\n embedded_statement = []\n tokenized_statement = tokenizer(row['statement'].lower().decode('utf-8'))\n for token in tokenized_statement:\n if token in df_vocab.index:\n embedded_statement.append(np.array(df_vocab.loc[token]))\n else:\n embedded_statement.append(np.array(df_vocab.loc['OOV']))\n\n embedded_statements_dic[index] = np.array(embedded_statement)\n if len(embedded_statement) > max_statement_len_:\n max_statement_len_ = len(embedded_statement)\n\n if max_statement_len is not None:\n max_statement_len_ = max_statement_len\n\n for key in embedded_statements_dic:\n embedded_statement = embedded_statements_dic[key]\n embedded_statement_len = np.shape(embedded_statement)[0]\n padded_embedded_statement = np.tile(np.array(df_vocab.loc['PAD']), (max_statement_len_, 1))\n\n if max_statement_len_ >= embedded_statement_len:\n padded_embedded_statement[:embedded_statement_len, :] = embedded_statement\n else:\n padded_embedded_statement[:, :] = embedded_statement[:max_statement_len_, :]\n embedded_statements_dic[key] = padded_embedded_statement\n\n embedded_statements_matrix = np.squeeze(np.asarray(embedded_statements_dic.values()))\n\n return embedded_statements_matrix, max_statement_len_\n",
"step-ids": [
3,
5,
7,
9,
10
]
}
|
[
3,
5,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('blog', '0033_auto_20171016_1334')]
operations = [migrations.AlterField(model_name='sponsor', name=
'email_text_markdown', field=models.CharField(default='',
max_length=1000))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('blog', '0033_auto_20171016_1334')]
operations = [migrations.AlterField(model_name='sponsor', name=
'email_text_markdown', field=models.CharField(default='',
max_length=1000))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-16 12:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0033_auto_20171016_1334'),
]
operations = [
migrations.AlterField(
model_name='sponsor',
name='email_text_markdown',
field=models.CharField(default='', max_length=1000),
),
]
|
flexible
|
{
"blob_id": "d0dfea27128ca6966c85da6529ead5c95c86c4cf",
"index": 1183,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0033_auto_20171016_1334')]\n operations = [migrations.AlterField(model_name='sponsor', name=\n 'email_text_markdown', field=models.CharField(default='',\n max_length=1000))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0033_auto_20171016_1334')]\n operations = [migrations.AlterField(model_name='sponsor', name=\n 'email_text_markdown', field=models.CharField(default='',\n max_length=1000))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-10-16 12:35\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0033_auto_20171016_1334'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='sponsor',\n name='email_text_markdown',\n field=models.CharField(default='', max_length=1000),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
import pandas
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error
from math import sqrt
def main():
df = pandas.read_csv("2016Q1")
df = df.append(pandas.read_csv("2016Q2"))
df = df.append(pandas.read_csv("2016Q3"))
df = df.append(pandas.read_csv("2016Q4"))
test = pandas.read_csv("2017Q1")
test = test.append(pandas.read_csv("2017Q2"))
test = test.append(pandas.read_csv("2017Q3"))
test = test.append(pandas.read_csv("2017Q4"))
#make_scatter(df)
train_predict_1d(df, test)
#train_predict_2d(df, test)
return
def make_scatter(df):
plt.figure(figsize=(8,6))
plt.plot(df['Start station number'], df['Counts'], 'o')
plt.xlabel('Station')
plt.ylabel('Counts')
plt.show()
return
def train_predict_1d(df, test):
regressor = DecisionTreeRegressor(max_depth=2)
regressor.fit(np.array([df['Start station number']]).T, df['Counts'])
xx = np.array([test['Start station number']]).T
plt.figure(figsize=(8,6))
plt.plot(df['Start station number'], df['Counts'], 'o', label='observation')
plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=.7, label='prediction')
plt.xlabel('Station')
plt.ylabel('Counts')
plt.legend()
#plt.show()
print("RMSE")
print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))
return
def train_predict_2d(df, test):
#regressor = AdaBoostRegressor(DecisionTreeRegressor(max_depth=10), n_estimators=50, loss="square")
regressor = DecisionTreeRegressor()
regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])
nx = 30
ny = 30
x_station = np.linspace(30800,32300, nx)
y_day = np.linspace(0, 3, ny)
xx, yy = np.meshgrid(x_station, y_day)
z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)
zz = np.reshape(z_counts, (nx, ny))
fig = plt.figure(figsize=(8, 8))
plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)
plt.colorbar(label='bikes predicted')
#plt.scatter(test['Start station number'], test['Counts'], s=test['Counts']/25.0, c='g')
plt.xlim(np.min(x_station), np.max(x_station))
plt.ylim(np.min(y_day), np.max(y_day))
plt.xlabel('Start station number')
plt.ylabel('Quarter')
#plt.show()
#fig.savefig("2d_prediction_quarter")
print("Mean Absolute Error")
print(mean_absolute_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']])))
print("RMSE")
print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']]))))
return
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "e35dbcdef8779ffabc34b5e5c543e35b29523971",
"index": 7989,
"step-1": "<mask token>\n\n\ndef make_scatter(df):\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation'\n )\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=0.7, label=\n 'prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\n\ndef train_predict_2d(df, test):\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n nx = 30\n ny = 30\n x_station = np.linspace(30800, 32300, nx)\n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted')\n plt.xlim(np.min(x_station), np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n print('Mean Absolute Error')\n print(mean_absolute_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']])))\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']]))))\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n df = pandas.read_csv('2016Q1')\n df = df.append(pandas.read_csv('2016Q2'))\n df = df.append(pandas.read_csv('2016Q3'))\n df = df.append(pandas.read_csv('2016Q4'))\n test = pandas.read_csv('2017Q1')\n test = test.append(pandas.read_csv('2017Q2'))\n test = test.append(pandas.read_csv('2017Q3'))\n test = test.append(pandas.read_csv('2017Q4'))\n train_predict_1d(df, test)\n return\n\n\ndef make_scatter(df):\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation'\n )\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=0.7, label=\n 'prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\n\ndef train_predict_2d(df, test):\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n nx = 30\n ny = 30\n x_station = np.linspace(30800, 32300, nx)\n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted')\n plt.xlim(np.min(x_station), np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n print('Mean Absolute Error')\n print(mean_absolute_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']])))\n 
print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']]))))\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n df = pandas.read_csv('2016Q1')\n df = df.append(pandas.read_csv('2016Q2'))\n df = df.append(pandas.read_csv('2016Q3'))\n df = df.append(pandas.read_csv('2016Q4'))\n test = pandas.read_csv('2017Q1')\n test = test.append(pandas.read_csv('2017Q2'))\n test = test.append(pandas.read_csv('2017Q3'))\n test = test.append(pandas.read_csv('2017Q4'))\n train_predict_1d(df, test)\n return\n\n\ndef make_scatter(df):\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation'\n )\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=0.7, label=\n 'prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\n\ndef train_predict_2d(df, test):\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n nx = 30\n ny = 30\n x_station = np.linspace(30800, 32300, nx)\n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted')\n plt.xlim(np.min(x_station), np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n print('Mean Absolute Error')\n print(mean_absolute_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']])))\n 
print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']]))))\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import pandas\nfrom matplotlib import pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nimport numpy as np\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom math import sqrt\n\n\ndef main():\n df = pandas.read_csv('2016Q1')\n df = df.append(pandas.read_csv('2016Q2'))\n df = df.append(pandas.read_csv('2016Q3'))\n df = df.append(pandas.read_csv('2016Q4'))\n test = pandas.read_csv('2017Q1')\n test = test.append(pandas.read_csv('2017Q2'))\n test = test.append(pandas.read_csv('2017Q3'))\n test = test.append(pandas.read_csv('2017Q4'))\n train_predict_1d(df, test)\n return\n\n\ndef make_scatter(df):\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation'\n )\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=0.7, label=\n 'prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\n\ndef train_predict_2d(df, test):\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n nx = 30\n ny = 30\n x_station = np.linspace(30800, 32300, nx)\n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted')\n plt.xlim(np.min(x_station), np.max(x_station))\n 
plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n print('Mean Absolute Error')\n print(mean_absolute_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']])))\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']]))))\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\nimport pandas\nfrom matplotlib import pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nimport numpy as np\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom math import sqrt\n\ndef main():\n df = pandas.read_csv(\"2016Q1\")\n df = df.append(pandas.read_csv(\"2016Q2\"))\n df = df.append(pandas.read_csv(\"2016Q3\"))\n df = df.append(pandas.read_csv(\"2016Q4\"))\n\n test = pandas.read_csv(\"2017Q1\")\n test = test.append(pandas.read_csv(\"2017Q2\"))\n test = test.append(pandas.read_csv(\"2017Q3\"))\n test = test.append(pandas.read_csv(\"2017Q4\"))\n #make_scatter(df)\n train_predict_1d(df, test)\n #train_predict_2d(df, test)\n return\n\ndef make_scatter(df):\n plt.figure(figsize=(8,6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n \n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8,6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation')\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=.7, label='prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n #plt.show()\n\n print(\"RMSE\")\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\ndef train_predict_2d(df, test):\n #regressor = AdaBoostRegressor(DecisionTreeRegressor(max_depth=10), n_estimators=50, loss=\"square\")\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n\n nx = 30\n ny = 30\n \n x_station = np.linspace(30800,32300, nx) \n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = 
np.reshape(z_counts, (nx, ny))\n\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted') \n #plt.scatter(test['Start station number'], test['Counts'], s=test['Counts']/25.0, c='g')\n plt.xlim(np.min(x_station), np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n #plt.show()\n #fig.savefig(\"2d_prediction_quarter\")\n\n print(\"Mean Absolute Error\")\n print(mean_absolute_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']])))\n print(\"RMSE\")\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']]))))\n\n return\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
class User():
def __init__(self, first, last, gender, age):
self.first_name = first
self.last_name = last
self.gender = gender
self.age = age
self.full_name = self.first_name + " " + self.last_name
def describe_user(self):
print("The name of the user is " + self.full_name + ".")
print("The user's gender is " + self.gender + ".")
print("The user is " + str(self.age) + " years old.")
def greet_user(self):
if self.gender.lower() == "male":
print("Greetings, Mr. " + self.last_name.title() + "!")
elif self.gender.lower() == "female":
print("Greetings, Miss " + self.last_name.title() + "!")
user1 = User("zhichao", "li", "male", 27)
user2 = User("juan", "zhang", "female", 28)
user3 = User("Tian", "ZHANG", "male", 26)
user1.describe_user()
user1.greet_user()
user2.describe_user()
user2.greet_user()
user3.describe_user()
user3.greet_user()
|
normal
|
{
"blob_id": "93b712c60ba4bfa81d967ec59035b6fb7793ce87",
"index": 1974,
"step-1": "class User:\n <mask token>\n <mask token>\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-2": "class User:\n <mask token>\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-3": "class User:\n\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + ' ' + self.last_name\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-4": "class User:\n\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + ' ' + self.last_name\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\nuser1 = User('zhichao', 'li', 'male', 27)\nuser2 = User('juan', 'zhang', 'female', 28)\nuser3 = User('Tian', 'ZHANG', 'male', 26)\nuser1.describe_user()\nuser1.greet_user()\nuser2.describe_user()\nuser2.greet_user()\nuser3.describe_user()\nuser3.greet_user()\n",
"step-5": "class User():\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + \" \" + self.last_name\n\n def describe_user(self):\n print(\"The name of the user is \" + self.full_name + \".\")\n print(\"The user's gender is \" + self.gender + \".\")\n print(\"The user is \" + str(self.age) + \" years old.\")\n\n def greet_user(self):\n if self.gender.lower() == \"male\":\n print(\"Greetings, Mr. \" + self.last_name.title() + \"!\")\n elif self.gender.lower() == \"female\":\n print(\"Greetings, Miss \" + self.last_name.title() + \"!\")\n\n\nuser1 = User(\"zhichao\", \"li\", \"male\", 27)\nuser2 = User(\"juan\", \"zhang\", \"female\", 28)\nuser3 = User(\"Tian\", \"ZHANG\", \"male\", 26)\n\nuser1.describe_user()\nuser1.greet_user()\nuser2.describe_user()\nuser2.greet_user()\nuser3.describe_user()\nuser3.greet_user()\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
# add DenseNet structure
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# self.x = x
self.block0 = nn.Sequential(
# input image 96x96
nn.ReLU(),
nn.Conv2d(3, 64, (5, 5), (1, 1), (2, 2)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(64),
)
self.block1 = nn.Sequential(
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(64),
)
self.block2 = nn.Sequential(
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(64),
)
self.block3 = nn.Sequential(
nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(32),
nn.Conv2d(32, 4, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(4),
)
self.side0_3 = nn.Sequential(
nn.Conv2d(64, 4, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(4),
)
self.side1_3 = nn.Sequential(
nn.Conv2d(64, 4, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(4),
)
self.side2_3 = nn.Sequential(
nn.Conv2d(64, 4, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(4),
)
self.fc = nn.Sequential(
nn.Conv2d(4, 1, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(1),
nn.Sigmoid()
)
def forward(self, x):
x=x.float()
out = self.block0(x) # 64x96x96
res0_1 = out
res0_2 = out
res0_3 = self.side0_3(out)
out = self.block1(out) # 64x96x96
res1_2 = out
res1_3 = self.side1_3(out)
out = out + res0_1
out = self.block2(out) # 64x96x96
res2_3 = self.side2_3(out)
out = out + res0_2 + res1_2
out = self.block3(out) # 4x96x96
out = out + res0_3 + res1_3 + res2_3
out = self.fc(out)
return out
def _initialize_weights(self):
pass
|
normal
|
{
"blob_id": "49cdeb59e75ed93122b3a62fbdc508b7d66166d6",
"index": 2337,
"step-1": "<mask token>\n\n\nclass Net(nn.Module):\n <mask token>\n <mask token>\n\n def _initialize_weights(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,\n 1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,\n 1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())\n <mask token>\n\n def _initialize_weights(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,\n 1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,\n 1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())\n\n def forward(self, x):\n x = x.float()\n out = self.block0(x)\n res0_1 = out\n res0_2 = out\n res0_3 = self.side0_3(out)\n out = self.block1(out)\n res1_2 = out\n res1_3 = self.side1_3(out)\n out = out + res0_1\n out = self.block2(out)\n res2_3 = self.side2_3(out)\n out = out + res0_2 + res1_2\n out = self.block3(out)\n out = out + res0_3 + res1_3 + res2_3\n out = self.fc(out)\n return out\n\n def _initialize_weights(self):\n pass\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,\n 1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,\n 1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())\n\n def forward(self, x):\n x = x.float()\n out = self.block0(x)\n res0_1 = out\n res0_2 = out\n res0_3 = self.side0_3(out)\n out = self.block1(out)\n res1_2 = out\n res1_3 = self.side1_3(out)\n out = out + res0_1\n out = self.block2(out)\n res2_3 = self.side2_3(out)\n out = out + res0_2 + res1_2\n out = self.block3(out)\n out = out + res0_3 + res1_3 + res2_3\n out = self.fc(out)\n return out\n\n def _initialize_weights(self):\n pass\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\n\n# add DenseNet structure\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n# self.x = x\n self.block0 = nn.Sequential(\n # input image 96x96\n nn.ReLU(),\n nn.Conv2d(3, 64, (5, 5), (1, 1), (2, 2)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(64),\n\n )\n \n self.block1 = nn.Sequential(\n nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(64),\n )\n \n self.block2 = nn.Sequential(\n nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(64), \n )\n \n self.block3 = nn.Sequential(\n nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 4, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(4),\n )\n \n self.side0_3 = nn.Sequential(\n nn.Conv2d(64, 4, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(4),\n )\n \n self.side1_3 = nn.Sequential(\n nn.Conv2d(64, 4, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(4),\n )\n \n self.side2_3 = nn.Sequential(\n nn.Conv2d(64, 4, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(4),\n )\n \n self.fc = nn.Sequential(\n nn.Conv2d(4, 1, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(1),\n nn.Sigmoid()\n )\n \n \n def forward(self, x):\n x=x.float()\n out = self.block0(x) # 64x96x96\n res0_1 = out\n res0_2 = out\n res0_3 = self.side0_3(out)\n \n out = self.block1(out) # 64x96x96\n res1_2 = out\n res1_3 = self.side1_3(out)\n \n out = out + res0_1\n out = self.block2(out) # 64x96x96\n res2_3 = self.side2_3(out)\n \n out = out + res0_2 + res1_2\n out = self.block3(out) # 4x96x96\n \n out = out + res0_3 + res1_3 + res2_3\n out = self.fc(out)\n\n return out\n \n \n def _initialize_weights(self):\n pass",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) < 2:
raise NameError(
'Please add subject number (ex:1) as 1st argument in the command line!'
)
elif len(sys.argv) < 3:
raise NameError(
'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'
)
else:
sj = str(sys.argv[1]).zfill(2)
<|reserved_special_token_0|>
with open(json_dir, 'r') as json_file:
analysis_params = json.load(json_file)
<|reserved_special_token_0|>
if str(sys.argv[2]) == 'cartesius':
filepath = glob.glob(os.path.join(analysis_params[
'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),
'*'))
print('functional files from %s' % os.path.split(filepath[0])[0])
out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],
'shift_crop')
elif str(sys.argv[2]) == 'aeneas':
print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',
'sub-{sj}'.format(sj=sj), '*'))
filepath = glob.glob(os.path.join(analysis_params[
'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
print('functional files from %s' % os.path.split(filepath[0])[0])
out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')
if with_smooth == 'True':
file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[
'smooth_fwhm']
median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),
'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],
'iterative_fit')
else:
file_extension = 'cropped_sg_psc.func.gii'
median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),
'run-median', 'iterative_fit')
<|reserved_special_token_0|>
filename.sort()
if not os.path.exists(median_path):
os.makedirs(median_path)
<|reserved_special_token_0|>
for field in ['hemi-L', 'hemi-R']:
hemi = [h for h in filename if field in h]
med_file = os.path.join(median_path, re.sub('run-\\d{2}_',
'run-median_', os.path.split(hemi[0])[-1]))
if not os.path.exists(med_file):
med_gii.append(median_gii(hemi, median_path))
print('computed %s' % med_gii)
else:
med_gii.append(med_file)
print('median file %s already exists, skipping' % med_gii)
<|reserved_special_token_0|>
png_filename.sort()
<|reserved_special_token_0|>
screenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,
dm_shape='square')
print('computed %s' % dm_filename)
<|reserved_special_token_0|>
for gii_file in med_gii:
print('loading data from %s' % gii_file)
data = np.array(surface.load_surf_data(gii_file))
print('data array with shape %s' % str(data.shape))
gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)
grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')
if not os.path.isfile(grid_estimates_filename):
print('%s not found, fitting grid' % grid_estimates_filename)
gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)
np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y
=gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2
], betas=gf.gridsearch_params[..., 3], baseline=gf.
gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=
gf.gridsearch_params[..., 6])
loaded_gf_pars = np.load(grid_estimates_filename)
gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',
'y', 'size', 'betas', 'baseline', 'ns', 'r2']])
gf.gridsearch_params = np.transpose(gf.gridsearch_params)
iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')
if not os.path.isfile(iterative_out):
print('doing iterative fit')
gf.iterative_fit(rsq_threshold=0.1, verbose=False)
np.savez(iterative_out, it_output=gf.iterative_search_params)
else:
print('%s already exists' % iterative_out)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Parse CLI args: argv[1] = subject number, argv[2] = server ('aeneas' or 'cartesius').
if len(sys.argv) < 2:
    raise NameError(
        'Please add subject number (ex:1) as 1st argument in the command line!'
        )
elif len(sys.argv) < 3:
    raise NameError(
        'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'
        )
else:
    # zero-pad the subject number (e.g. '1' -> '01')
    sj = str(sys.argv[1]).zfill(2)
# analysis parameters: absolute path on cartesius, working directory otherwise
json_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(sys.argv[2]
    ) == 'cartesius' else 'analysis_params.json'
with open(json_dir, 'r') as json_file:
    analysis_params = json.load(json_file)
# whether to use the smoothed functionals (JSON stores 'True'/'False' as strings)
with_smooth = analysis_params['with_smooth']
# locate post-fmriprep functional files and the output dir for this server
if str(sys.argv[2]) == 'cartesius':
    filepath = glob.glob(os.path.join(analysis_params[
        'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),
        '*'))
    print('functional files from %s' % os.path.split(filepath[0])[0])
    out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],
        'shift_crop')
elif str(sys.argv[2]) == 'aeneas':
    print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',
        'sub-{sj}'.format(sj=sj), '*'))
    filepath = glob.glob(os.path.join(analysis_params[
        'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
    print('functional files from %s' % os.path.split(filepath[0])[0])
    out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')
# filename suffix and median-run output path depend on the smoothing setting
if with_smooth == 'True':
    file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[
        'smooth_fwhm']
    median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),
        'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],
        'iterative_fit')
else:
    file_extension = 'cropped_sg_psc.func.gii'
    median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),
        'run-median', 'iterative_fit')
# keep only pRF runs in fsaverage space with the expected suffix
filename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and
    run.endswith(file_extension)]
filename.sort()
if not os.path.exists(median_path):
    os.makedirs(median_path)
# compute (or reuse) the median run per hemisphere via the median_gii() helper
med_gii = []
for field in ['hemi-L', 'hemi-R']:
    hemi = [h for h in filename if field in h]
    med_file = os.path.join(median_path, re.sub('run-\\d{2}_',
        'run-median_', os.path.split(hemi[0])[-1]))
    if not os.path.exists(med_file):
        med_gii.append(median_gii(hemi, median_path))
        # NOTE(review): prints the whole accumulated list; the newly created
        # file alone was presumably intended
        print('computed %s' % med_gii)
    else:
        med_gii.append(med_file)
        print('median file %s already exists, skipping' % med_gii)
# build the pRF design matrix from the stimulus screenshot PNGs
png_path = '/home/inesv/SB-ref/scripts/imgs/' if str(sys.argv[2]
    ) == 'cartesius' else analysis_params['imgs_dir']
png_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]
png_filename.sort()
dm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')
screenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,
    dm_shape='square')
print('computed %s' % dm_filename)
prf_dm = np.load(dm_filename)
# transpose so the design matrix is (x, y, t)
prf_dm = prf_dm.T
prf_dm = shift_DM(prf_dm)
# crop initial TRs to match the already-cropped functional data
prf_dm = prf_dm[:, :, analysis_params['crop_pRF_TR']:]
# model parameters and HRF (popeye's spm_hrf sampled at this TR)
fit_model = analysis_params['fit_model']
TR = analysis_params['TR']
hrf = utilities.spm_hrf(0, TR)
# stimulus object: couples the design matrix with real-world screen geometry
prf_stim = PRFStimulus2D(screen_size_cm=analysis_params['screen_width'],
    screen_distance_cm=analysis_params['screen_distance'], design_matrix=
    prf_dm, TR=TR)
gg = Iso2DGaussianGridder(stimulus=prf_stim, hrf=hrf, filter_predictions=
    False, window_length=analysis_params['sg_filt_window_length'],
    polyorder=analysis_params['sg_filt_polyorder'], highpass=False,
    add_mean=False)
# grid-search ranges: sizes/eccentricities quadratically spaced, polar angle linear
grid_nr = analysis_params['grid_steps']
sizes = analysis_params['max_size'] * np.linspace(np.sqrt(analysis_params[
    'min_size'] / analysis_params['max_size']), 1, grid_nr) ** 2
eccs = analysis_params['max_eccen'] * np.linspace(np.sqrt(analysis_params[
    'min_eccen'] / analysis_params['max_eccen']), 1, grid_nr) ** 2
polars = np.linspace(0, 2 * np.pi, grid_nr)
# per hemisphere: grid fit (cached to *_estimates.npz), then iterative fit
for gii_file in med_gii:
    print('loading data from %s' % gii_file)
    data = np.array(surface.load_surf_data(gii_file))
    print('data array with shape %s' % str(data.shape))
    gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)
    grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')
    if not os.path.isfile(grid_estimates_filename):
        print('%s not found, fitting grid' % grid_estimates_filename)
        gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)
        # save the seven grid-fit parameter maps under named keys
        np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y
            =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2
            ], betas=gf.gridsearch_params[..., 3], baseline=gf.
            gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=
            gf.gridsearch_params[..., 6])
    # reload cached estimates; stacking yields (7, n_vertices), transpose so
    # parameters become the trailing axis (presumably what prfpy expects)
    loaded_gf_pars = np.load(grid_estimates_filename)
    gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',
        'y', 'size', 'betas', 'baseline', 'ns', 'r2']])
    gf.gridsearch_params = np.transpose(gf.gridsearch_params)
    # iterative fit (rsq_threshold=0.1 passed to prfpy), cached to disk
    iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')
    if not os.path.isfile(iterative_out):
        print('doing iterative fit')
        gf.iterative_fit(rsq_threshold=0.1, verbose=False)
        np.savez(iterative_out, it_output=gf.iterative_search_params)
    else:
        print('%s already exists' % iterative_out)
<|reserved_special_token_1|>
import os
import json
import sys
import glob
import re
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import scipy as sp
import scipy.stats as stats
import nibabel as nb
from nilearn.image import mean_img
from nilearn import surface
from utils import *
from prfpy.rf import *
from prfpy.timecourse import *
from prfpy.stimulus import PRFStimulus2D
from prfpy.grid import Iso2DGaussianGridder
from prfpy.fit import Iso2DGaussianFitter
from popeye import utilities
# Parse CLI args: argv[1] = subject number, argv[2] = server ('aeneas' or 'cartesius').
if len(sys.argv) < 2:
    raise NameError(
        'Please add subject number (ex:1) as 1st argument in the command line!'
        )
elif len(sys.argv) < 3:
    raise NameError(
        'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'
        )
else:
    # zero-pad the subject number (e.g. '1' -> '01')
    sj = str(sys.argv[1]).zfill(2)
# analysis parameters: absolute path on cartesius, working directory otherwise
json_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(sys.argv[2]
    ) == 'cartesius' else 'analysis_params.json'
with open(json_dir, 'r') as json_file:
    analysis_params = json.load(json_file)
# whether to use the smoothed functionals (JSON stores 'True'/'False' as strings)
with_smooth = analysis_params['with_smooth']
# locate post-fmriprep functional files and the output dir for this server
if str(sys.argv[2]) == 'cartesius':
    filepath = glob.glob(os.path.join(analysis_params[
        'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),
        '*'))
    print('functional files from %s' % os.path.split(filepath[0])[0])
    out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],
        'shift_crop')
elif str(sys.argv[2]) == 'aeneas':
    print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',
        'sub-{sj}'.format(sj=sj), '*'))
    filepath = glob.glob(os.path.join(analysis_params[
        'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
    print('functional files from %s' % os.path.split(filepath[0])[0])
    out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')
# filename suffix and median-run output path depend on the smoothing setting
if with_smooth == 'True':
    file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[
        'smooth_fwhm']
    median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),
        'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],
        'iterative_fit')
else:
    file_extension = 'cropped_sg_psc.func.gii'
    median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),
        'run-median', 'iterative_fit')
# keep only pRF runs in fsaverage space with the expected suffix
filename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and
    run.endswith(file_extension)]
filename.sort()
if not os.path.exists(median_path):
    os.makedirs(median_path)
# compute (or reuse) the median run per hemisphere via the median_gii() helper
med_gii = []
for field in ['hemi-L', 'hemi-R']:
    hemi = [h for h in filename if field in h]
    med_file = os.path.join(median_path, re.sub('run-\\d{2}_',
        'run-median_', os.path.split(hemi[0])[-1]))
    if not os.path.exists(med_file):
        med_gii.append(median_gii(hemi, median_path))
        # NOTE(review): prints the whole accumulated list; the newly created
        # file alone was presumably intended
        print('computed %s' % med_gii)
    else:
        med_gii.append(med_file)
        print('median file %s already exists, skipping' % med_gii)
# build the pRF design matrix from the stimulus screenshot PNGs
png_path = '/home/inesv/SB-ref/scripts/imgs/' if str(sys.argv[2]
    ) == 'cartesius' else analysis_params['imgs_dir']
png_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]
png_filename.sort()
dm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')
screenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,
    dm_shape='square')
print('computed %s' % dm_filename)
prf_dm = np.load(dm_filename)
# transpose so the design matrix is (x, y, t)
prf_dm = prf_dm.T
prf_dm = shift_DM(prf_dm)
# crop initial TRs to match the already-cropped functional data
prf_dm = prf_dm[:, :, analysis_params['crop_pRF_TR']:]
# model parameters and HRF (popeye's spm_hrf sampled at this TR)
fit_model = analysis_params['fit_model']
TR = analysis_params['TR']
hrf = utilities.spm_hrf(0, TR)
# stimulus object: couples the design matrix with real-world screen geometry
prf_stim = PRFStimulus2D(screen_size_cm=analysis_params['screen_width'],
    screen_distance_cm=analysis_params['screen_distance'], design_matrix=
    prf_dm, TR=TR)
gg = Iso2DGaussianGridder(stimulus=prf_stim, hrf=hrf, filter_predictions=
    False, window_length=analysis_params['sg_filt_window_length'],
    polyorder=analysis_params['sg_filt_polyorder'], highpass=False,
    add_mean=False)
# grid-search ranges: sizes/eccentricities quadratically spaced, polar angle linear
grid_nr = analysis_params['grid_steps']
sizes = analysis_params['max_size'] * np.linspace(np.sqrt(analysis_params[
    'min_size'] / analysis_params['max_size']), 1, grid_nr) ** 2
eccs = analysis_params['max_eccen'] * np.linspace(np.sqrt(analysis_params[
    'min_eccen'] / analysis_params['max_eccen']), 1, grid_nr) ** 2
polars = np.linspace(0, 2 * np.pi, grid_nr)
# per hemisphere: grid fit (cached to *_estimates.npz), then iterative fit
for gii_file in med_gii:
    print('loading data from %s' % gii_file)
    data = np.array(surface.load_surf_data(gii_file))
    print('data array with shape %s' % str(data.shape))
    gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)
    grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')
    if not os.path.isfile(grid_estimates_filename):
        print('%s not found, fitting grid' % grid_estimates_filename)
        gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)
        # save the seven grid-fit parameter maps under named keys
        np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y
            =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2
            ], betas=gf.gridsearch_params[..., 3], baseline=gf.
            gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=
            gf.gridsearch_params[..., 6])
    # reload cached estimates; stacking yields (7, n_vertices), transpose so
    # parameters become the trailing axis (presumably what prfpy expects)
    loaded_gf_pars = np.load(grid_estimates_filename)
    gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',
        'y', 'size', 'betas', 'baseline', 'ns', 'r2']])
    gf.gridsearch_params = np.transpose(gf.gridsearch_params)
    # iterative fit (rsq_threshold=0.1 passed to prfpy), cached to disk
    iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')
    if not os.path.isfile(iterative_out):
        print('doing iterative fit')
        gf.iterative_fit(rsq_threshold=0.1, verbose=False)
        np.savez(iterative_out, it_output=gf.iterative_search_params)
    else:
        print('%s already exists' % iterative_out)
<|reserved_special_token_1|>
#####
# Created on Oct 15 13:13:11 2019
#
# @author: inesverissimo
#
# Do pRF fit on median run, make iterative fit and save outputs
####
import os
# issue with tensorflow, try this suggestion
#NUM_PARALLEL_EXEC_UNITS = 16
#os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)
#os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
##
import json
import sys
import glob
import re
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import scipy as sp
import scipy.stats as stats
import nibabel as nb
from nilearn.image import mean_img
from nilearn import surface
from utils import * # import script to use relevante functions
# requires pfpy be installed - preferably with python setup.py develop
from prfpy.rf import *
from prfpy.timecourse import *
from prfpy.stimulus import PRFStimulus2D
from prfpy.grid import Iso2DGaussianGridder
from prfpy.fit import Iso2DGaussianFitter
from popeye import utilities
# define participant number and open json parameter file
# argv[1] = subject number, argv[2] = server ('aeneas' or 'cartesius')
if len(sys.argv) < 2:
    raise NameError('Please add subject number (ex:1) '
                    'as 1st argument in the command line!')
elif len(sys.argv) < 3:
    raise NameError('Please select server being used (ex: aeneas or cartesius) '
                    'as 2nd argument in the command line!')
else:
    # fill subject number with 0 in case user forgets (e.g. '1' -> '01')
    sj = str(sys.argv[1]).zfill(2)
# parameters JSON sits at an absolute path on cartesius, next to the script otherwise
json_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(
    sys.argv[2]) == 'cartesius' else 'analysis_params.json'
with open(json_dir, 'r') as json_file:
    analysis_params = json.load(json_file)
# use smoothed data? (JSON stores 'True'/'False' as strings)
with_smooth = analysis_params['with_smooth']
# define paths and list of files, per server
if str(sys.argv[2]) == 'cartesius':
    filepath = glob.glob(os.path.join(
        analysis_params['post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
    print('functional files from %s' % os.path.split(filepath[0])[0])
    out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],'shift_crop')
elif str(sys.argv[2]) == 'aeneas':
    print(os.path.join(
        analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
    filepath = glob.glob(os.path.join(
        analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
    print('functional files from %s' % os.path.split(filepath[0])[0])
    out_dir = os.path.join(analysis_params['pRF_outdir'],'shift_crop')
# filename suffix and median-run output dir change depending on data used
if with_smooth == 'True':
    # last part of filename to use
    file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params['smooth_fwhm']
    # where the median run (per hemifield) will be written
    median_path = os.path.join(
        out_dir, 'sub-{sj}'.format(sj=sj), 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],'iterative_fit')
else:
    # last part of filename to use
    file_extension = 'cropped_sg_psc.func.gii'
    # where the median run (per hemifield) will be written
    median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj), 'run-median','iterative_fit')
# list of functional files: pRF runs in fsaverage space with the expected suffix
filename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and run.endswith(
    file_extension)]
filename.sort()
if not os.path.exists(median_path):  # check if path to save median run exists
    os.makedirs(median_path)
med_gii = []  # per-hemisphere median-run gii files (created here or reused)
for field in ['hemi-L', 'hemi-R']:
    hemi = [h for h in filename if field in h]
    # set name for median run (now numpy array); raw string so \d is the
    # regex digit class, not an (invalid) string escape sequence
    med_file = os.path.join(median_path, re.sub(
        r'run-\d{2}_', 'run-median_', os.path.split(hemi[0])[-1]))
    # if file doesn't exist
    if not os.path.exists(med_file):
        med_gii.append(median_gii(hemi, median_path))  # create it
        # report the file just created, not the whole accumulated list
        print('computed %s' % (med_gii[-1]))
    else:
        med_gii.append(med_file)
        print('median file %s already exists, skipping' % (med_file))
# create/load design matrix from the stimulus screenshot PNGs
png_path = '/home/inesv/SB-ref/scripts/imgs/' if str(
    sys.argv[2]) == 'cartesius' else analysis_params['imgs_dir']
png_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]
png_filename.sort()
dm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')
#if not os.path.exists(dm_filename):  # if not exists
screenshot2DM(png_filename, 0.1,
              analysis_params['screenRes'], dm_filename,dm_shape = 'square') # (re)created unconditionally
print('computed %s' % (dm_filename))
#else:
#    print('loading %s' % dm_filename)
prf_dm = np.load(dm_filename)
prf_dm = prf_dm.T # then it'll be (x, y, t)
# change DM to see if fit is better like that
# do new one which is average of every 2 TRs
prf_dm = shift_DM(prf_dm)
prf_dm = prf_dm[:,:,analysis_params['crop_pRF_TR']:] # crop DM because functional data also cropped now
# define model params
fit_model = analysis_params["fit_model"]
TR = analysis_params["TR"]
hrf = utilities.spm_hrf(0,TR)  # popeye's SPM HRF sampled at the acquisition TR
# make stimulus object, which takes an input design matrix and sets up its real-world dimensions
prf_stim = PRFStimulus2D(screen_size_cm=analysis_params["screen_width"],
                         screen_distance_cm=analysis_params["screen_distance"],
                         design_matrix=prf_dm,
                         TR=TR)
# sets up stimulus and hrf for this gridder (no filtering of predictions)
gg = Iso2DGaussianGridder(stimulus=prf_stim,
                          hrf=hrf,
                          filter_predictions=False,
                          window_length=analysis_params["sg_filt_window_length"],
                          polyorder=analysis_params["sg_filt_polyorder"],
                          highpass=False,
                          add_mean=False)
# set grid parameters: sizes/eccentricities quadratically spaced, polar angle linear
grid_nr = analysis_params["grid_steps"]
sizes = analysis_params["max_size"] * np.linspace(np.sqrt(analysis_params["min_size"]/analysis_params["max_size"]),1,grid_nr)**2
eccs = analysis_params["max_eccen"] * np.linspace(np.sqrt(analysis_params["min_eccen"]/analysis_params["max_eccen"]),1,grid_nr)**2
polars = np.linspace(0, 2*np.pi, grid_nr)
# fit each hemisphere's median run: grid fit (cached to disk), then iterative fit
for gii_file in med_gii:
    print('loading data from %s' % gii_file)
    data = np.array(surface.load_surf_data(gii_file))
    print('data array with shape %s'%str(data.shape))
    gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)
    #filename for the numpy array with the estimates of the grid fit
    grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')
    if not os.path.isfile(grid_estimates_filename): # if estimates file doesn't exist
        print('%s not found, fitting grid'%grid_estimates_filename)
        # do grid fit and save the seven parameter maps under named keys
        gf.grid_fit(ecc_grid=eccs,
                    polar_grid=polars,
                    size_grid=sizes)
        np.savez(grid_estimates_filename,
                 x = gf.gridsearch_params[..., 0],
                 y = gf.gridsearch_params[..., 1],
                 size = gf.gridsearch_params[..., 2],
                 betas = gf.gridsearch_params[...,3],
                 baseline = gf.gridsearch_params[..., 4],
                 ns = gf.gridsearch_params[..., 5],
                 r2 = gf.gridsearch_params[..., 6])
    # reload cached estimates; stacking yields (7, n_vertices), transpose so
    # parameters become the trailing axis (presumably what prfpy expects)
    loaded_gf_pars = np.load(grid_estimates_filename)
    gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x', 'y', 'size', 'betas', 'baseline','ns','r2']])
    gf.gridsearch_params = np.transpose(gf.gridsearch_params)
    # do iterative fit (rsq_threshold=0.1 passed to prfpy), cached to disk
    iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')
    if not os.path.isfile(iterative_out): # if output file doesn't exist
        print('doing iterative fit')
        gf.iterative_fit(rsq_threshold=0.1, verbose=False)
        np.savez(iterative_out,
                 it_output=gf.iterative_search_params)
    else:
        print('%s already exists'%iterative_out)
## do iterative fit again, now with css, n=1 (isn't that just gaussian?)
#print('doing iterative fit with css ')
#gf.fit_css = True
#gf.iterative_fit(rsq_threshold=0.1, verbose=False)
#iterative_css_out = gii_file.replace('.func.gii', '_iterative_css_output.npz')
#np.savez(iterative_css_out,
#         it_output=gf.iterative_search_params)
|
flexible
|
{
"blob_id": "d9156e240d49e0a6570a5bc2315f95a7a670fd4f",
"index": 6327,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\n<mask token>\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\n<mask token>\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\n<mask token>\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\n<mask token>\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n 
med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\n<mask token>\npng_filename.sort()\n<mask token>\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\n<mask token>\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-3": "<mask token>\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(sys.argv[2]\n ) == 'cartesius' else 'analysis_params.json'\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\nwith_smooth = analysis_params['with_smooth']\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and\n run.endswith(file_extension)]\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n 
med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(sys.argv[2]\n ) == 'cartesius' else analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T\nprf_dm = shift_DM(prf_dm)\nprf_dm = prf_dm[:, :, analysis_params['crop_pRF_TR']:]\nfit_model = analysis_params['fit_model']\nTR = analysis_params['TR']\nhrf = utilities.spm_hrf(0, TR)\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params['screen_width'],\n screen_distance_cm=analysis_params['screen_distance'], design_matrix=\n prf_dm, TR=TR)\ngg = Iso2DGaussianGridder(stimulus=prf_stim, hrf=hrf, filter_predictions=\n False, window_length=analysis_params['sg_filt_window_length'],\n polyorder=analysis_params['sg_filt_polyorder'], highpass=False,\n add_mean=False)\ngrid_nr = analysis_params['grid_steps']\nsizes = analysis_params['max_size'] * np.linspace(np.sqrt(analysis_params[\n 'min_size'] / analysis_params['max_size']), 1, grid_nr) ** 2\neccs = analysis_params['max_eccen'] * np.linspace(np.sqrt(analysis_params[\n 'min_eccen'] / analysis_params['max_eccen']), 1, grid_nr) ** 2\npolars = np.linspace(0, 2 * np.pi, grid_nr)\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = 
gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-4": "import os\nimport json\nimport sys\nimport glob\nimport re\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as stats\nimport nibabel as nb\nfrom nilearn.image import mean_img\nfrom nilearn import surface\nfrom utils import *\nfrom prfpy.rf import *\nfrom prfpy.timecourse import *\nfrom prfpy.stimulus import PRFStimulus2D\nfrom prfpy.grid import Iso2DGaussianGridder\nfrom prfpy.fit import Iso2DGaussianFitter\nfrom popeye import utilities\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(sys.argv[2]\n ) == 'cartesius' else 'analysis_params.json'\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\nwith_smooth = analysis_params['with_smooth']\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 
'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and\n run.endswith(file_extension)]\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(sys.argv[2]\n ) == 'cartesius' else analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T\nprf_dm = shift_DM(prf_dm)\nprf_dm = prf_dm[:, :, analysis_params['crop_pRF_TR']:]\nfit_model = analysis_params['fit_model']\nTR = analysis_params['TR']\nhrf = utilities.spm_hrf(0, TR)\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params['screen_width'],\n screen_distance_cm=analysis_params['screen_distance'], design_matrix=\n prf_dm, TR=TR)\ngg = Iso2DGaussianGridder(stimulus=prf_stim, hrf=hrf, filter_predictions=\n False, window_length=analysis_params['sg_filt_window_length'],\n polyorder=analysis_params['sg_filt_polyorder'], highpass=False,\n add_mean=False)\ngrid_nr = analysis_params['grid_steps']\nsizes = analysis_params['max_size'] * np.linspace(np.sqrt(analysis_params[\n 
'min_size'] / analysis_params['max_size']), 1, grid_nr) ** 2\neccs = analysis_params['max_eccen'] * np.linspace(np.sqrt(analysis_params[\n 'min_eccen'] / analysis_params['max_eccen']), 1, grid_nr) ** 2\npolars = np.linspace(0, 2 * np.pi, grid_nr)\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-5": "\n#####\n# Created on Oct 15 13:13:11 2019\n#\n# @author: inesverissimo\n#\n# Do pRF fit on median run, make iterative fit and save outputs\n####\n\nimport os\n\n# issue with tensorflow, try this suggestion\n#NUM_PARALLEL_EXEC_UNITS = 16\n#os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)\n#os.environ[\"KMP_AFFINITY\"] = \"granularity=fine,verbose,compact,1,0\"\n##\n\nimport json\nimport sys\nimport glob\nimport re\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\n\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as stats\nimport nibabel as nb\nfrom nilearn.image import mean_img\n\nfrom nilearn import surface\n\nfrom utils import * # import script to use relevante functions\n\n# requires pfpy be installed - preferably with python setup.py develop\nfrom prfpy.rf import *\nfrom prfpy.timecourse import *\nfrom prfpy.stimulus import PRFStimulus2D\nfrom prfpy.grid import Iso2DGaussianGridder\nfrom prfpy.fit import Iso2DGaussianFitter\n\nfrom popeye import utilities \n\n# define participant number and open json parameter file\nif len(sys.argv) < 2:\n raise NameError('Please add subject number (ex:1) '\n 'as 1st argument in the command line!')\n\nelif len(sys.argv) < 3:\n raise NameError('Please select server being used (ex: aeneas or cartesius) '\n 'as 2nd argument in the command line!')\n\nelse:\n # fill subject number with 0 in case user forgets\n sj = str(sys.argv[1]).zfill(2)\n\n\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(\n sys.argv[2]) == 'cartesius' else 'analysis_params.json'\n\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\n\n# use smoothed data?\nwith_smooth = analysis_params['with_smooth']\n\n\n# define paths and list of files\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(\n analysis_params['post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % 
os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],'shift_crop')\n\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(\n analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(\n analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'],'shift_crop')\n\n# changes depending on data used\nif with_smooth == 'True':\n # last part of filename to use\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params['smooth_fwhm']\n # compute median run, per hemifield\n median_path = os.path.join(\n out_dir, 'sub-{sj}'.format(sj=sj), 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],'iterative_fit')\nelse:\n # last part of filename to use\n file_extension = 'cropped_sg_psc.func.gii'\n # compute median run, per hemifield\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj), 'run-median','iterative_fit')\n\n# list of functional files\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and run.endswith(\n file_extension)]\nfilename.sort()\nif not os.path.exists(median_path): # check if path to save median run exist\n os.makedirs(median_path)\n\n\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n\n # set name for median run (now numpy array)\n med_file = os.path.join(median_path, re.sub(\n 'run-\\d{2}_', 'run-median_', os.path.split(hemi[0])[-1]))\n # if file doesn't exist\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path)) # create it\n print('computed %s' % (med_gii))\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % (med_gii))\n\n\n# create/load design matrix\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(\n sys.argv[2]) == 'cartesius' else 
analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\n\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\n\n#if not os.path.exists(dm_filename): # if not exists\nscreenshot2DM(png_filename, 0.1,\n analysis_params['screenRes'], dm_filename,dm_shape = 'square') # create it\nprint('computed %s' % (dm_filename))\n\n#else:\n# print('loading %s' % dm_filename)\n\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T # then it'll be (x, y, t)\n\n# change DM to see if fit is better like that\n# do new one which is average of every 2 TRs\n\nprf_dm = shift_DM(prf_dm)\n\nprf_dm = prf_dm[:,:,analysis_params['crop_pRF_TR']:] # crop DM because functional data also cropped now\n\n# define model params\nfit_model = analysis_params[\"fit_model\"]\n\nTR = analysis_params[\"TR\"]\n\nhrf = utilities.spm_hrf(0,TR)\n\n# make stimulus object, which takes an input design matrix and sets up its real-world dimensions\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params[\"screen_width\"], \n screen_distance_cm=analysis_params[\"screen_distance\"], \n design_matrix=prf_dm, \n TR=TR)\n\n# sets up stimulus and hrf for this gridder\ngg = Iso2DGaussianGridder(stimulus=prf_stim,\n hrf=hrf,\n filter_predictions=False,\n window_length=analysis_params[\"sg_filt_window_length\"],\n polyorder=analysis_params[\"sg_filt_polyorder\"],\n highpass=False,\n add_mean=False)\n\n# set grid parameters\ngrid_nr = analysis_params[\"grid_steps\"]\nsizes = analysis_params[\"max_size\"] * np.linspace(np.sqrt(analysis_params[\"min_size\"]/analysis_params[\"max_size\"]),1,grid_nr)**2\neccs = analysis_params[\"max_eccen\"] * np.linspace(np.sqrt(analysis_params[\"min_eccen\"]/analysis_params[\"max_eccen\"]),1,grid_nr)**2\npolars = np.linspace(0, 2*np.pi, grid_nr)\n\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape 
%s'%str(data.shape))\n\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n\n #filename for the numpy array with the estimates of the grid fit\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n\n if not os.path.isfile(grid_estimates_filename): # if estimates file doesn't exist\n print('%s not found, fitting grid'%grid_estimates_filename)\n # do grid fit and save estimates\n gf.grid_fit(ecc_grid=eccs,\n polar_grid=polars,\n size_grid=sizes)\n\n np.savez(grid_estimates_filename,\n x = gf.gridsearch_params[..., 0],\n y = gf.gridsearch_params[..., 1],\n size = gf.gridsearch_params[..., 2],\n betas = gf.gridsearch_params[...,3],\n baseline = gf.gridsearch_params[..., 4],\n ns = gf.gridsearch_params[..., 5],\n r2 = gf.gridsearch_params[..., 6])\n\n\n loaded_gf_pars = np.load(grid_estimates_filename)\n\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x', 'y', 'size', 'betas', 'baseline','ns','r2']]) \n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n\n # do iterative fit\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n\n if not os.path.isfile(iterative_out): # if estimates file doesn't exist\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n\n \n np.savez(iterative_out,\n it_output=gf.iterative_search_params)\n else:\n print('%s already exists'%iterative_out)\n\n ## do iterative fit again, now with css, n=1 (isn't that just gaussian?)\n #print('doing iterative fit with css ')\n #gf.fit_css = True\n #gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n\n #iterative_css_out = gii_file.replace('.func.gii', '_iterative_css_output.npz')\n #np.savez(iterative_css_out,\n # it_output=gf.iterative_search_params)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
selection review
very similar to quicksort in terms of set up.
no need to sort to find kth element in a list
but instead can be done in o(n)
quick sort can be o(nlogn) if we choose median
instead of pivot
tips:
raise value error for bad index not in between 0 <= k < n
basecase of n <=1 --> return arr[0]
use L, E, G
if k < len(L):
select(L, k)
if k < len(L) + len(E):
return pivot
else:
select(G, k - len(l)-len(E))
O(n) runtime
n + n / 2 + n / 4 + n / 8 + n / 16 + ... = n (1 + 1/2 + 1/4 + 1/8 + ...)
= 2n on average
worst case is O(n^2) like quick sort if you pick the worst each
time
'''
import random
def select(arr, k):
    """Return the k-th smallest element of ``arr`` (0-indexed) via quickselect.

    Uses a random pivot and a three-way partition, recursing only into the
    partition that contains rank ``k``. Average O(n); worst case O(n**2)
    under an unlucky pivot sequence.

    Raises ValueError when ``k`` is outside ``[0, len(arr))``.
    """
    size = len(arr)
    if not 0 <= k < size:
        raise ValueError('not valid index in array')
    if size <= 1:
        return arr[0]
    pivot = random.choice(arr)
    # Three-way partition around the pivot value.
    smaller = [v for v in arr if v < pivot]
    equal = [v for v in arr if v == pivot]
    larger = [v for v in arr if v > pivot]
    if k < len(smaller):
        return select(smaller, k)
    if k < len(smaller) + len(equal):
        return pivot
    return select(larger, k - len(smaller) - len(equal))
# Demo: element of rank 3 (0-indexed) in 1..10 -- prints 4.
x = [1,2,3,4,5,6,7,8,9,10]
print(select(x,3))
|
normal
|
{
"blob_id": "69d3a39dc024929eaf6fb77e38a7a818d2886cf7",
"index": 8512,
"step-1": "<mask token>\n\n\ndef select(arr, k):\n n = len(arr)\n if not 0 <= k < n:\n raise ValueError('not valid index in array')\n if n <= 1:\n return arr[0]\n pivot = random.choice(arr)\n L, E, G = [], [], []\n for data in arr:\n if data < pivot:\n L.append(data)\n elif data == pivot:\n E.append(pivot)\n else:\n G.append(data)\n if k < len(L):\n return select(L, k)\n elif k < len(L) + len(E):\n return pivot\n else:\n return select(G, k - (len(L) + len(E)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef select(arr, k):\n n = len(arr)\n if not 0 <= k < n:\n raise ValueError('not valid index in array')\n if n <= 1:\n return arr[0]\n pivot = random.choice(arr)\n L, E, G = [], [], []\n for data in arr:\n if data < pivot:\n L.append(data)\n elif data == pivot:\n E.append(pivot)\n else:\n G.append(data)\n if k < len(L):\n return select(L, k)\n elif k < len(L) + len(E):\n return pivot\n else:\n return select(G, k - (len(L) + len(E)))\n\n\n<mask token>\nprint(select(x, 3))\n",
"step-3": "<mask token>\n\n\ndef select(arr, k):\n n = len(arr)\n if not 0 <= k < n:\n raise ValueError('not valid index in array')\n if n <= 1:\n return arr[0]\n pivot = random.choice(arr)\n L, E, G = [], [], []\n for data in arr:\n if data < pivot:\n L.append(data)\n elif data == pivot:\n E.append(pivot)\n else:\n G.append(data)\n if k < len(L):\n return select(L, k)\n elif k < len(L) + len(E):\n return pivot\n else:\n return select(G, k - (len(L) + len(E)))\n\n\nx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nprint(select(x, 3))\n",
"step-4": "<mask token>\nimport random\n\n\ndef select(arr, k):\n n = len(arr)\n if not 0 <= k < n:\n raise ValueError('not valid index in array')\n if n <= 1:\n return arr[0]\n pivot = random.choice(arr)\n L, E, G = [], [], []\n for data in arr:\n if data < pivot:\n L.append(data)\n elif data == pivot:\n E.append(pivot)\n else:\n G.append(data)\n if k < len(L):\n return select(L, k)\n elif k < len(L) + len(E):\n return pivot\n else:\n return select(G, k - (len(L) + len(E)))\n\n\nx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nprint(select(x, 3))\n",
"step-5": "'''\nselection review\nvery similar to quicksort in terms of set up.\nno need to sort to find kth element in a list\nbut instead can be done in o(n)\nquick sort can be o(nlogn) if we choose median\ninstead of pivot\n\ntips:\nraise value error for bad index not in between 0 <= k < n\nbasecase of n <=1 --> return arr[0]\nuse L, E, G\nif k < len(L):\n\tselect(L, k)\nif k < select(len(L) + len(E)):\n\treturn pivot\nelse:\n\tselect(G, k - len(l)-len(E))\n\nO(n) runtime\n\nn + n / 2 + n / 4 + n / 8 + n / 16 + ... = n (1 + 1/2 + 1/4 + 1/8 + ...)\n= 2n on average\nworst case is 0(n^2) like quick sort if you pick the worst each\ntime\n'''\nimport random\n\ndef select(arr, k):\n\tn = len(arr)\n\tif not 0 <= k < n:\n\t\traise ValueError('not valid index in array')\n\tif n <= 1:\n\t\treturn arr[0]\n\tpivot = random.choice(arr)\n\tL, E, G = [],[],[]\n\tfor data in arr:\n\t\tif data < pivot:\n\t\t\tL.append(data)\n\t\telif data == pivot:\n\t\t\tE.append(pivot)\n\t\telse:\n\t\t\tG.append(data)\n\tif k < len(L):\n\t\treturn select(L, k)\n\telif k < (len(L) + len(E)):\n\t\treturn pivot\n\telse:\n\t\treturn select(G, k - (len(L) + len(E)))\n\nx = [1,2,3,4,5,6,7,8,9,10]\nprint(select(x,3))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""Command generator for running a script against a BigQuery cluster.
Contains the method to compile the BigQuery specific script execution command
based on generic arguments (sql script, output destination) and BigQuery
specific arguments (flag values).
"""
__author__ = 'p3rf@google.com'
from absl import flags
# Command-line flags locating the query's dataset and table in BigQuery;
# both are marked required, so absl aborts at parse time if either is missing.
flags.DEFINE_string('bq_project_id', None, 'Project Id which contains the query'
                    ' dataset and table.')
flags.DEFINE_string('bq_dataset_id', None, 'Dataset Id which contains the query'
                    ' table.')
flags.mark_flags_as_required(['bq_project_id', 'bq_dataset_id'])
# Global flag accessor; flag values are read at call time in
# generate_provider_specific_cmd_list, after absl has parsed argv.
FLAGS = flags.FLAGS
def generate_provider_specific_cmd_list(script, driver, output, error):
    """Build the argv list for executing a SQL script against BigQuery.

    Arguments:
      script: SQL script which contains the query.
      driver: Driver that contains the BigQuery specific script executor.
      output: Output log file.
      error: Error log file.

    Returns:
      Command list to execute the supplied script.
    """
    return [
        driver,
        FLAGS.bq_project_id,
        FLAGS.bq_dataset_id,
        script,
        output,
        error,
    ]
|
normal
|
{
"blob_id": "5e14eeaa3c79bfdd564f3bfd1575c9bbf1a3773d",
"index": 7881,
"step-1": "<mask token>\n\n\ndef generate_provider_specific_cmd_list(script, driver, output, error):\n \"\"\"Method to compile the BigQuery specific script execution command.\n\n Arguments:\n script: SQL script which contains the query.\n driver: Driver that contains the BigQuery specific script executor.\n output: Output log file.\n error: Error log file.\n\n Returns:\n Command list to execute the supplied script.\n \"\"\"\n cmd_list = [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id, script,\n output, error]\n return cmd_list\n",
"step-2": "<mask token>\nflags.DEFINE_string('bq_project_id', None,\n 'Project Id which contains the query dataset and table.')\nflags.DEFINE_string('bq_dataset_id', None,\n 'Dataset Id which contains the query table.')\nflags.mark_flags_as_required(['bq_project_id', 'bq_dataset_id'])\n<mask token>\n\n\ndef generate_provider_specific_cmd_list(script, driver, output, error):\n \"\"\"Method to compile the BigQuery specific script execution command.\n\n Arguments:\n script: SQL script which contains the query.\n driver: Driver that contains the BigQuery specific script executor.\n output: Output log file.\n error: Error log file.\n\n Returns:\n Command list to execute the supplied script.\n \"\"\"\n cmd_list = [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id, script,\n output, error]\n return cmd_list\n",
"step-3": "<mask token>\n__author__ = 'p3rf@google.com'\n<mask token>\nflags.DEFINE_string('bq_project_id', None,\n 'Project Id which contains the query dataset and table.')\nflags.DEFINE_string('bq_dataset_id', None,\n 'Dataset Id which contains the query table.')\nflags.mark_flags_as_required(['bq_project_id', 'bq_dataset_id'])\nFLAGS = flags.FLAGS\n\n\ndef generate_provider_specific_cmd_list(script, driver, output, error):\n \"\"\"Method to compile the BigQuery specific script execution command.\n\n Arguments:\n script: SQL script which contains the query.\n driver: Driver that contains the BigQuery specific script executor.\n output: Output log file.\n error: Error log file.\n\n Returns:\n Command list to execute the supplied script.\n \"\"\"\n cmd_list = [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id, script,\n output, error]\n return cmd_list\n",
"step-4": "<mask token>\n__author__ = 'p3rf@google.com'\nfrom absl import flags\nflags.DEFINE_string('bq_project_id', None,\n 'Project Id which contains the query dataset and table.')\nflags.DEFINE_string('bq_dataset_id', None,\n 'Dataset Id which contains the query table.')\nflags.mark_flags_as_required(['bq_project_id', 'bq_dataset_id'])\nFLAGS = flags.FLAGS\n\n\ndef generate_provider_specific_cmd_list(script, driver, output, error):\n \"\"\"Method to compile the BigQuery specific script execution command.\n\n Arguments:\n script: SQL script which contains the query.\n driver: Driver that contains the BigQuery specific script executor.\n output: Output log file.\n error: Error log file.\n\n Returns:\n Command list to execute the supplied script.\n \"\"\"\n cmd_list = [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id, script,\n output, error]\n return cmd_list\n",
"step-5": "\"\"\"Command generator for running a script against a BigQuery cluster.\n\nContains the method to compile the BigQuery specific script execution command\nbased on generic arguments (sql script, output destination) and BigQuery\nspecific arguments (flag values).\n\"\"\"\n\n__author__ = 'p3rf@google.com'\n\nfrom absl import flags\n\nflags.DEFINE_string('bq_project_id', None, 'Project Id which contains the query'\n ' dataset and table.')\nflags.DEFINE_string('bq_dataset_id', None, 'Dataset Id which contains the query'\n ' table.')\nflags.mark_flags_as_required(['bq_project_id', 'bq_dataset_id'])\n\nFLAGS = flags.FLAGS\n\n\ndef generate_provider_specific_cmd_list(script, driver, output, error):\n \"\"\"Method to compile the BigQuery specific script execution command.\n\n Arguments:\n script: SQL script which contains the query.\n driver: Driver that contains the BigQuery specific script executor.\n output: Output log file.\n error: Error log file.\n\n Returns:\n Command list to execute the supplied script.\n \"\"\"\n cmd_list = [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id,\n script, output, error]\n return cmd_list\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import io
import os
import sys
import whwn
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
here = os.path.abspath(os.path.dirname(__file__))
with open('README.md') as readme:
long_description = readme.read()
with open('requirements.txt') as reqs:
install_requires = [
line for line in reqs.read().split('\n') if (line and not
line.startswith('--'))
]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='We Have We Need',
version=whwn.__version__,
url='http://github.com/wehaveweneed/wehaveweneed',
tests_require=['pytest'],
cmdclass={'test': PyTest},
description='Inventory Management System',
long_description=long_description,
install_requires=install_requires,
packages=['whwn'],
include_package_data=True,
test_suite='whwn.test.test_whwn',
classifiers = [
'Environment :: Web Environment',
'Framework :: Django',
],
extras_require={
'testing': ['pytest'],
}
)
|
normal
|
{
"blob_id": "bd2a5c2dd3eef5979c87a488fb584dce740ccb05",
"index": 3870,
"step-1": "<mask token>\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errcode = pytest.main(self.test_args)\n sys.exit(errcode)\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('README.md') as readme:\n long_description = readme.read()\nwith open('requirements.txt') as reqs:\n install_requires = [line for line in reqs.read().split('\\n') if line and\n not line.startswith('--')]\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errcode = pytest.main(self.test_args)\n sys.exit(errcode)\n\n\nsetup(name='We Have We Need', version=whwn.__version__, url=\n 'http://github.com/wehaveweneed/wehaveweneed', tests_require=['pytest'],\n cmdclass={'test': PyTest}, description='Inventory Management System',\n long_description=long_description, install_requires=install_requires,\n packages=['whwn'], include_package_data=True, test_suite=\n 'whwn.test.test_whwn', classifiers=['Environment :: Web Environment',\n 'Framework :: Django'], extras_require={'testing': ['pytest']})\n",
"step-3": "<mask token>\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open('README.md') as readme:\n long_description = readme.read()\nwith open('requirements.txt') as reqs:\n install_requires = [line for line in reqs.read().split('\\n') if line and\n not line.startswith('--')]\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errcode = pytest.main(self.test_args)\n sys.exit(errcode)\n\n\nsetup(name='We Have We Need', version=whwn.__version__, url=\n 'http://github.com/wehaveweneed/wehaveweneed', tests_require=['pytest'],\n cmdclass={'test': PyTest}, description='Inventory Management System',\n long_description=long_description, install_requires=install_requires,\n packages=['whwn'], include_package_data=True, test_suite=\n 'whwn.test.test_whwn', classifiers=['Environment :: Web Environment',\n 'Framework :: Django'], extras_require={'testing': ['pytest']})\n",
"step-4": "import io\nimport os\nimport sys\nimport whwn\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open('README.md') as readme:\n long_description = readme.read()\nwith open('requirements.txt') as reqs:\n install_requires = [line for line in reqs.read().split('\\n') if line and\n not line.startswith('--')]\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errcode = pytest.main(self.test_args)\n sys.exit(errcode)\n\n\nsetup(name='We Have We Need', version=whwn.__version__, url=\n 'http://github.com/wehaveweneed/wehaveweneed', tests_require=['pytest'],\n cmdclass={'test': PyTest}, description='Inventory Management System',\n long_description=long_description, install_requires=install_requires,\n packages=['whwn'], include_package_data=True, test_suite=\n 'whwn.test.test_whwn', classifiers=['Environment :: Web Environment',\n 'Framework :: Django'], extras_require={'testing': ['pytest']})\n",
"step-5": "import io\nimport os\nimport sys\nimport whwn\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open('README.md') as readme:\n long_description = readme.read()\n\nwith open('requirements.txt') as reqs:\n install_requires = [\n line for line in reqs.read().split('\\n') if (line and not \n line.startswith('--'))\n ]\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errcode = pytest.main(self.test_args)\n sys.exit(errcode)\n\nsetup(\n name='We Have We Need',\n version=whwn.__version__,\n url='http://github.com/wehaveweneed/wehaveweneed',\n tests_require=['pytest'],\n cmdclass={'test': PyTest},\n description='Inventory Management System',\n long_description=long_description,\n install_requires=install_requires,\n packages=['whwn'],\n include_package_data=True,\n test_suite='whwn.test.test_whwn',\n classifiers = [\n 'Environment :: Web Environment',\n 'Framework :: Django',\n ],\n extras_require={\n 'testing': ['pytest'],\n }\n)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(clothes):
answer = 1
hash_map = defaultdict(lambda : 0)
for value, key in clothes:
hash_map[key] += 1
for v in hash_map.values():
answer *= v + 1
return answer - 1
<|reserved_special_token_1|>
from collections import defaultdict
def solution(clothes):
answer = 1
hash_map = defaultdict(lambda : 0)
for value, key in clothes:
hash_map[key] += 1
for v in hash_map.values():
answer *= v + 1
return answer - 1
|
flexible
|
{
"blob_id": "601089c2555e6fc75803087ee1d8af7f8180f651",
"index": 4199,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(clothes):\n answer = 1\n hash_map = defaultdict(lambda : 0)\n for value, key in clothes:\n hash_map[key] += 1\n for v in hash_map.values():\n answer *= v + 1\n return answer - 1\n",
"step-3": "from collections import defaultdict\n\n\ndef solution(clothes):\n answer = 1\n hash_map = defaultdict(lambda : 0)\n for value, key in clothes:\n hash_map[key] += 1\n for v in hash_map.values():\n answer *= v + 1\n return answer - 1\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@app.route('/')
def index():
return "<h1>Congratulations, it's a web app!</h1>"
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
return "<h1>Congratulations, it's a web app!</h1>"
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/')
def index():
return "<h1>Congratulations, it's a web app!</h1>"
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
<|reserved_special_token_1|>
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "<h1>Congratulations, it's a web app!</h1>"
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
<|reserved_special_token_1|>
# from the top
# clean up dependencies
from flask import Flask
app = Flask(__name__)
@app.route("/")
def index():
return "<h1>Congratulations, it's a web app!</h1>"
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080, debug=True)
|
flexible
|
{
"blob_id": "612535d95e655f2e2d2c58f41b2aa99afa7fbcbc",
"index": 874,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n",
"step-4": "from flask import Flask\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n",
"step-5": "# from the top\n# clean up dependencies\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=8080, debug=True)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Session:
<|reserved_special_token_0|>
class APIStatisticsCollection:
API_ACTION = 'x-stats-api-action'
DICT_PARAMS = 'x-stats-param-dict'
DICT_RESPONSE = 'x-stats-resp-dict'
SUCCESS = 'x-stats-success'
COLLECT = 'x-stats-collect'
class ParamDictPrefix:
PostKey = 'x-'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Session:
USER_ROOT_ID = 'x-root-id'
class APIStatisticsCollection:
API_ACTION = 'x-stats-api-action'
DICT_PARAMS = 'x-stats-param-dict'
DICT_RESPONSE = 'x-stats-resp-dict'
SUCCESS = 'x-stats-success'
COLLECT = 'x-stats-collect'
class ParamDictPrefix:
PostKey = 'x-'
<|reserved_special_token_1|>
class Cookies:
<|reserved_special_token_0|>
class Session:
USER_ROOT_ID = 'x-root-id'
class APIStatisticsCollection:
API_ACTION = 'x-stats-api-action'
DICT_PARAMS = 'x-stats-param-dict'
DICT_RESPONSE = 'x-stats-resp-dict'
SUCCESS = 'x-stats-success'
COLLECT = 'x-stats-collect'
class ParamDictPrefix:
PostKey = 'x-'
<|reserved_special_token_1|>
class Cookies:
USER_TOKEN = 'utoken'
class Session:
USER_ROOT_ID = 'x-root-id'
class APIStatisticsCollection:
API_ACTION = 'x-stats-api-action'
DICT_PARAMS = 'x-stats-param-dict'
DICT_RESPONSE = 'x-stats-resp-dict'
SUCCESS = 'x-stats-success'
COLLECT = 'x-stats-collect'
class ParamDictPrefix:
PostKey = 'x-'
<|reserved_special_token_1|>
# Cookies Keys
class Cookies:
USER_TOKEN = "utoken"
# Session Keys
class Session:
USER_ROOT_ID = "x-root-id"
class APIStatisticsCollection:
API_ACTION = "x-stats-api-action"
DICT_PARAMS = "x-stats-param-dict"
DICT_RESPONSE = "x-stats-resp-dict"
SUCCESS = "x-stats-success"
COLLECT = "x-stats-collect"
# Param Dict Prefix
class ParamDictPrefix:
PostKey = "x-" # Used in http POST params from HTML forms
|
flexible
|
{
"blob_id": "d0e5a3a6db0e27ecf157294850a48a19750a5ac2",
"index": 1667,
"step-1": "<mask token>\n\n\nclass Session:\n <mask token>\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-2": "<mask token>\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-3": "class Cookies:\n <mask token>\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-4": "class Cookies:\n USER_TOKEN = 'utoken'\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-5": "# Cookies Keys\nclass Cookies:\n USER_TOKEN = \"utoken\"\n\n\n# Session Keys\nclass Session:\n USER_ROOT_ID = \"x-root-id\"\n\n class APIStatisticsCollection:\n API_ACTION = \"x-stats-api-action\"\n DICT_PARAMS = \"x-stats-param-dict\"\n DICT_RESPONSE = \"x-stats-resp-dict\"\n SUCCESS = \"x-stats-success\"\n\n COLLECT = \"x-stats-collect\"\n\n\n# Param Dict Prefix\nclass ParamDictPrefix:\n PostKey = \"x-\" # Used in http POST params from HTML forms\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='teams', fields=[('id',
models.AutoField(primary_key=True, serialize=False)), ('name',
models.CharField(max_length=50)), ('discipline', models.CharField(
max_length=50)), ('amount', models.IntegerField())], options={
'ordering': ['id'], 'unique_together': {('name', 'discipline',
'amount')}})]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='teams', fields=[('id',
models.AutoField(primary_key=True, serialize=False)), ('name',
models.CharField(max_length=50)), ('discipline', models.CharField(
max_length=50)), ('amount', models.IntegerField())], options={
'ordering': ['id'], 'unique_together': {('name', 'discipline',
'amount')}})]
<|reserved_special_token_1|>
# Generated by Django 3.2.2 on 2021-05-07 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='teams',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('discipline', models.CharField(max_length=50)),
('amount', models.IntegerField()),
],
options={
'ordering': ['id'],
'unique_together': {('name', 'discipline', 'amount')},
},
),
]
|
flexible
|
{
"blob_id": "e72962b644fab148741eb1c528d48ada45a43e51",
"index": 3978,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='teams', fields=[('id',\n models.AutoField(primary_key=True, serialize=False)), ('name',\n models.CharField(max_length=50)), ('discipline', models.CharField(\n max_length=50)), ('amount', models.IntegerField())], options={\n 'ordering': ['id'], 'unique_together': {('name', 'discipline',\n 'amount')}})]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='teams', fields=[('id',\n models.AutoField(primary_key=True, serialize=False)), ('name',\n models.CharField(max_length=50)), ('discipline', models.CharField(\n max_length=50)), ('amount', models.IntegerField())], options={\n 'ordering': ['id'], 'unique_together': {('name', 'discipline',\n 'amount')}})]\n",
"step-5": "# Generated by Django 3.2.2 on 2021-05-07 08:01\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='teams',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=50)),\n ('discipline', models.CharField(max_length=50)),\n ('amount', models.IntegerField()),\n ],\n options={\n 'ordering': ['id'],\n 'unique_together': {('name', 'discipline', 'amount')},\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestEosLacpInterfacesModule(TestEosModule):
<|reserved_special_token_0|>
def setUp(self):
super(TestEosLacpInterfacesModule, self).setUp()
self.mock_get_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config'
)
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config'
)
self.load_config = self.mock_load_config.start()
self.mock_get_resource_connection_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection'
)
self.get_resource_connection_config = (self.
mock_get_resource_connection_config.start())
self.mock_get_resource_connection_facts = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection'
)
self.get_resource_connection_facts = (self.
mock_get_resource_connection_facts.start())
self.mock_edit_config = patch(
'ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config'
)
self.edit_config = self.mock_edit_config.start()
self.mock_execute_show_command = patch(
'ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data'
)
self.execute_show_command = self.mock_execute_show_command.start()
def tearDown(self):
super(TestEosLacpInterfacesModule, self).tearDown()
self.mock_get_resource_connection_config.stop()
self.mock_get_resource_connection_facts.stop()
self.mock_edit_config.stop()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_execute_show_command.stop()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_eos_lacp_interfaces_overridden_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
30), dict(name='Ethernet2', rate='fast')], state='overridden'))
self.execute_module(changed=False, commands=[])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestEosLacpInterfacesModule(TestEosModule):
<|reserved_special_token_0|>
def setUp(self):
super(TestEosLacpInterfacesModule, self).setUp()
self.mock_get_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config'
)
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config'
)
self.load_config = self.mock_load_config.start()
self.mock_get_resource_connection_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection'
)
self.get_resource_connection_config = (self.
mock_get_resource_connection_config.start())
self.mock_get_resource_connection_facts = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection'
)
self.get_resource_connection_facts = (self.
mock_get_resource_connection_facts.start())
self.mock_edit_config = patch(
'ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config'
)
self.edit_config = self.mock_edit_config.start()
self.mock_execute_show_command = patch(
'ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data'
)
self.execute_show_command = self.mock_execute_show_command.start()
def tearDown(self):
super(TestEosLacpInterfacesModule, self).tearDown()
self.mock_get_resource_connection_config.stop()
self.mock_get_resource_connection_facts.stop()
self.mock_edit_config.stop()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_execute_show_command.stop()
def load_fixtures(self, commands=None, transport='cli'):
def load_from_file(*args, **kwargs):
return load_fixture('eos_lacp_interfaces_config.cfg')
self.execute_show_command.side_effect = load_from_file
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_eos_lacp_interfaces_merged_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],
state='merged'))
self.execute_module(changed=False, commands=[])
<|reserved_special_token_0|>
def test_eos_lacp_interfaces_overridden(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
45, rate='normal')], state='overridden'))
commands = ['interface Ethernet1', 'lacp port-priority 45',
'lacp rate normal', 'interface Ethernet2',
'no lacp port-priority', 'no lacp rate']
self.execute_module(changed=True, commands=commands)
def test_eos_lacp_interfaces_overridden_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
30), dict(name='Ethernet2', rate='fast')], state='overridden'))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_deleted(self):
set_module_args(dict(config=[dict(name='Ethernet2')], state='deleted'))
commands = ['interface Ethernet2', 'no lacp rate']
self.execute_module(changed=True, commands=commands)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestEosLacpInterfacesModule(TestEosModule):
<|reserved_special_token_0|>
def setUp(self):
super(TestEosLacpInterfacesModule, self).setUp()
self.mock_get_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config'
)
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config'
)
self.load_config = self.mock_load_config.start()
self.mock_get_resource_connection_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection'
)
self.get_resource_connection_config = (self.
mock_get_resource_connection_config.start())
self.mock_get_resource_connection_facts = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection'
)
self.get_resource_connection_facts = (self.
mock_get_resource_connection_facts.start())
self.mock_edit_config = patch(
'ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config'
)
self.edit_config = self.mock_edit_config.start()
self.mock_execute_show_command = patch(
'ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data'
)
self.execute_show_command = self.mock_execute_show_command.start()
def tearDown(self):
super(TestEosLacpInterfacesModule, self).tearDown()
self.mock_get_resource_connection_config.stop()
self.mock_get_resource_connection_facts.stop()
self.mock_edit_config.stop()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_execute_show_command.stop()
def load_fixtures(self, commands=None, transport='cli'):
def load_from_file(*args, **kwargs):
return load_fixture('eos_lacp_interfaces_config.cfg')
self.execute_show_command.side_effect = load_from_file
<|reserved_special_token_0|>
def test_eos_lacp_interfaces_default_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')]))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_merged(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
45, rate='normal'), dict(name='Ethernet2', rate='normal')],
state='merged'))
commands = ['interface Ethernet1', 'lacp port-priority 45',
'lacp rate normal', 'interface Ethernet2', 'lacp rate normal']
self.execute_module(changed=True, commands=commands)
def test_eos_lacp_interfaces_merged_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],
state='merged'))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_replaced_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],
state='replaced'))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_overridden(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
45, rate='normal')], state='overridden'))
commands = ['interface Ethernet1', 'lacp port-priority 45',
'lacp rate normal', 'interface Ethernet2',
'no lacp port-priority', 'no lacp rate']
self.execute_module(changed=True, commands=commands)
def test_eos_lacp_interfaces_overridden_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
30), dict(name='Ethernet2', rate='fast')], state='overridden'))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_deleted(self):
set_module_args(dict(config=[dict(name='Ethernet2')], state='deleted'))
commands = ['interface Ethernet2', 'no lacp rate']
self.execute_module(changed=True, commands=commands)
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.arista.eos.tests.unit.compat.mock import patch
from ansible_collections.arista.eos.plugins.modules import eos_lacp_interfaces
from ansible_collections.arista.eos.tests.unit.modules.utils import set_module_args
from .eos_module import TestEosModule, load_fixture
class TestEosLacpInterfacesModule(TestEosModule):
module = eos_lacp_interfaces
def setUp(self):
super(TestEosLacpInterfacesModule, self).setUp()
self.mock_get_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config'
)
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config'
)
self.load_config = self.mock_load_config.start()
self.mock_get_resource_connection_config = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection'
)
self.get_resource_connection_config = (self.
mock_get_resource_connection_config.start())
self.mock_get_resource_connection_facts = patch(
'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection'
)
self.get_resource_connection_facts = (self.
mock_get_resource_connection_facts.start())
self.mock_edit_config = patch(
'ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config'
)
self.edit_config = self.mock_edit_config.start()
self.mock_execute_show_command = patch(
'ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data'
)
self.execute_show_command = self.mock_execute_show_command.start()
def tearDown(self):
super(TestEosLacpInterfacesModule, self).tearDown()
self.mock_get_resource_connection_config.stop()
self.mock_get_resource_connection_facts.stop()
self.mock_edit_config.stop()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_execute_show_command.stop()
def load_fixtures(self, commands=None, transport='cli'):
def load_from_file(*args, **kwargs):
return load_fixture('eos_lacp_interfaces_config.cfg')
self.execute_show_command.side_effect = load_from_file
def test_eos_lacp_interfaces_default(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
45, rate='normal')]))
commands = ['interface Ethernet1', 'lacp port-priority 45',
'lacp rate normal']
self.execute_module(changed=True, commands=commands)
def test_eos_lacp_interfaces_default_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')]))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_merged(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
45, rate='normal'), dict(name='Ethernet2', rate='normal')],
state='merged'))
commands = ['interface Ethernet1', 'lacp port-priority 45',
'lacp rate normal', 'interface Ethernet2', 'lacp rate normal']
self.execute_module(changed=True, commands=commands)
def test_eos_lacp_interfaces_merged_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],
state='merged'))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_replaced_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],
state='replaced'))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_overridden(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
45, rate='normal')], state='overridden'))
commands = ['interface Ethernet1', 'lacp port-priority 45',
'lacp rate normal', 'interface Ethernet2',
'no lacp port-priority', 'no lacp rate']
self.execute_module(changed=True, commands=commands)
def test_eos_lacp_interfaces_overridden_idempotent(self):
set_module_args(dict(config=[dict(name='Ethernet1', port_priority=
30), dict(name='Ethernet2', rate='fast')], state='overridden'))
self.execute_module(changed=False, commands=[])
def test_eos_lacp_interfaces_deleted(self):
set_module_args(dict(config=[dict(name='Ethernet2')], state='deleted'))
commands = ['interface Ethernet2', 'no lacp rate']
self.execute_module(changed=True, commands=commands)
<|reserved_special_token_1|>
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.arista.eos.tests.unit.compat.mock import patch
from ansible_collections.arista.eos.plugins.modules import eos_lacp_interfaces
from ansible_collections.arista.eos.tests.unit.modules.utils import (
set_module_args,
)
from .eos_module import TestEosModule, load_fixture
class TestEosLacpInterfacesModule(TestEosModule):
    """Unit tests for the arista.eos ``eos_lacp_interfaces`` resource module."""
    # Resource module under test; TestEosModule.execute_module() reads this.
    module = eos_lacp_interfaces
    def setUp(self):
        """Patch all device-facing plumbing so tests run without a real EOS host."""
        super(TestEosLacpInterfacesModule, self).setUp()
        # Stub config read/write on the netcommon Config abstraction.
        self.mock_get_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config"
        )
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config"
        )
        self.load_config = self.mock_load_config.start()
        # Stub the resource connections used by the cfg and facts layers.
        self.mock_get_resource_connection_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection"
        )
        self.get_resource_connection_config = (
            self.mock_get_resource_connection_config.start()
        )
        self.mock_get_resource_connection_facts = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection"
        )
        self.get_resource_connection_facts = (
            self.mock_get_resource_connection_facts.start()
        )
        # Stub CLI edits so generated commands are captured, never sent.
        self.mock_edit_config = patch(
            "ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config"
        )
        self.edit_config = self.mock_edit_config.start()
        # Stub the facts gatherer; load_fixtures() wires it to a canned config.
        self.mock_execute_show_command = patch(
            "ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data"
        )
        self.execute_show_command = self.mock_execute_show_command.start()
    def tearDown(self):
        """Stop every patch started in setUp()."""
        super(TestEosLacpInterfacesModule, self).tearDown()
        self.mock_get_resource_connection_config.stop()
        self.mock_get_resource_connection_facts.stop()
        self.mock_edit_config.stop()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_execute_show_command.stop()
    def load_fixtures(self, commands=None, transport="cli"):
        """Serve the canned device config as the facts source for every test."""
        def load_from_file(*args, **kwargs):
            return load_fixture("eos_lacp_interfaces_config.cfg")
        self.execute_show_command.side_effect = load_from_file
    def test_eos_lacp_interfaces_default(self):
        # Default state: new values on Ethernet1 emit both lacp commands.
        set_module_args(
            dict(
                config=[
                    dict(name="Ethernet1", port_priority=45, rate="normal")
                ]
            )
        )
        commands = [
            "interface Ethernet1",
            "lacp port-priority 45",
            "lacp rate normal",
        ]
        self.execute_module(changed=True, commands=commands)
    def test_eos_lacp_interfaces_default_idempotent(self):
        # Ethernet2 already has "lacp rate fast" in the fixture: no change.
        set_module_args(dict(config=[dict(name="Ethernet2", rate="fast")]))
        self.execute_module(changed=False, commands=[])
    def test_eos_lacp_interfaces_merged(self):
        # Merged state adds/updates attributes on both interfaces.
        set_module_args(
            dict(
                config=[
                    dict(name="Ethernet1", port_priority=45, rate="normal"),
                    dict(name="Ethernet2", rate="normal"),
                ],
                state="merged",
            )
        )
        commands = [
            "interface Ethernet1",
            "lacp port-priority 45",
            "lacp rate normal",
            "interface Ethernet2",
            "lacp rate normal",
        ]
        self.execute_module(changed=True, commands=commands)
    def test_eos_lacp_interfaces_merged_idempotent(self):
        # Merging the fixture's existing values must be a no-op.
        set_module_args(
            dict(config=[dict(name="Ethernet2", rate="fast")], state="merged")
        )
        self.execute_module(changed=False, commands=[])
    # Bug #64453
    # def test_eos_lacp_interfaces_replaced(self):
    # set_module_args(dict(
    # config=[dict(
    # name="Ethernet1",
    # port_priority=45,
    # rate="normal"
    # )], state="replaced"
    # ))
    # commands = ['interface Ethernet1', 'lacp port-priority 45', 'lacp rate normal']
    # self.execute_module(changed=True, commands=commands)
    def test_eos_lacp_interfaces_replaced_idempotent(self):
        # Replacing with the fixture's existing values must be a no-op.
        set_module_args(
            dict(
                config=[dict(name="Ethernet2", rate="fast")], state="replaced"
            )
        )
        self.execute_module(changed=False, commands=[])
    def test_eos_lacp_interfaces_overridden(self):
        # Overridden state configures Ethernet1 and wipes LACP on Ethernet2.
        set_module_args(
            dict(
                config=[
                    dict(name="Ethernet1", port_priority=45, rate="normal")
                ],
                state="overridden",
            )
        )
        commands = [
            "interface Ethernet1",
            "lacp port-priority 45",
            "lacp rate normal",
            "interface Ethernet2",
            "no lacp port-priority",
            "no lacp rate",
        ]
        self.execute_module(changed=True, commands=commands)
    def test_eos_lacp_interfaces_overridden_idempotent(self):
        # Overriding with exactly the fixture's config must be a no-op.
        set_module_args(
            dict(
                config=[
                    dict(name="Ethernet1", port_priority=30),
                    dict(name="Ethernet2", rate="fast"),
                ],
                state="overridden",
            )
        )
        self.execute_module(changed=False, commands=[])
    def test_eos_lacp_interfaces_deleted(self):
        # Deleting Ethernet2 removes its only configured LACP attribute.
        set_module_args(dict(config=[dict(name="Ethernet2")], state="deleted"))
        commands = ["interface Ethernet2", "no lacp rate"]
        self.execute_module(changed=True, commands=commands)
|
flexible
|
{
"blob_id": "6efe3975f4d5d9f431391b3560c37a3e89e27f3d",
"index": 9172,
"step-1": "<mask token>\n\n\nclass TestEosLacpInterfacesModule(TestEosModule):\n <mask token>\n\n def setUp(self):\n super(TestEosLacpInterfacesModule, self).setUp()\n self.mock_get_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config'\n )\n self.get_config = self.mock_get_config.start()\n self.mock_load_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config'\n )\n self.load_config = self.mock_load_config.start()\n self.mock_get_resource_connection_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection'\n )\n self.get_resource_connection_config = (self.\n mock_get_resource_connection_config.start())\n self.mock_get_resource_connection_facts = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection'\n )\n self.get_resource_connection_facts = (self.\n mock_get_resource_connection_facts.start())\n self.mock_edit_config = patch(\n 'ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config'\n )\n self.edit_config = self.mock_edit_config.start()\n self.mock_execute_show_command = patch(\n 'ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data'\n )\n self.execute_show_command = self.mock_execute_show_command.start()\n\n def tearDown(self):\n super(TestEosLacpInterfacesModule, self).tearDown()\n self.mock_get_resource_connection_config.stop()\n self.mock_get_resource_connection_facts.stop()\n self.mock_edit_config.stop()\n self.mock_get_config.stop()\n self.mock_load_config.stop()\n self.mock_execute_show_command.stop()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def 
test_eos_lacp_interfaces_overridden_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 30), dict(name='Ethernet2', rate='fast')], state='overridden'))\n self.execute_module(changed=False, commands=[])\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestEosLacpInterfacesModule(TestEosModule):\n <mask token>\n\n def setUp(self):\n super(TestEosLacpInterfacesModule, self).setUp()\n self.mock_get_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config'\n )\n self.get_config = self.mock_get_config.start()\n self.mock_load_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config'\n )\n self.load_config = self.mock_load_config.start()\n self.mock_get_resource_connection_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection'\n )\n self.get_resource_connection_config = (self.\n mock_get_resource_connection_config.start())\n self.mock_get_resource_connection_facts = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection'\n )\n self.get_resource_connection_facts = (self.\n mock_get_resource_connection_facts.start())\n self.mock_edit_config = patch(\n 'ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config'\n )\n self.edit_config = self.mock_edit_config.start()\n self.mock_execute_show_command = patch(\n 'ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data'\n )\n self.execute_show_command = self.mock_execute_show_command.start()\n\n def tearDown(self):\n super(TestEosLacpInterfacesModule, self).tearDown()\n self.mock_get_resource_connection_config.stop()\n self.mock_get_resource_connection_facts.stop()\n self.mock_edit_config.stop()\n self.mock_get_config.stop()\n self.mock_load_config.stop()\n self.mock_execute_show_command.stop()\n\n def load_fixtures(self, commands=None, transport='cli'):\n\n def load_from_file(*args, **kwargs):\n return load_fixture('eos_lacp_interfaces_config.cfg')\n 
self.execute_show_command.side_effect = load_from_file\n <mask token>\n <mask token>\n <mask token>\n\n def test_eos_lacp_interfaces_merged_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],\n state='merged'))\n self.execute_module(changed=False, commands=[])\n <mask token>\n\n def test_eos_lacp_interfaces_overridden(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 45, rate='normal')], state='overridden'))\n commands = ['interface Ethernet1', 'lacp port-priority 45',\n 'lacp rate normal', 'interface Ethernet2',\n 'no lacp port-priority', 'no lacp rate']\n self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_overridden_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 30), dict(name='Ethernet2', rate='fast')], state='overridden'))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_deleted(self):\n set_module_args(dict(config=[dict(name='Ethernet2')], state='deleted'))\n commands = ['interface Ethernet2', 'no lacp rate']\n self.execute_module(changed=True, commands=commands)\n",
"step-3": "<mask token>\n\n\nclass TestEosLacpInterfacesModule(TestEosModule):\n <mask token>\n\n def setUp(self):\n super(TestEosLacpInterfacesModule, self).setUp()\n self.mock_get_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config'\n )\n self.get_config = self.mock_get_config.start()\n self.mock_load_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config'\n )\n self.load_config = self.mock_load_config.start()\n self.mock_get_resource_connection_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection'\n )\n self.get_resource_connection_config = (self.\n mock_get_resource_connection_config.start())\n self.mock_get_resource_connection_facts = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection'\n )\n self.get_resource_connection_facts = (self.\n mock_get_resource_connection_facts.start())\n self.mock_edit_config = patch(\n 'ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config'\n )\n self.edit_config = self.mock_edit_config.start()\n self.mock_execute_show_command = patch(\n 'ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data'\n )\n self.execute_show_command = self.mock_execute_show_command.start()\n\n def tearDown(self):\n super(TestEosLacpInterfacesModule, self).tearDown()\n self.mock_get_resource_connection_config.stop()\n self.mock_get_resource_connection_facts.stop()\n self.mock_edit_config.stop()\n self.mock_get_config.stop()\n self.mock_load_config.stop()\n self.mock_execute_show_command.stop()\n\n def load_fixtures(self, commands=None, transport='cli'):\n\n def load_from_file(*args, **kwargs):\n return load_fixture('eos_lacp_interfaces_config.cfg')\n 
self.execute_show_command.side_effect = load_from_file\n <mask token>\n\n def test_eos_lacp_interfaces_default_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')]))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_merged(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 45, rate='normal'), dict(name='Ethernet2', rate='normal')],\n state='merged'))\n commands = ['interface Ethernet1', 'lacp port-priority 45',\n 'lacp rate normal', 'interface Ethernet2', 'lacp rate normal']\n self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_merged_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],\n state='merged'))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_replaced_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],\n state='replaced'))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_overridden(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 45, rate='normal')], state='overridden'))\n commands = ['interface Ethernet1', 'lacp port-priority 45',\n 'lacp rate normal', 'interface Ethernet2',\n 'no lacp port-priority', 'no lacp rate']\n self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_overridden_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 30), dict(name='Ethernet2', rate='fast')], state='overridden'))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_deleted(self):\n set_module_args(dict(config=[dict(name='Ethernet2')], state='deleted'))\n commands = ['interface Ethernet2', 'no lacp rate']\n self.execute_module(changed=True, commands=commands)\n",
"step-4": "from __future__ import absolute_import, division, print_function\n__metaclass__ = type\nfrom ansible_collections.arista.eos.tests.unit.compat.mock import patch\nfrom ansible_collections.arista.eos.plugins.modules import eos_lacp_interfaces\nfrom ansible_collections.arista.eos.tests.unit.modules.utils import set_module_args\nfrom .eos_module import TestEosModule, load_fixture\n\n\nclass TestEosLacpInterfacesModule(TestEosModule):\n module = eos_lacp_interfaces\n\n def setUp(self):\n super(TestEosLacpInterfacesModule, self).setUp()\n self.mock_get_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config'\n )\n self.get_config = self.mock_get_config.start()\n self.mock_load_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config'\n )\n self.load_config = self.mock_load_config.start()\n self.mock_get_resource_connection_config = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection'\n )\n self.get_resource_connection_config = (self.\n mock_get_resource_connection_config.start())\n self.mock_get_resource_connection_facts = patch(\n 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection'\n )\n self.get_resource_connection_facts = (self.\n mock_get_resource_connection_facts.start())\n self.mock_edit_config = patch(\n 'ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config'\n )\n self.edit_config = self.mock_edit_config.start()\n self.mock_execute_show_command = patch(\n 'ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data'\n )\n self.execute_show_command = self.mock_execute_show_command.start()\n\n def tearDown(self):\n super(TestEosLacpInterfacesModule, self).tearDown()\n 
self.mock_get_resource_connection_config.stop()\n self.mock_get_resource_connection_facts.stop()\n self.mock_edit_config.stop()\n self.mock_get_config.stop()\n self.mock_load_config.stop()\n self.mock_execute_show_command.stop()\n\n def load_fixtures(self, commands=None, transport='cli'):\n\n def load_from_file(*args, **kwargs):\n return load_fixture('eos_lacp_interfaces_config.cfg')\n self.execute_show_command.side_effect = load_from_file\n\n def test_eos_lacp_interfaces_default(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 45, rate='normal')]))\n commands = ['interface Ethernet1', 'lacp port-priority 45',\n 'lacp rate normal']\n self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_default_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')]))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_merged(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 45, rate='normal'), dict(name='Ethernet2', rate='normal')],\n state='merged'))\n commands = ['interface Ethernet1', 'lacp port-priority 45',\n 'lacp rate normal', 'interface Ethernet2', 'lacp rate normal']\n self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_merged_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],\n state='merged'))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_replaced_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet2', rate='fast')],\n state='replaced'))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_overridden(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 45, rate='normal')], state='overridden'))\n commands = ['interface Ethernet1', 'lacp port-priority 45',\n 'lacp rate normal', 'interface Ethernet2',\n 'no lacp port-priority', 'no lacp rate']\n 
self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_overridden_idempotent(self):\n set_module_args(dict(config=[dict(name='Ethernet1', port_priority=\n 30), dict(name='Ethernet2', rate='fast')], state='overridden'))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_deleted(self):\n set_module_args(dict(config=[dict(name='Ethernet2')], state='deleted'))\n commands = ['interface Ethernet2', 'no lacp rate']\n self.execute_module(changed=True, commands=commands)\n",
"step-5": "# (c) 2019, Ansible by Red Hat, inc\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n#\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nfrom ansible_collections.arista.eos.tests.unit.compat.mock import patch\nfrom ansible_collections.arista.eos.plugins.modules import eos_lacp_interfaces\nfrom ansible_collections.arista.eos.tests.unit.modules.utils import (\n set_module_args,\n)\nfrom .eos_module import TestEosModule, load_fixture\n\n\nclass TestEosLacpInterfacesModule(TestEosModule):\n module = eos_lacp_interfaces\n\n def setUp(self):\n super(TestEosLacpInterfacesModule, self).setUp()\n\n self.mock_get_config = patch(\n \"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config\"\n )\n self.get_config = self.mock_get_config.start()\n\n self.mock_load_config = patch(\n \"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config\"\n )\n self.load_config = self.mock_load_config.start()\n\n self.mock_get_resource_connection_config = patch(\n \"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection\"\n )\n self.get_resource_connection_config = (\n self.mock_get_resource_connection_config.start()\n )\n\n self.mock_get_resource_connection_facts = patch(\n \"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection\"\n )\n self.get_resource_connection_facts = (\n self.mock_get_resource_connection_facts.start()\n )\n\n self.mock_edit_config = patch(\n \"ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config\"\n )\n self.edit_config = self.mock_edit_config.start()\n\n self.mock_execute_show_command = patch(\n 
\"ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.lacp_interfaces.lacp_interfaces.Lacp_interfacesFacts.get_device_data\"\n )\n self.execute_show_command = self.mock_execute_show_command.start()\n\n def tearDown(self):\n super(TestEosLacpInterfacesModule, self).tearDown()\n self.mock_get_resource_connection_config.stop()\n self.mock_get_resource_connection_facts.stop()\n self.mock_edit_config.stop()\n self.mock_get_config.stop()\n self.mock_load_config.stop()\n self.mock_execute_show_command.stop()\n\n def load_fixtures(self, commands=None, transport=\"cli\"):\n def load_from_file(*args, **kwargs):\n return load_fixture(\"eos_lacp_interfaces_config.cfg\")\n\n self.execute_show_command.side_effect = load_from_file\n\n def test_eos_lacp_interfaces_default(self):\n set_module_args(\n dict(\n config=[\n dict(name=\"Ethernet1\", port_priority=45, rate=\"normal\")\n ]\n )\n )\n commands = [\n \"interface Ethernet1\",\n \"lacp port-priority 45\",\n \"lacp rate normal\",\n ]\n self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_default_idempotent(self):\n set_module_args(dict(config=[dict(name=\"Ethernet2\", rate=\"fast\")]))\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_merged(self):\n set_module_args(\n dict(\n config=[\n dict(name=\"Ethernet1\", port_priority=45, rate=\"normal\"),\n dict(name=\"Ethernet2\", rate=\"normal\"),\n ],\n state=\"merged\",\n )\n )\n commands = [\n \"interface Ethernet1\",\n \"lacp port-priority 45\",\n \"lacp rate normal\",\n \"interface Ethernet2\",\n \"lacp rate normal\",\n ]\n self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_merged_idempotent(self):\n set_module_args(\n dict(config=[dict(name=\"Ethernet2\", rate=\"fast\")], state=\"merged\")\n )\n self.execute_module(changed=False, commands=[])\n\n # Bug #64453\n # def test_eos_lacp_interfaces_replaced(self):\n # set_module_args(dict(\n # config=[dict(\n # 
name=\"Ethernet1\",\n # port_priority=45,\n # rate=\"normal\"\n # )], state=\"replaced\"\n # ))\n # commands = ['interface Ethernet1', 'lacp port-priority 45', 'lacp rate normal']\n # self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_replaced_idempotent(self):\n set_module_args(\n dict(\n config=[dict(name=\"Ethernet2\", rate=\"fast\")], state=\"replaced\"\n )\n )\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_overridden(self):\n set_module_args(\n dict(\n config=[\n dict(name=\"Ethernet1\", port_priority=45, rate=\"normal\")\n ],\n state=\"overridden\",\n )\n )\n commands = [\n \"interface Ethernet1\",\n \"lacp port-priority 45\",\n \"lacp rate normal\",\n \"interface Ethernet2\",\n \"no lacp port-priority\",\n \"no lacp rate\",\n ]\n self.execute_module(changed=True, commands=commands)\n\n def test_eos_lacp_interfaces_overridden_idempotent(self):\n set_module_args(\n dict(\n config=[\n dict(name=\"Ethernet1\", port_priority=30),\n dict(name=\"Ethernet2\", rate=\"fast\"),\n ],\n state=\"overridden\",\n )\n )\n self.execute_module(changed=False, commands=[])\n\n def test_eos_lacp_interfaces_deleted(self):\n set_module_args(dict(config=[dict(name=\"Ethernet2\")], state=\"deleted\"))\n commands = [\"interface Ethernet2\", \"no lacp rate\"]\n self.execute_module(changed=True, commands=commands)\n",
"step-ids": [
4,
8,
11,
15,
16
]
}
|
[
4,
8,
11,
15,
16
] |
#!/usr/bin/env python
import sys
import struct
import Queue
import logging
import redis
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from threading import Thread
from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr
from scapy.all import Packet, IPOption
from scapy.all import PacketListField, ShortField, IntField, LongField, BitField, FieldListField, FieldLenField, ByteField
from scapy.all import Dot1Q, IP, UDP, Raw
from scapy.layers.inet import _IPOption_HDR
from check.verification import Verification
NUM = 0
redis_session = redis.Redis(host='localhost')
def get_if():
    """Return the name of the first local interface whose name contains "enp0s8".

    Exits the process with status 1 when no such interface exists.
    """
    iface = None
    for i in get_if_list():
        if "enp0s8" in i:
            iface = i
            break
    if not iface:
        # print(...) with a single argument behaves identically on Py2 and Py3.
        print("Cannot find enp0s8 interface")
        exit(1)
    return iface
class SwitchTrace(Packet):
    """One per-hop telemetry record: 6-bit switch id, 6-bit ingress port,
    20-bit rule id (32 bits total)."""
    fields_desc = [ BitField("swid", 0x0, 6),
                    BitField("inport", 0x0, 6),
                    BitField("rule", 0x0, 20)]
    def extract_padding(self, p):
        # Consume no padding: remaining bytes belong to the next trace record.
        return "", p
class IVPOption_MRI(IPOption):
    """Scapy layer for the MRI IP option (option number 31): a count-prefixed
    list of SwitchTrace records appended by switches along the path.

    NOTE(review): the name looks like a typo for IPOption_MRI, but renaming
    would change the public interface, so it is kept as-is.
    """
    name = "MRI"
    option = 31
    fields_desc = [ _IPOption_HDR,
                    FieldLenField("length", None, fmt="B",
                                  length_of="swtraces",
                                  # each 4-byte trace counts as 2 length units,
                                  # plus the 4-byte option header
                                  adjust=lambda pkt,l:l*2+4),
                    ShortField("count", 0),
                    PacketListField("swtraces",
                                    [],
                                    SwitchTrace,
                                    count_from=lambda pkt:(pkt.count*1)) ]
def check_packet(queue):
    """Worker loop: pull queued path records and verify each one, forever."""
    while True:
        item = queue.get()
        checker = Verification()
        checker.verif_packet(item)
def handle_pkt(pkt, q):
    """Extract the MRI per-hop trace from *pkt* and enqueue it for verification."""
    global NUM
    vlanid = pkt[Dot1Q].vlan
    path = []
    for hop in range(pkt['MRI'].count):
        trace = pkt['MRI'].swtraces[hop]
        # Prepend so the finished list runs from first hop to last.
        path.insert(0, [pkt['IP'].dst, trace.swid, trace.inport, trace.rule])
    NUM = NUM + 1
    q.put([path, NUM, len(path), vlanid])
    print("Path %i: %s and vlan ID: %d" % (NUM, path, vlanid))
    sys.stdout.flush()
def main():
    """Start the verification workers, then sniff packets on enp0s8 forever.

    On shutdown (including Ctrl-C), delete all switch-related keys (``s*``)
    from the Redis session.
    """
    q = Queue.Queue(maxsize=0)
    workers = 5
    for i in range(workers):
        thread = Thread(target=check_packet, args=(q,))
        # Daemon threads don't block interpreter exit; .daemon replaces the
        # deprecated setDaemon().
        thread.daemon = True
        thread.start()
    iface = 'enp0s8'
    # print(...) with a single argument behaves identically on Py2 and Py3.
    print('Path Format [vlanID, [dst_ip, swID, inport, ruleID], ...]\n')
    sys.stdout.flush()
    try:
        sniff(filter='', iface=iface, prn=lambda x: handle_pkt(x, q))
    finally:
        for key in redis_session.scan_iter("s*"):
            redis_session.delete(key)
# Start the sniffer only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "e4ecc1746e907f11936683384e1edb34dd637de7",
"index": 8171,
"step-1": "#!/usr/bin/env python\nimport sys\nimport struct\nimport Queue\nimport logging\nimport redis\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\n\nfrom threading import Thread\nfrom scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr\nfrom scapy.all import Packet, IPOption\nfrom scapy.all import PacketListField, ShortField, IntField, LongField, BitField, FieldListField, FieldLenField, ByteField\nfrom scapy.all import Dot1Q, IP, UDP, Raw\nfrom scapy.layers.inet import _IPOption_HDR\nfrom check.verification import Verification\n\nNUM = 0\nredis_session = redis.Redis(host='localhost')\n\n\ndef get_if():\n ifs=get_if_list()\n iface=None\n for i in get_if_list():\n if \"enp0s8\" in i:\n iface=i\n break;\n if not iface:\n print \"Cannot find enp0s8 interface\"\n exit(1)\n return iface\n\n\nclass SwitchTrace(Packet):\n fields_desc = [ BitField(\"swid\", 0x0, 6),\n BitField(\"inport\", 0x0, 6),\n BitField(\"rule\", 0x0, 20)]\n\n def extract_padding(self, p):\n return \"\", p\n\n\nclass IVPOption_MRI(IPOption):\n name = \"MRI\"\n option = 31\n fields_desc = [ _IPOption_HDR,\n FieldLenField(\"length\", None, fmt=\"B\",\n length_of=\"swtraces\",\n adjust=lambda pkt,l:l*2+4),\n ShortField(\"count\", 0),\n PacketListField(\"swtraces\",\n [],\n SwitchTrace,\n count_from=lambda pkt:(pkt.count*1)) ]\n\n\ndef check_packet(queue):\n while True:\n path = queue.get()\n verif = Verification()\n verif_path = verif.verif_packet(path)\n\n\ndef handle_pkt(pkt, q):\n #pkt.show2()\n global NUM\n count = 0\n path = []\n vlanid = pkt[Dot1Q].vlan\n while (count < pkt['MRI'].count):\n swid = pkt['MRI'].swtraces[count].swid\n inport = pkt['MRI'].swtraces[count].inport\n ruleid = pkt['MRI'].swtraces[count].rule\n dst_ip = pkt['IP'].dst\n path.insert(0, [dst_ip, swid, inport, ruleid])\n count = count + 1\n NUM = NUM + 1\n q.put([path, NUM, len(path), vlanid])\n print(\"Path %i: %s and vlan ID: %d\" % (NUM, path, vlanid))\n sys.stdout.flush()\n\ndef main():\n q = 
Queue.Queue(maxsize=0)\n workers = 5\n\n for i in range(workers):\n thread = Thread(target=check_packet, args=(q, ))\n thread.setDaemon(True)\n thread.start()\n\n iface = 'enp0s8'\n print 'Path Format [vlanID, [dst_ip, swID, inport, ruleID], ...]\\n'\n sys.stdout.flush()\n try:\n sniff(filter='', iface = iface, prn = lambda x: handle_pkt(x, q))\n finally:\n for key in redis_session.scan_iter(\"s*\"):\n redis_session.delete(key)\n\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pandas as pd
import pytest
import pytz
from neo4j._codec.hydration.v2 import HydrationHandler
from neo4j._codec.packstream import Structure
from neo4j.time import DateTime
from ..v1.test_temporal_dehydration import (
TestTimeDehydration as _TestTemporalDehydrationV1,
)
class TestTimeDehydration(_TestTemporalDehydrationV1):
    """Temporal dehydration tests for Bolt protocol v2.

    Re-runs the inherited v1 suite against the v2 hydration handler and
    overrides the zoned date-time cases, whose first Structure field holds
    UTC epoch seconds in v2 rather than local-time seconds.
    """

    @pytest.fixture
    def hydration_handler(self):
        # Swap in the v2 handler; all inherited assertions now target v2.
        return HydrationHandler()

    def test_date_time_fixed_offset(self, assert_transforms):
        plus_one = pytz.FixedOffset(60)
        value = DateTime(2018, 10, 12, 11, 37, 41, 474716862, plus_one)
        # 11:37:41 at UTC+1 is 10:37:41 UTC -> 1539340661 epoch seconds.
        assert_transforms(
            value, Structure(b"I", 1539340661, 474716862, 3600)
        )

    def test_native_date_time_fixed_offset(self, assert_transforms):
        plus_one = pytz.FixedOffset(60)
        value = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, plus_one)
        # Native datetimes carry microseconds only -> padded to nanoseconds.
        assert_transforms(
            value, Structure(b"I", 1539340661, 474716000, 3600)
        )

    def test_pandas_date_time_fixed_offset(self, assert_transforms):
        value = pd.Timestamp("2018-10-12T11:37:41.474716862+0100")
        assert_transforms(
            value, Structure(b"I", 1539340661, 474716862, 3600)
        )

    def test_date_time_fixed_negative_offset(self, assert_transforms):
        minus_one = pytz.FixedOffset(-60)
        value = DateTime(2018, 10, 12, 11, 37, 41, 474716862, minus_one)
        # 11:37:41 at UTC-1 is 12:37:41 UTC -> 1539347861 epoch seconds.
        assert_transforms(
            value, Structure(b"I", 1539347861, 474716862, -3600)
        )

    def test_native_date_time_fixed_negative_offset(self, assert_transforms):
        minus_one = pytz.FixedOffset(-60)
        value = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, minus_one)
        assert_transforms(
            value, Structure(b"I", 1539347861, 474716000, -3600)
        )

    def test_pandas_date_time_fixed_negative_offset(self, assert_transforms):
        value = pd.Timestamp("2018-10-12T11:37:41.474716862-0100")
        assert_transforms(
            value, Structure(b"I", 1539347861, 474716862, -3600)
        )

    def test_date_time_zone_id(self, assert_transforms):
        stockholm = pytz.timezone("Europe/Stockholm")
        value = stockholm.localize(
            DateTime(2018, 10, 12, 11, 37, 41, 474716862)
        )
        # Stockholm is UTC+2 on this date, hence local seconds - 7200.
        assert_transforms(
            value,
            Structure(b"i", 1539337061, 474716862, "Europe/Stockholm"),
        )

    def test_native_date_time_zone_id(self, assert_transforms):
        stockholm = pytz.timezone("Europe/Stockholm")
        value = stockholm.localize(
            datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)
        )
        # Stockholm is UTC+2 on this date, hence local seconds - 7200.
        assert_transforms(
            value,
            Structure(b"i", 1539337061, 474716000, "Europe/Stockholm"),
        )

    @pytest.mark.parametrize(("dt", "fields"), (
        (
            pd.Timestamp("2018-10-12T11:37:41.474716862+0200",
                         tz="Europe/Stockholm"),
            (1539337061, 474716862, "Europe/Stockholm"),
        ),
        (
            # 1972-10-29 02:00:01.001000001+0100, just before the DST change
            pd.Timestamp((1032 * 24 + 2) * 3600 * 1000000000 + 1001000001,
                         tz="Europe/London"),
            ((1032 * 24 + 2) * 3600 + 1, 1000001, "Europe/London"),
        ),
        (
            # 1972-10-29 02:00:01.001000001+0000, just after the DST change
            pd.Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001,
                         tz="Europe/London"),
            ((1032 * 24 + 1) * 3600 + 1, 1000001, "Europe/London"),
        ),
    ))
    def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):
        assert_transforms(dt, Structure(b"i", *fields))
|
normal
|
{
"blob_id": "5b33615e1890631bac68801310e4b606ac41cb13",
"index": 1340,
"step-1": "<mask token>\n\n\nclass TestTimeDehydration(_TestTemporalDehydrationV1):\n\n @pytest.fixture\n def hydration_handler(self):\n return HydrationHandler()\n <mask token>\n <mask token>\n\n def test_pandas_date_time_fixed_offset(self, assert_transforms):\n dt = pd.Timestamp('2018-10-12T11:37:41.474716862+0100')\n assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))\n\n def test_date_time_fixed_negative_offset(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset\n (-60))\n assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))\n <mask token>\n <mask token>\n <mask token>\n\n def test_native_date_time_zone_id(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)\n dt = pytz.timezone('Europe/Stockholm').localize(dt)\n assert_transforms(dt, Structure(b'i', 1539337061, 474716000,\n 'Europe/Stockholm'))\n\n @pytest.mark.parametrize(('dt', 'fields'), ((pd.Timestamp(\n '2018-10-12T11:37:41.474716862+0200', tz='Europe/Stockholm'), (\n 1539337061, 474716862, 'Europe/Stockholm')), (pd.Timestamp((1032 * \n 24 + 2) * 3600 * 1000000000 + 1001000001, tz='Europe/London'), ((\n 1032 * 24 + 2) * 3600 + 1, 1000001, 'Europe/London')), (pd.\n Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001, tz=\n 'Europe/London'), ((1032 * 24 + 1) * 3600 + 1, 1000001,\n 'Europe/London'))))\n def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):\n assert_transforms(dt, Structure(b'i', *fields))\n",
"step-2": "<mask token>\n\n\nclass TestTimeDehydration(_TestTemporalDehydrationV1):\n\n @pytest.fixture\n def hydration_handler(self):\n return HydrationHandler()\n <mask token>\n <mask token>\n\n def test_pandas_date_time_fixed_offset(self, assert_transforms):\n dt = pd.Timestamp('2018-10-12T11:37:41.474716862+0100')\n assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))\n\n def test_date_time_fixed_negative_offset(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset\n (-60))\n assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))\n\n def test_native_date_time_fixed_negative_offset(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.\n FixedOffset(-60))\n assert_transforms(dt, Structure(b'I', 1539347861, 474716000, -3600))\n <mask token>\n <mask token>\n\n def test_native_date_time_zone_id(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)\n dt = pytz.timezone('Europe/Stockholm').localize(dt)\n assert_transforms(dt, Structure(b'i', 1539337061, 474716000,\n 'Europe/Stockholm'))\n\n @pytest.mark.parametrize(('dt', 'fields'), ((pd.Timestamp(\n '2018-10-12T11:37:41.474716862+0200', tz='Europe/Stockholm'), (\n 1539337061, 474716862, 'Europe/Stockholm')), (pd.Timestamp((1032 * \n 24 + 2) * 3600 * 1000000000 + 1001000001, tz='Europe/London'), ((\n 1032 * 24 + 2) * 3600 + 1, 1000001, 'Europe/London')), (pd.\n Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001, tz=\n 'Europe/London'), ((1032 * 24 + 1) * 3600 + 1, 1000001,\n 'Europe/London'))))\n def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):\n assert_transforms(dt, Structure(b'i', *fields))\n",
"step-3": "<mask token>\n\n\nclass TestTimeDehydration(_TestTemporalDehydrationV1):\n\n @pytest.fixture\n def hydration_handler(self):\n return HydrationHandler()\n <mask token>\n\n def test_native_date_time_fixed_offset(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.\n FixedOffset(60))\n assert_transforms(dt, Structure(b'I', 1539340661, 474716000, 3600))\n\n def test_pandas_date_time_fixed_offset(self, assert_transforms):\n dt = pd.Timestamp('2018-10-12T11:37:41.474716862+0100')\n assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))\n\n def test_date_time_fixed_negative_offset(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset\n (-60))\n assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))\n\n def test_native_date_time_fixed_negative_offset(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.\n FixedOffset(-60))\n assert_transforms(dt, Structure(b'I', 1539347861, 474716000, -3600))\n <mask token>\n <mask token>\n\n def test_native_date_time_zone_id(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)\n dt = pytz.timezone('Europe/Stockholm').localize(dt)\n assert_transforms(dt, Structure(b'i', 1539337061, 474716000,\n 'Europe/Stockholm'))\n\n @pytest.mark.parametrize(('dt', 'fields'), ((pd.Timestamp(\n '2018-10-12T11:37:41.474716862+0200', tz='Europe/Stockholm'), (\n 1539337061, 474716862, 'Europe/Stockholm')), (pd.Timestamp((1032 * \n 24 + 2) * 3600 * 1000000000 + 1001000001, tz='Europe/London'), ((\n 1032 * 24 + 2) * 3600 + 1, 1000001, 'Europe/London')), (pd.\n Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001, tz=\n 'Europe/London'), ((1032 * 24 + 1) * 3600 + 1, 1000001,\n 'Europe/London'))))\n def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):\n assert_transforms(dt, Structure(b'i', *fields))\n",
"step-4": "<mask token>\n\n\nclass TestTimeDehydration(_TestTemporalDehydrationV1):\n\n @pytest.fixture\n def hydration_handler(self):\n return HydrationHandler()\n\n def test_date_time_fixed_offset(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset(60)\n )\n assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))\n\n def test_native_date_time_fixed_offset(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.\n FixedOffset(60))\n assert_transforms(dt, Structure(b'I', 1539340661, 474716000, 3600))\n\n def test_pandas_date_time_fixed_offset(self, assert_transforms):\n dt = pd.Timestamp('2018-10-12T11:37:41.474716862+0100')\n assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))\n\n def test_date_time_fixed_negative_offset(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset\n (-60))\n assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))\n\n def test_native_date_time_fixed_negative_offset(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.\n FixedOffset(-60))\n assert_transforms(dt, Structure(b'I', 1539347861, 474716000, -3600))\n\n def test_pandas_date_time_fixed_negative_offset(self, assert_transforms):\n dt = pd.Timestamp('2018-10-12T11:37:41.474716862-0100')\n assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))\n\n def test_date_time_zone_id(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862)\n dt = pytz.timezone('Europe/Stockholm').localize(dt)\n assert_transforms(dt, Structure(b'i', 1539337061, 474716862,\n 'Europe/Stockholm'))\n\n def test_native_date_time_zone_id(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)\n dt = pytz.timezone('Europe/Stockholm').localize(dt)\n assert_transforms(dt, Structure(b'i', 1539337061, 474716000,\n 'Europe/Stockholm'))\n\n 
@pytest.mark.parametrize(('dt', 'fields'), ((pd.Timestamp(\n '2018-10-12T11:37:41.474716862+0200', tz='Europe/Stockholm'), (\n 1539337061, 474716862, 'Europe/Stockholm')), (pd.Timestamp((1032 * \n 24 + 2) * 3600 * 1000000000 + 1001000001, tz='Europe/London'), ((\n 1032 * 24 + 2) * 3600 + 1, 1000001, 'Europe/London')), (pd.\n Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001, tz=\n 'Europe/London'), ((1032 * 24 + 1) * 3600 + 1, 1000001,\n 'Europe/London'))))\n def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):\n assert_transforms(dt, Structure(b'i', *fields))\n",
"step-5": "# Copyright (c) \"Neo4j\"\n# Neo4j Sweden AB [https://neo4j.com]\n#\n# This file is part of Neo4j.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport datetime\n\nimport pandas as pd\nimport pytest\nimport pytz\n\nfrom neo4j._codec.hydration.v2 import HydrationHandler\nfrom neo4j._codec.packstream import Structure\nfrom neo4j.time import DateTime\n\nfrom ..v1.test_temporal_dehydration import (\n TestTimeDehydration as _TestTemporalDehydrationV1,\n)\n\n\nclass TestTimeDehydration(_TestTemporalDehydrationV1):\n @pytest.fixture\n def hydration_handler(self):\n return HydrationHandler()\n\n def test_date_time_fixed_offset(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862,\n pytz.FixedOffset(60))\n assert_transforms(\n dt,\n Structure(b\"I\", 1539340661, 474716862, 3600)\n )\n\n def test_native_date_time_fixed_offset(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716,\n pytz.FixedOffset(60))\n assert_transforms(\n dt,\n Structure(b\"I\", 1539340661, 474716000, 3600)\n )\n\n def test_pandas_date_time_fixed_offset(self, assert_transforms):\n dt = pd.Timestamp(\"2018-10-12T11:37:41.474716862+0100\")\n assert_transforms(dt, Structure(b\"I\", 1539340661, 474716862, 3600))\n\n def test_date_time_fixed_negative_offset(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862,\n pytz.FixedOffset(-60))\n assert_transforms(\n dt,\n Structure(b\"I\", 1539347861, 474716862, -3600)\n 
)\n\n def test_native_date_time_fixed_negative_offset(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716,\n pytz.FixedOffset(-60))\n assert_transforms(\n dt,\n Structure(b\"I\", 1539347861, 474716000, -3600)\n )\n\n def test_pandas_date_time_fixed_negative_offset(self, assert_transforms):\n dt = pd.Timestamp(\"2018-10-12T11:37:41.474716862-0100\")\n assert_transforms(dt, Structure(b\"I\", 1539347861, 474716862, -3600))\n\n def test_date_time_zone_id(self, assert_transforms):\n dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862)\n dt = pytz.timezone(\"Europe/Stockholm\").localize(dt)\n # offset should be UTC+2 (7200 seconds)\n assert_transforms(\n dt,\n Structure(b\"i\", 1539337061, 474716862, \"Europe/Stockholm\")\n )\n\n def test_native_date_time_zone_id(self, assert_transforms):\n dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)\n dt = pytz.timezone(\"Europe/Stockholm\").localize(dt)\n # offset should be UTC+2 (7200 seconds)\n assert_transforms(\n dt,\n Structure(b\"i\", 1539337061, 474716000, \"Europe/Stockholm\")\n )\n\n @pytest.mark.parametrize((\"dt\", \"fields\"), (\n (\n pd.Timestamp(\"2018-10-12T11:37:41.474716862+0200\",\n tz=\"Europe/Stockholm\"),\n (1539337061, 474716862, \"Europe/Stockholm\"),\n ),\n (\n # 1972-10-29 02:00:01.001000001+0100 pre DST change\n pd.Timestamp((1032 * 24 + 2) * 3600 * 1000000000 + 1001000001,\n tz=\"Europe/London\"),\n ((1032 * 24 + 2) * 3600 + 1, 1000001, \"Europe/London\"),\n ),\n (\n # 1972-10-29 02:00:01.001000001+0000 post DST change\n pd.Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001,\n tz=\"Europe/London\"),\n ((1032 * 24 + 1) * 3600 + 1, 1000001, \"Europe/London\"),\n )\n ))\n def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):\n assert_transforms(dt, Structure(b\"i\", *fields))\n",
"step-ids": [
6,
7,
8,
11,
13
]
}
|
[
6,
7,
8,
11,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TextPageContentModelTest(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TextPageContentModelTest(TestCase):
def test_instance(self):
file = Image.create_empty_image_file(name='hello.jpg')
image = Image.objects.create(image=file, alt='World')
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(str(image)[16:21], 'hello')
<|reserved_special_token_1|>
from django.test import TestCase
from stack_it.models import Image
class TextPageContentModelTest(TestCase):
def test_instance(self):
file = Image.create_empty_image_file(name='hello.jpg')
image = Image.objects.create(image=file, alt='World')
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(str(image)[16:21], 'hello')
<|reserved_special_token_1|>
from django.test import TestCase
from stack_it.models import Image
class TextPageContentModelTest(TestCase):
def test_instance(self):
file = Image.create_empty_image_file(name='hello.jpg')
image = Image.objects.create(image=file, alt="World")
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(str(image)[16:21], 'hello')
|
flexible
|
{
"blob_id": "5287bd1847848aa527df8ce57e896bc30c70b43c",
"index": 4432,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TextPageContentModelTest(TestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TextPageContentModelTest(TestCase):\n\n def test_instance(self):\n file = Image.create_empty_image_file(name='hello.jpg')\n image = Image.objects.create(image=file, alt='World')\n self.assertEqual(Image.objects.count(), 1)\n self.assertEqual(str(image)[16:21], 'hello')\n",
"step-4": "from django.test import TestCase\nfrom stack_it.models import Image\n\n\nclass TextPageContentModelTest(TestCase):\n\n def test_instance(self):\n file = Image.create_empty_image_file(name='hello.jpg')\n image = Image.objects.create(image=file, alt='World')\n self.assertEqual(Image.objects.count(), 1)\n self.assertEqual(str(image)[16:21], 'hello')\n",
"step-5": "from django.test import TestCase\nfrom stack_it.models import Image\n\n\nclass TextPageContentModelTest(TestCase):\n def test_instance(self):\n file = Image.create_empty_image_file(name='hello.jpg')\n image = Image.objects.create(image=file, alt=\"World\")\n self.assertEqual(Image.objects.count(), 1)\n self.assertEqual(str(image)[16:21], 'hello')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test():
webbrowser.open_new_tab('Test.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ventana.geometry('1920x1080')
def test():
webbrowser.open_new_tab('Test.html')
<|reserved_special_token_0|>
boton1.grid(row=3, column=0)
boton2.grid(row=4, column=0)
boton3.grid(row=5, column=0)
ventana.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ventana = tkinter.Tk()
ventana.geometry('1920x1080')
def test():
webbrowser.open_new_tab('Test.html')
boton1 = tkinter.Button(ventana, text='WEB', width=10, height=5, command=test)
boton2 = tkinter.Button(ventana, text='boton2', width=10, height=5)
boton3 = tkinter.Button(ventana, text='boton3', width=10, height=5)
boton1.grid(row=3, column=0)
boton2.grid(row=4, column=0)
boton3.grid(row=5, column=0)
ventana.mainloop()
<|reserved_special_token_1|>
import tkinter
import webbrowser
ventana = tkinter.Tk()
ventana.geometry('1920x1080')
def test():
webbrowser.open_new_tab('Test.html')
boton1 = tkinter.Button(ventana, text='WEB', width=10, height=5, command=test)
boton2 = tkinter.Button(ventana, text='boton2', width=10, height=5)
boton3 = tkinter.Button(ventana, text='boton3', width=10, height=5)
boton1.grid(row=3, column=0)
boton2.grid(row=4, column=0)
boton3.grid(row=5, column=0)
ventana.mainloop()
<|reserved_special_token_1|>
import tkinter
import webbrowser
ventana = tkinter.Tk()
ventana.geometry("1920x1080")
def test():
webbrowser.open_new_tab('Test.html')
boton1 = tkinter.Button(ventana,text ="WEB", width = 10, height=5, command = test );
boton2 = tkinter.Button(ventana,text ="boton2", width = 10, height=5);
boton3 = tkinter.Button(ventana,text ="boton3", width = 10, height=5);
boton1.grid(row = 3, column = 0)
boton2.grid(row = 4, column = 0)
boton3.grid(row = 5, column = 0)
ventana.mainloop()
|
flexible
|
{
"blob_id": "8bf330dc7bee65ac9478722233477ebe5d0286c2",
"index": 1102,
"step-1": "<mask token>\n\n\ndef test():\n webbrowser.open_new_tab('Test.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\nventana.geometry('1920x1080')\n\n\ndef test():\n webbrowser.open_new_tab('Test.html')\n\n\n<mask token>\nboton1.grid(row=3, column=0)\nboton2.grid(row=4, column=0)\nboton3.grid(row=5, column=0)\nventana.mainloop()\n",
"step-3": "<mask token>\nventana = tkinter.Tk()\nventana.geometry('1920x1080')\n\n\ndef test():\n webbrowser.open_new_tab('Test.html')\n\n\nboton1 = tkinter.Button(ventana, text='WEB', width=10, height=5, command=test)\nboton2 = tkinter.Button(ventana, text='boton2', width=10, height=5)\nboton3 = tkinter.Button(ventana, text='boton3', width=10, height=5)\nboton1.grid(row=3, column=0)\nboton2.grid(row=4, column=0)\nboton3.grid(row=5, column=0)\nventana.mainloop()\n",
"step-4": "import tkinter\nimport webbrowser\nventana = tkinter.Tk()\nventana.geometry('1920x1080')\n\n\ndef test():\n webbrowser.open_new_tab('Test.html')\n\n\nboton1 = tkinter.Button(ventana, text='WEB', width=10, height=5, command=test)\nboton2 = tkinter.Button(ventana, text='boton2', width=10, height=5)\nboton3 = tkinter.Button(ventana, text='boton3', width=10, height=5)\nboton1.grid(row=3, column=0)\nboton2.grid(row=4, column=0)\nboton3.grid(row=5, column=0)\nventana.mainloop()\n",
"step-5": "import tkinter\r\nimport webbrowser\r\nventana = tkinter.Tk()\r\nventana.geometry(\"1920x1080\")\r\n\r\ndef test():\r\n webbrowser.open_new_tab('Test.html')\r\n\r\nboton1 = tkinter.Button(ventana,text =\"WEB\", width = 10, height=5, command = test );\r\nboton2 = tkinter.Button(ventana,text =\"boton2\", width = 10, height=5);\r\nboton3 = tkinter.Button(ventana,text =\"boton3\", width = 10, height=5);\r\n\r\n\r\nboton1.grid(row = 3, column = 0)\r\nboton2.grid(row = 4, column = 0)\r\nboton3.grid(row = 5, column = 0)\r\n\r\nventana.mainloop()\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""Utilities for AnalysisModules."""
import inspect
from mongoengine import QuerySet
from numpy import percentile
from .modules import AnalysisModule
def get_primary_module(package):
"""Extract AnalysisModule primary module from package."""
def test_submodule(submodule):
"""Test a submodule to see if it is an AnalysisModule module."""
is_correct_subclass = issubclass(submodule, AnalysisModule)
# Ensure submodule is defined within the package we are inspecting (and not 'base')
is_correct_module = package.__name__ in submodule.__module__
return is_correct_subclass and is_correct_module
submodules = inspect.getmembers(package, inspect.isclass)
module = next(submodule for _, submodule in submodules
if test_submodule(submodule))
return module
def scrub_object(obj):
"""Remove protected fields from object (dict or list)."""
if isinstance(obj, list):
return [scrub_object(item) for item in obj]
if isinstance(obj, dict):
clean_dict = {key: scrub_object(value)
for key, value in obj.items()
if not key.startswith('_')}
return clean_dict
return obj
def jsonify(mongo_doc):
"""Convert Mongo document to JSON for serialization."""
if isinstance(mongo_doc, (QuerySet, list,)):
return [jsonify(element) for element in mongo_doc]
result_dict = mongo_doc.to_mongo().to_dict()
clean_dict = scrub_object(result_dict)
return clean_dict
def boxplot(values):
"""Calculate percentiles needed for a boxplot."""
percentiles = percentile(values, [0, 25, 50, 75, 100])
result = {'min_val': percentiles[0],
'q1_val': percentiles[1],
'mean_val': percentiles[2],
'q3_val': percentiles[3],
'max_val': percentiles[4]}
return result
def scrub_category_val(category_val):
"""Make sure that category val is a string with positive length."""
if not isinstance(category_val, str):
category_val = str(category_val)
if category_val.lower() == 'nan':
category_val = 'NaN'
if not category_val:
category_val = 'NaN'
return category_val
def collate_samples(tool_name, fields, samples):
"""Group a set of ToolResult fields from a set of samples by sample name."""
sample_dict = {}
for sample in samples:
sample_name = sample['name']
sample_dict[sample_name] = {}
tool_result = sample[tool_name]
for field in fields:
sample_dict[sample_name][field] = tool_result[field]
return sample_dict
def categories_from_metadata(samples, min_size=2):
"""
Create dict of categories and their values from sample metadata.
Parameters
----------
samples : list
List of sample models.
min_size: int
Minimum number of values required for a given metadata item to
be included in returned categories.
Returns
-------
dict
Dictionary of form {<category_name>: [category_value[, category_value]]}
"""
categories = {}
# Gather categories and values
all_metadata = [sample['metadata'] for sample in samples]
for metadata in all_metadata:
properties = [prop for prop in metadata.keys()]
for prop in properties:
if prop not in categories:
categories[prop] = set([])
category_val = metadata[prop]
category_val = scrub_category_val(category_val)
categories[prop].add(category_val)
# Filter for minimum number of values
categories = {category_name: list(category_values)
for category_name, category_values in categories.items()
if len(category_values) >= min_size}
return categories
|
normal
|
{
"blob_id": "3472dc0c9d00c10ab0690c052e70fbf6a4bdb13d",
"index": 7889,
"step-1": "<mask token>\n\n\ndef boxplot(values):\n \"\"\"Calculate percentiles needed for a boxplot.\"\"\"\n percentiles = percentile(values, [0, 25, 50, 75, 100])\n result = {'min_val': percentiles[0], 'q1_val': percentiles[1],\n 'mean_val': percentiles[2], 'q3_val': percentiles[3], 'max_val':\n percentiles[4]}\n return result\n\n\ndef scrub_category_val(category_val):\n \"\"\"Make sure that category val is a string with positive length.\"\"\"\n if not isinstance(category_val, str):\n category_val = str(category_val)\n if category_val.lower() == 'nan':\n category_val = 'NaN'\n if not category_val:\n category_val = 'NaN'\n return category_val\n\n\ndef collate_samples(tool_name, fields, samples):\n \"\"\"Group a set of ToolResult fields from a set of samples by sample name.\"\"\"\n sample_dict = {}\n for sample in samples:\n sample_name = sample['name']\n sample_dict[sample_name] = {}\n tool_result = sample[tool_name]\n for field in fields:\n sample_dict[sample_name][field] = tool_result[field]\n return sample_dict\n\n\ndef categories_from_metadata(samples, min_size=2):\n \"\"\"\n Create dict of categories and their values from sample metadata.\n\n Parameters\n ----------\n samples : list\n List of sample models.\n min_size: int\n Minimum number of values required for a given metadata item to\n be included in returned categories.\n\n Returns\n -------\n dict\n Dictionary of form {<category_name>: [category_value[, category_value]]}\n\n \"\"\"\n categories = {}\n all_metadata = [sample['metadata'] for sample in samples]\n for metadata in all_metadata:\n properties = [prop for prop in metadata.keys()]\n for prop in properties:\n if prop not in categories:\n categories[prop] = set([])\n category_val = metadata[prop]\n category_val = scrub_category_val(category_val)\n categories[prop].add(category_val)\n categories = {category_name: list(category_values) for category_name,\n category_values in categories.items() if len(category_values) >=\n min_size}\n return 
categories\n",
"step-2": "<mask token>\n\n\ndef get_primary_module(package):\n \"\"\"Extract AnalysisModule primary module from package.\"\"\"\n\n def test_submodule(submodule):\n \"\"\"Test a submodule to see if it is an AnalysisModule module.\"\"\"\n is_correct_subclass = issubclass(submodule, AnalysisModule)\n is_correct_module = package.__name__ in submodule.__module__\n return is_correct_subclass and is_correct_module\n submodules = inspect.getmembers(package, inspect.isclass)\n module = next(submodule for _, submodule in submodules if\n test_submodule(submodule))\n return module\n\n\ndef scrub_object(obj):\n \"\"\"Remove protected fields from object (dict or list).\"\"\"\n if isinstance(obj, list):\n return [scrub_object(item) for item in obj]\n if isinstance(obj, dict):\n clean_dict = {key: scrub_object(value) for key, value in obj.items(\n ) if not key.startswith('_')}\n return clean_dict\n return obj\n\n\n<mask token>\n\n\ndef boxplot(values):\n \"\"\"Calculate percentiles needed for a boxplot.\"\"\"\n percentiles = percentile(values, [0, 25, 50, 75, 100])\n result = {'min_val': percentiles[0], 'q1_val': percentiles[1],\n 'mean_val': percentiles[2], 'q3_val': percentiles[3], 'max_val':\n percentiles[4]}\n return result\n\n\ndef scrub_category_val(category_val):\n \"\"\"Make sure that category val is a string with positive length.\"\"\"\n if not isinstance(category_val, str):\n category_val = str(category_val)\n if category_val.lower() == 'nan':\n category_val = 'NaN'\n if not category_val:\n category_val = 'NaN'\n return category_val\n\n\ndef collate_samples(tool_name, fields, samples):\n \"\"\"Group a set of ToolResult fields from a set of samples by sample name.\"\"\"\n sample_dict = {}\n for sample in samples:\n sample_name = sample['name']\n sample_dict[sample_name] = {}\n tool_result = sample[tool_name]\n for field in fields:\n sample_dict[sample_name][field] = tool_result[field]\n return sample_dict\n\n\ndef categories_from_metadata(samples, min_size=2):\n \"\"\"\n 
Create dict of categories and their values from sample metadata.\n\n Parameters\n ----------\n samples : list\n List of sample models.\n min_size: int\n Minimum number of values required for a given metadata item to\n be included in returned categories.\n\n Returns\n -------\n dict\n Dictionary of form {<category_name>: [category_value[, category_value]]}\n\n \"\"\"\n categories = {}\n all_metadata = [sample['metadata'] for sample in samples]\n for metadata in all_metadata:\n properties = [prop for prop in metadata.keys()]\n for prop in properties:\n if prop not in categories:\n categories[prop] = set([])\n category_val = metadata[prop]\n category_val = scrub_category_val(category_val)\n categories[prop].add(category_val)\n categories = {category_name: list(category_values) for category_name,\n category_values in categories.items() if len(category_values) >=\n min_size}\n return categories\n",
"step-3": "<mask token>\n\n\ndef get_primary_module(package):\n \"\"\"Extract AnalysisModule primary module from package.\"\"\"\n\n def test_submodule(submodule):\n \"\"\"Test a submodule to see if it is an AnalysisModule module.\"\"\"\n is_correct_subclass = issubclass(submodule, AnalysisModule)\n is_correct_module = package.__name__ in submodule.__module__\n return is_correct_subclass and is_correct_module\n submodules = inspect.getmembers(package, inspect.isclass)\n module = next(submodule for _, submodule in submodules if\n test_submodule(submodule))\n return module\n\n\ndef scrub_object(obj):\n \"\"\"Remove protected fields from object (dict or list).\"\"\"\n if isinstance(obj, list):\n return [scrub_object(item) for item in obj]\n if isinstance(obj, dict):\n clean_dict = {key: scrub_object(value) for key, value in obj.items(\n ) if not key.startswith('_')}\n return clean_dict\n return obj\n\n\ndef jsonify(mongo_doc):\n \"\"\"Convert Mongo document to JSON for serialization.\"\"\"\n if isinstance(mongo_doc, (QuerySet, list)):\n return [jsonify(element) for element in mongo_doc]\n result_dict = mongo_doc.to_mongo().to_dict()\n clean_dict = scrub_object(result_dict)\n return clean_dict\n\n\ndef boxplot(values):\n \"\"\"Calculate percentiles needed for a boxplot.\"\"\"\n percentiles = percentile(values, [0, 25, 50, 75, 100])\n result = {'min_val': percentiles[0], 'q1_val': percentiles[1],\n 'mean_val': percentiles[2], 'q3_val': percentiles[3], 'max_val':\n percentiles[4]}\n return result\n\n\ndef scrub_category_val(category_val):\n \"\"\"Make sure that category val is a string with positive length.\"\"\"\n if not isinstance(category_val, str):\n category_val = str(category_val)\n if category_val.lower() == 'nan':\n category_val = 'NaN'\n if not category_val:\n category_val = 'NaN'\n return category_val\n\n\ndef collate_samples(tool_name, fields, samples):\n \"\"\"Group a set of ToolResult fields from a set of samples by sample name.\"\"\"\n sample_dict = {}\n for 
sample in samples:\n sample_name = sample['name']\n sample_dict[sample_name] = {}\n tool_result = sample[tool_name]\n for field in fields:\n sample_dict[sample_name][field] = tool_result[field]\n return sample_dict\n\n\ndef categories_from_metadata(samples, min_size=2):\n \"\"\"\n Create dict of categories and their values from sample metadata.\n\n Parameters\n ----------\n samples : list\n List of sample models.\n min_size: int\n Minimum number of values required for a given metadata item to\n be included in returned categories.\n\n Returns\n -------\n dict\n Dictionary of form {<category_name>: [category_value[, category_value]]}\n\n \"\"\"\n categories = {}\n all_metadata = [sample['metadata'] for sample in samples]\n for metadata in all_metadata:\n properties = [prop for prop in metadata.keys()]\n for prop in properties:\n if prop not in categories:\n categories[prop] = set([])\n category_val = metadata[prop]\n category_val = scrub_category_val(category_val)\n categories[prop].add(category_val)\n categories = {category_name: list(category_values) for category_name,\n category_values in categories.items() if len(category_values) >=\n min_size}\n return categories\n",
"step-4": "<mask token>\nimport inspect\nfrom mongoengine import QuerySet\nfrom numpy import percentile\nfrom .modules import AnalysisModule\n\n\ndef get_primary_module(package):\n \"\"\"Extract AnalysisModule primary module from package.\"\"\"\n\n def test_submodule(submodule):\n \"\"\"Test a submodule to see if it is an AnalysisModule module.\"\"\"\n is_correct_subclass = issubclass(submodule, AnalysisModule)\n is_correct_module = package.__name__ in submodule.__module__\n return is_correct_subclass and is_correct_module\n submodules = inspect.getmembers(package, inspect.isclass)\n module = next(submodule for _, submodule in submodules if\n test_submodule(submodule))\n return module\n\n\ndef scrub_object(obj):\n \"\"\"Remove protected fields from object (dict or list).\"\"\"\n if isinstance(obj, list):\n return [scrub_object(item) for item in obj]\n if isinstance(obj, dict):\n clean_dict = {key: scrub_object(value) for key, value in obj.items(\n ) if not key.startswith('_')}\n return clean_dict\n return obj\n\n\ndef jsonify(mongo_doc):\n \"\"\"Convert Mongo document to JSON for serialization.\"\"\"\n if isinstance(mongo_doc, (QuerySet, list)):\n return [jsonify(element) for element in mongo_doc]\n result_dict = mongo_doc.to_mongo().to_dict()\n clean_dict = scrub_object(result_dict)\n return clean_dict\n\n\ndef boxplot(values):\n \"\"\"Calculate percentiles needed for a boxplot.\"\"\"\n percentiles = percentile(values, [0, 25, 50, 75, 100])\n result = {'min_val': percentiles[0], 'q1_val': percentiles[1],\n 'mean_val': percentiles[2], 'q3_val': percentiles[3], 'max_val':\n percentiles[4]}\n return result\n\n\ndef scrub_category_val(category_val):\n \"\"\"Make sure that category val is a string with positive length.\"\"\"\n if not isinstance(category_val, str):\n category_val = str(category_val)\n if category_val.lower() == 'nan':\n category_val = 'NaN'\n if not category_val:\n category_val = 'NaN'\n return category_val\n\n\ndef collate_samples(tool_name, fields, 
samples):\n \"\"\"Group a set of ToolResult fields from a set of samples by sample name.\"\"\"\n sample_dict = {}\n for sample in samples:\n sample_name = sample['name']\n sample_dict[sample_name] = {}\n tool_result = sample[tool_name]\n for field in fields:\n sample_dict[sample_name][field] = tool_result[field]\n return sample_dict\n\n\ndef categories_from_metadata(samples, min_size=2):\n \"\"\"\n Create dict of categories and their values from sample metadata.\n\n Parameters\n ----------\n samples : list\n List of sample models.\n min_size: int\n Minimum number of values required for a given metadata item to\n be included in returned categories.\n\n Returns\n -------\n dict\n Dictionary of form {<category_name>: [category_value[, category_value]]}\n\n \"\"\"\n categories = {}\n all_metadata = [sample['metadata'] for sample in samples]\n for metadata in all_metadata:\n properties = [prop for prop in metadata.keys()]\n for prop in properties:\n if prop not in categories:\n categories[prop] = set([])\n category_val = metadata[prop]\n category_val = scrub_category_val(category_val)\n categories[prop].add(category_val)\n categories = {category_name: list(category_values) for category_name,\n category_values in categories.items() if len(category_values) >=\n min_size}\n return categories\n",
"step-5": "\"\"\"Utilities for AnalysisModules.\"\"\"\n\nimport inspect\n\nfrom mongoengine import QuerySet\nfrom numpy import percentile\n\nfrom .modules import AnalysisModule\n\n\ndef get_primary_module(package):\n \"\"\"Extract AnalysisModule primary module from package.\"\"\"\n def test_submodule(submodule):\n \"\"\"Test a submodule to see if it is an AnalysisModule module.\"\"\"\n is_correct_subclass = issubclass(submodule, AnalysisModule)\n # Ensure submodule is defined within the package we are inspecting (and not 'base')\n is_correct_module = package.__name__ in submodule.__module__\n return is_correct_subclass and is_correct_module\n\n submodules = inspect.getmembers(package, inspect.isclass)\n module = next(submodule for _, submodule in submodules\n if test_submodule(submodule))\n return module\n\n\ndef scrub_object(obj):\n \"\"\"Remove protected fields from object (dict or list).\"\"\"\n if isinstance(obj, list):\n return [scrub_object(item) for item in obj]\n if isinstance(obj, dict):\n clean_dict = {key: scrub_object(value)\n for key, value in obj.items()\n if not key.startswith('_')}\n return clean_dict\n return obj\n\n\ndef jsonify(mongo_doc):\n \"\"\"Convert Mongo document to JSON for serialization.\"\"\"\n if isinstance(mongo_doc, (QuerySet, list,)):\n return [jsonify(element) for element in mongo_doc]\n result_dict = mongo_doc.to_mongo().to_dict()\n clean_dict = scrub_object(result_dict)\n return clean_dict\n\n\ndef boxplot(values):\n \"\"\"Calculate percentiles needed for a boxplot.\"\"\"\n percentiles = percentile(values, [0, 25, 50, 75, 100])\n result = {'min_val': percentiles[0],\n 'q1_val': percentiles[1],\n 'mean_val': percentiles[2],\n 'q3_val': percentiles[3],\n 'max_val': percentiles[4]}\n return result\n\n\ndef scrub_category_val(category_val):\n \"\"\"Make sure that category val is a string with positive length.\"\"\"\n if not isinstance(category_val, str):\n category_val = str(category_val)\n if category_val.lower() == 'nan':\n 
category_val = 'NaN'\n if not category_val:\n category_val = 'NaN'\n return category_val\n\n\ndef collate_samples(tool_name, fields, samples):\n \"\"\"Group a set of ToolResult fields from a set of samples by sample name.\"\"\"\n sample_dict = {}\n for sample in samples:\n sample_name = sample['name']\n sample_dict[sample_name] = {}\n tool_result = sample[tool_name]\n for field in fields:\n sample_dict[sample_name][field] = tool_result[field]\n\n return sample_dict\n\n\ndef categories_from_metadata(samples, min_size=2):\n \"\"\"\n Create dict of categories and their values from sample metadata.\n\n Parameters\n ----------\n samples : list\n List of sample models.\n min_size: int\n Minimum number of values required for a given metadata item to\n be included in returned categories.\n\n Returns\n -------\n dict\n Dictionary of form {<category_name>: [category_value[, category_value]]}\n\n \"\"\"\n categories = {}\n\n # Gather categories and values\n all_metadata = [sample['metadata'] for sample in samples]\n for metadata in all_metadata:\n properties = [prop for prop in metadata.keys()]\n for prop in properties:\n if prop not in categories:\n categories[prop] = set([])\n category_val = metadata[prop]\n category_val = scrub_category_val(category_val)\n categories[prop].add(category_val)\n\n # Filter for minimum number of values\n categories = {category_name: list(category_values)\n for category_name, category_values in categories.items()\n if len(category_values) >= min_size}\n\n return categories\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def main():
subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))
for i in range(len(subs)):
daily_github_upload(subs[i])
print('_' * 40 + '\n\n' + 'Uploaded {0} to Github. '.format(i) +
'\n' + '_' * 40)
time.sleep(86400)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def daily_github_upload(sub_to_repo):
g = Github('****************************************')
current_subdir = sub_to_repo
title = current_subdir[current_subdir.rindex('\\') + 1:]
repo = g.get_user().create_repo(title)
repo.create_file('README.MD', 'A readme file',
'This was an auto-upload on ' + str(datetime.datetime.now()))
commit_message = 'This was automatically committed.'
file_list = []
file_names = []
for subdir, dirs, files in os.walk(current_subdir):
for file in files:
print(os.path.join(subdir, file))
file_list.append(os.path.join(subdir, file))
file_names.append(file)
master_ref = repo.get_git_ref('heads/master')
master_sha = master_ref.object.sha
base_tree = repo.get_git_tree(master_sha)
element_list = list()
for i, entry in enumerate(file_list):
with open(entry) as input_file:
data = input_file.read()
if entry.endswith('.png' or '.pdf' or '.xlsx'):
data = base64.b64encode(data)
element = InputGitTreeElement(file_names[i], '100644', 'blob', data)
element_list.append(element)
tree = repo.create_git_tree(element_list, base_tree)
parent = repo.get_git_commit(master_sha)
commit = repo.create_git_commit(commit_message, tree, [parent])
master_ref.edit(commit.sha)
shutil.rmtree(current_subdir)
def main():
subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))
for i in range(len(subs)):
daily_github_upload(subs[i])
print('_' * 40 + '\n\n' + 'Uploaded {0} to Github. '.format(i) +
'\n' + '_' * 40)
time.sleep(86400)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def subdir_maker(directory):
subdirs = []
for i, j, y in os.walk(directory):
subdirs.append(i)
subdirs.remove(subdirs[0])
return subdirs
def daily_github_upload(sub_to_repo):
g = Github('****************************************')
current_subdir = sub_to_repo
title = current_subdir[current_subdir.rindex('\\') + 1:]
repo = g.get_user().create_repo(title)
repo.create_file('README.MD', 'A readme file',
'This was an auto-upload on ' + str(datetime.datetime.now()))
commit_message = 'This was automatically committed.'
file_list = []
file_names = []
for subdir, dirs, files in os.walk(current_subdir):
for file in files:
print(os.path.join(subdir, file))
file_list.append(os.path.join(subdir, file))
file_names.append(file)
master_ref = repo.get_git_ref('heads/master')
master_sha = master_ref.object.sha
base_tree = repo.get_git_tree(master_sha)
element_list = list()
for i, entry in enumerate(file_list):
with open(entry) as input_file:
data = input_file.read()
if entry.endswith('.png' or '.pdf' or '.xlsx'):
data = base64.b64encode(data)
element = InputGitTreeElement(file_names[i], '100644', 'blob', data)
element_list.append(element)
tree = repo.create_git_tree(element_list, base_tree)
parent = repo.get_git_commit(master_sha)
commit = repo.create_git_commit(commit_message, tree, [parent])
master_ref.edit(commit.sha)
shutil.rmtree(current_subdir)
def main():
subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))
for i in range(len(subs)):
daily_github_upload(subs[i])
print('_' * 40 + '\n\n' + 'Uploaded {0} to Github. '.format(i) +
'\n' + '_' * 40)
time.sleep(86400)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os, shutil, base64, time, datetime
from github import Github, InputGitTreeElement
def subdir_maker(directory):
    """Return the subdirectory paths under *directory* (the root itself excluded)."""
    # os.walk yields (dirpath, dirnames, filenames); each dirpath is one
    # directory in the tree, starting with the root itself.
    subdirs = [dirpath for dirpath, _dirs, _files in os.walk(directory)]
    # Drop the root entry by slicing. Unlike the original
    # subdirs.remove(subdirs[0]), this does not raise IndexError when the
    # walk yields nothing (e.g. a nonexistent directory) — it returns [].
    return subdirs[1:]
def daily_github_upload(sub_to_repo):
    """Create a new GitHub repo named after the subdirectory and upload its files.

    Nested subdirectories are not supported and the directory must not be
    empty. The local subdirectory is deleted once the commit succeeds so
    it is never uploaded twice.
    """
    # Authenticate with a 40-character personal access token.
    g = Github('****************************************')
    current_subdir = sub_to_repo
    # The repo title is the last path component of the subdirectory.
    title = current_subdir[current_subdir.rindex('\\') + 1:]
    repo = g.get_user().create_repo(title)
    # Seed the repo with a README so the 'master' branch exists to commit onto.
    repo.create_file('README.MD', 'A readme file',
        'This was an auto-upload on ' + str(datetime.datetime.now()))
    commit_message = 'This was automatically committed.'
    # Collect the full paths and bare names of every file to upload.
    file_list = []
    file_names = []
    for subdir, dirs, files in os.walk(current_subdir):
        for file in files:
            print(os.path.join(subdir, file))
            file_list.append(os.path.join(subdir, file))
            file_names.append(file)
    master_ref = repo.get_git_ref('heads/master')
    master_sha = master_ref.object.sha
    base_tree = repo.get_git_tree(master_sha)
    element_list = list()
    for i, entry in enumerate(file_list):
        # BUG FIX: the original tested entry.endswith('.png' or '.pdf' or '.xlsx'),
        # which short-circuits to endswith('.png') and never matched PDF/XLSX.
        # str.endswith accepts a tuple of suffixes instead.
        if entry.endswith(('.png', '.pdf', '.xlsx')):
            # Binary formats must be read in binary mode; b64encode also
            # requires bytes (the original passed str, which raises TypeError)
            # and returns bytes, which we decode for the API.
            with open(entry, 'rb') as input_file:
                data = base64.b64encode(input_file.read()).decode('ascii')
        else:
            with open(entry) as input_file:
                data = input_file.read()
        element = InputGitTreeElement(file_names[i], '100644', 'blob', data)
        element_list.append(element)
    # Build a tree from the elements and commit it onto master.
    tree = repo.create_git_tree(element_list, base_tree)
    parent = repo.get_git_commit(master_sha)
    commit = repo.create_git_commit(commit_message, tree, [parent])
    master_ref.edit(commit.sha)
    # Remove the uploaded subdirectory so the next run does not repeat it.
    shutil.rmtree(current_subdir)
def main():
    """Upload each subdirectory next to this script to GitHub, one per day."""
    pending = subdir_maker(os.path.dirname(os.path.realpath(__file__)))
    # One upload per iteration; `pending` keeps the full list of paths so
    # what was uploaded can still be inspected afterwards.
    for index, subdirectory in enumerate(pending):
        daily_github_upload(subdirectory)
        banner = '_' * 40
        print(banner + '\n\n' + 'Uploaded {0} to Github. '.format(index) +
              '\n' + banner)
        # Wait 24 hours before processing the next subdirectory.
        time.sleep(86400)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 22:49:00 2020
@author: Drew
____________________________________________________________________
basic_github_auto_uploader.py - A Basic Automated GitHub Uploader
____________________________________________________________________
1. Requirements:
Version: Python 3.7
Built-in Libs: base64, os, shutil, time, datetime
Dependencies: pygithub, Git (maybe)
2. Description:
This file automatically uploads subdirectories as new repositories in
GitHub. You will need an internet connection to do this.
The first function [subdir_maker(directory)] will sort the subdirectories
in the folder.
The second function [daily_github_upload(subdirs)] will do the actual repo
creation and commit.
    The second function can be run on a schedule using a for loop and
time.sleep or a dedicated scheduling library. You need to restart the
script if you add new subdirectories that you want to upload.
3. Running Instructions:
Place this file in a root directory where you keep your project
subdirectories. Keep the file structure in the subdirectories flat (don't
make subdirectories in the subdirectory) as this is not handled in this
simplified script. Also, support for PDFs is a bit sketchy.
Be sure to replace the Github key in the second function with your own
generated key. You can configure the README.MD file as well to say a
custom message.
4. Performance:
Performance is poor for now. The script needs to run constantly and uses
quite a bit of memory. A more efficient future version will be made.
"""
# Import libraries that we need to use.
import os, shutil, base64, time, datetime
from github import Github, InputGitTreeElement
# Function 1: Given a directory/file path, return all the subdirectories in
# the given directory in a list of strings. Uses the os library.
# Individual files should not be left in the directory.
def subdir_maker(directory):
    """Return all subdirectory paths under *directory* as a list of strings."""
    # Walk through the directory tree; os.walk yields (dirpath, dirnames,
    # filenames) tuples, and the dirpath values are exactly the directories.
    subdirs = [dirpath for dirpath, _dirnames, _filenames in os.walk(directory)]
    # os.walk's first element is the directory itself, so drop it. Slicing
    # (unlike the original subdirs.remove(subdirs[0])) does not raise when
    # the walk yields nothing, e.g. for a nonexistent directory.
    return subdirs[1:]
# Function 2: When invoked with a filepath, upload all the files.
# Does not support subdirectories within the subdirectory.
# Also, cannot be empty!
def daily_github_upload(sub_to_repo):
    """Create a GitHub repository named after *sub_to_repo* and upload its files.

    Does not support subdirectories within the subdirectory, and the
    subdirectory cannot be empty. The local subdirectory is removed once
    the commit has been pushed so it is never uploaded twice.
    """
    # Create a Github object that we can use to connect to Github and do work.
    # It must be initialized with a 40-character secret key. You generate this
    # on Github itself.
    g = Github('****************************************')
    # Copy the location to a local variable.
    current_subdir = sub_to_repo
    # Extract the subdirectory name - this will be the Repo name.
    title = current_subdir[current_subdir.rindex("\\")+1:]
    # Create Repo through Github object. We will not work on the repo object.
    repo = g.get_user().create_repo(title)
    # Initialize with a README.MD file so that 'master' exists to commit onto.
    repo.create_file("README.MD","A readme file","This was an auto-upload on "
                     + str(datetime.datetime.now()))
    # The message we will add under the commit.
    commit_message = "This was automatically committed."
    # Collect the full paths and the bare names of every file to upload.
    file_list = []
    file_names = []
    for subdir, dirs, files in os.walk(current_subdir):
        for file in files:
            print(os.path.join(subdir, file))
            file_list.append(os.path.join(subdir, file))
            file_names.append(file)
    # Get the branch to add to.
    master_ref = repo.get_git_ref('heads/master')
    master_sha = master_ref.object.sha
    base_tree = repo.get_git_tree(master_sha)
    # Build one InputGitTreeElement per file.
    element_list = list()
    for i, entry in enumerate(file_list):
        # BUG FIX: the original condition was
        #   entry.endswith('.png' or '.pdf' or '.xlsx')
        # which short-circuits to endswith('.png') and never matched PDF or
        # XLSX files. str.endswith accepts a tuple of suffixes instead.
        if entry.endswith(('.png', '.pdf', '.xlsx')):
            # Binary formats must be opened in binary mode; b64encode also
            # requires bytes input (the original passed str, which raises
            # TypeError) and returns bytes, which we decode for the API.
            with open(entry, 'rb') as input_file:
                data = base64.b64encode(input_file.read()).decode('ascii')
        else:
            # Plain text file: read it as-is.
            with open(entry) as input_file:
                data = input_file.read()
        element = InputGitTreeElement(file_names[i], '100644', 'blob', data)
        element_list.append(element)
    # Create a tree with the elements and commit it onto master.
    tree = repo.create_git_tree(element_list, base_tree)
    parent = repo.get_git_commit(master_sha)
    commit = repo.create_git_commit(commit_message, tree, [parent])
    master_ref.edit(commit.sha)
    # Remove the subdirectory from the folder so we don't repeat.
    shutil.rmtree(current_subdir)
def main():
# Invoke the subdir_maker() function with the current directory at runtime.
subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))
# Use a loop to call the daily_github_upload() function for each subdir in
# the subs list. We keep the subs in case we want to see what was uploaded.
for i in range(len(subs)):
# Call the function for each elem of the list.
daily_github_upload(subs[i])
# Print what was done.
print("_"*40 + "\n\n" + "Uploaded {0} to Github. ".format(i) + "\n" + "_"*40)
# Sleep for 24 hours then do it again.
time.sleep(86400)
|
flexible
|
{
"blob_id": "bcc3d4e9be0de575c97bb3bf11eeb379ab5be458",
"index": 5380,
"step-1": "<mask token>\n\n\ndef main():\n subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))\n for i in range(len(subs)):\n daily_github_upload(subs[i])\n print('_' * 40 + '\\n\\n' + 'Uploaded {0} to Github. '.format(i) +\n '\\n' + '_' * 40)\n time.sleep(86400)\n",
"step-2": "<mask token>\n\n\ndef daily_github_upload(sub_to_repo):\n g = Github('****************************************')\n current_subdir = sub_to_repo\n title = current_subdir[current_subdir.rindex('\\\\') + 1:]\n repo = g.get_user().create_repo(title)\n repo.create_file('README.MD', 'A readme file', \n 'This was an auto-upload on ' + str(datetime.datetime.now()))\n commit_message = 'This was automatically committed.'\n file_list = []\n file_names = []\n for subdir, dirs, files in os.walk(current_subdir):\n for file in files:\n print(os.path.join(subdir, file))\n file_list.append(os.path.join(subdir, file))\n file_names.append(file)\n master_ref = repo.get_git_ref('heads/master')\n master_sha = master_ref.object.sha\n base_tree = repo.get_git_tree(master_sha)\n element_list = list()\n for i, entry in enumerate(file_list):\n with open(entry) as input_file:\n data = input_file.read()\n if entry.endswith('.png' or '.pdf' or '.xlsx'):\n data = base64.b64encode(data)\n element = InputGitTreeElement(file_names[i], '100644', 'blob', data)\n element_list.append(element)\n tree = repo.create_git_tree(element_list, base_tree)\n parent = repo.get_git_commit(master_sha)\n commit = repo.create_git_commit(commit_message, tree, [parent])\n master_ref.edit(commit.sha)\n shutil.rmtree(current_subdir)\n\n\ndef main():\n subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))\n for i in range(len(subs)):\n daily_github_upload(subs[i])\n print('_' * 40 + '\\n\\n' + 'Uploaded {0} to Github. '.format(i) +\n '\\n' + '_' * 40)\n time.sleep(86400)\n",
"step-3": "<mask token>\n\n\ndef subdir_maker(directory):\n subdirs = []\n for i, j, y in os.walk(directory):\n subdirs.append(i)\n subdirs.remove(subdirs[0])\n return subdirs\n\n\ndef daily_github_upload(sub_to_repo):\n g = Github('****************************************')\n current_subdir = sub_to_repo\n title = current_subdir[current_subdir.rindex('\\\\') + 1:]\n repo = g.get_user().create_repo(title)\n repo.create_file('README.MD', 'A readme file', \n 'This was an auto-upload on ' + str(datetime.datetime.now()))\n commit_message = 'This was automatically committed.'\n file_list = []\n file_names = []\n for subdir, dirs, files in os.walk(current_subdir):\n for file in files:\n print(os.path.join(subdir, file))\n file_list.append(os.path.join(subdir, file))\n file_names.append(file)\n master_ref = repo.get_git_ref('heads/master')\n master_sha = master_ref.object.sha\n base_tree = repo.get_git_tree(master_sha)\n element_list = list()\n for i, entry in enumerate(file_list):\n with open(entry) as input_file:\n data = input_file.read()\n if entry.endswith('.png' or '.pdf' or '.xlsx'):\n data = base64.b64encode(data)\n element = InputGitTreeElement(file_names[i], '100644', 'blob', data)\n element_list.append(element)\n tree = repo.create_git_tree(element_list, base_tree)\n parent = repo.get_git_commit(master_sha)\n commit = repo.create_git_commit(commit_message, tree, [parent])\n master_ref.edit(commit.sha)\n shutil.rmtree(current_subdir)\n\n\ndef main():\n subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))\n for i in range(len(subs)):\n daily_github_upload(subs[i])\n print('_' * 40 + '\\n\\n' + 'Uploaded {0} to Github. '.format(i) +\n '\\n' + '_' * 40)\n time.sleep(86400)\n",
"step-4": "<mask token>\nimport os, shutil, base64, time, datetime\nfrom github import Github, InputGitTreeElement\n\n\ndef subdir_maker(directory):\n subdirs = []\n for i, j, y in os.walk(directory):\n subdirs.append(i)\n subdirs.remove(subdirs[0])\n return subdirs\n\n\ndef daily_github_upload(sub_to_repo):\n g = Github('****************************************')\n current_subdir = sub_to_repo\n title = current_subdir[current_subdir.rindex('\\\\') + 1:]\n repo = g.get_user().create_repo(title)\n repo.create_file('README.MD', 'A readme file', \n 'This was an auto-upload on ' + str(datetime.datetime.now()))\n commit_message = 'This was automatically committed.'\n file_list = []\n file_names = []\n for subdir, dirs, files in os.walk(current_subdir):\n for file in files:\n print(os.path.join(subdir, file))\n file_list.append(os.path.join(subdir, file))\n file_names.append(file)\n master_ref = repo.get_git_ref('heads/master')\n master_sha = master_ref.object.sha\n base_tree = repo.get_git_tree(master_sha)\n element_list = list()\n for i, entry in enumerate(file_list):\n with open(entry) as input_file:\n data = input_file.read()\n if entry.endswith('.png' or '.pdf' or '.xlsx'):\n data = base64.b64encode(data)\n element = InputGitTreeElement(file_names[i], '100644', 'blob', data)\n element_list.append(element)\n tree = repo.create_git_tree(element_list, base_tree)\n parent = repo.get_git_commit(master_sha)\n commit = repo.create_git_commit(commit_message, tree, [parent])\n master_ref.edit(commit.sha)\n shutil.rmtree(current_subdir)\n\n\ndef main():\n subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))\n for i in range(len(subs)):\n daily_github_upload(subs[i])\n print('_' * 40 + '\\n\\n' + 'Uploaded {0} to Github. '.format(i) +\n '\\n' + '_' * 40)\n time.sleep(86400)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 2 22:49:00 2020\n@author: Drew\n\n____________________________________________________________________\n\nbasic_github_auto_uploader.py - A Basic Automated GitHub Uploader\n____________________________________________________________________\n\n1. Requirements:\n Version: Python 3.7\n Built-in Libs: base64, os, shutil, time, datetime\n Dependencies: pygithub, Git (maybe)\n\n2. Description:\n This file automatically uploads subdirectories as new repositories in\n GitHub. You will need an internet connection to do this.\n The first function [subdir_maker(directory)] will sort the subdirectories\n in the folder. \n The second function [daily_github_upload(subdirs)] will do the actual repo \n creation and commit.\n The second function can be fun on a schedule using a for loop and \n time.sleep or a dedicated scheduling library. You need to restart the\n script if you add new subdirectories that you want to upload. \n\n3. Running Instructions:\n Place this file in a root directory where you keep your project\n subdirectories. Keep the file structure in the subdirectories flat (don't\n make subdirectories in the subdirectory) as this is not handled in this \n simplified script. Also, support for PDFs is a bit sketchy. \n Be sure to replace the Github key in the second function with your own \n generated key. You can configure the README.MD file as well to say a \n custom message. \n \n4. Performance:\n Performance is poor for now. The script needs to run constantly and uses\n quite a bit of memory. A more efficient future version will be made. \n\"\"\"\n\n# Import libraries that we need to use.\nimport os, shutil, base64, time, datetime\nfrom github import Github, InputGitTreeElement\n\n# Function 1: Given a directory/file path, return all the subdirectories in\n# the given directory in a list of strings. 
Uses the os library.\n# Individual files should not be left in the directory.\ndef subdir_maker(directory):\n # Create an empty list to store the resultant subdirectories in.\n subdirs = []\n # Walk through the directory and add items to the empty list we made.\n for i,j,y in os.walk(directory):\n subdirs.append(i)\n # os.walk's first element is the directory itself, so remove it. \n subdirs.remove(subdirs[0])\n # Return the list of subdirectories. \n return subdirs\n\n# Function 2: When invoked with a filepath, upload all the files. \n# Does not support subdirectories within the subdirectory. \n# Also, cannot be empty!\ndef daily_github_upload(sub_to_repo):\n # Create a Github object that we can use to connect to Github and do work.\n # It must be initialized with a 40-character secret key. You generate this\n # on Github itself. \n g = Github('****************************************')\n # Copy the location to a local variable. \n current_subdir = sub_to_repo\n # Extract the subdirectory name - this will be the Repo name. \n title = current_subdir[current_subdir.rindex(\"\\\\\")+1:]\n # Create Repo through Github object. We will not work on the repo object.\n repo = g.get_user().create_repo(title)\n # Initialize with a README.MD file. You can configure this as needed. \n repo.create_file(\"README.MD\",\"A readme file\",\"This was an auto-upload on \"\n + str(datetime.datetime.now()))\n # The message we will add under the commit. \n commit_message = \"This was automatically committed.\"\n # Create a list of file objects.\n file_list = []\n # Create a list of file names.\n file_names = []\n # Do a walk through the subdirectory. \n for subdir, dirs, files in os.walk(current_subdir):\n # For the files in the subdirectory, print them and then add them to\n # list we created, along with the name to the other list. 
\n for file in files:\n print(os.path.join(subdir, file))\n file_list.append(os.path.join(subdir, file))\n file_names.append(file)\n # Get the branch to add to. \n master_ref = repo.get_git_ref('heads/master')\n master_sha = master_ref.object.sha\n base_tree = repo.get_git_tree(master_sha)\n # Create an empty list to add files to. \n element_list = list()\n # For each file in list of file objects, read and adjust as needed.\n for i, entry in enumerate(file_list):\n # If normal file type.\n with open(entry) as input_file:\n data = input_file.read()\n # If proprietary file type, encode it. \n if entry.endswith('.png' or '.pdf' or '.xlsx'):\n data = base64.b64encode(data)\n # Put each file that was encoded from above into an appropriate format \n # to add to a branch.\n element = InputGitTreeElement(file_names[i], '100644', 'blob', data)\n # Append the object created above to the list made before the loop. \n element_list.append(element)\n # Create a tree with the elements and specify settings to add the element\n # list to the repo. \n tree = repo.create_git_tree(element_list, base_tree)\n parent = repo.get_git_commit(master_sha)\n # Commit!\n commit = repo.create_git_commit(commit_message, tree, [parent])\n master_ref.edit(commit.sha)\n # Remove the subdirectory from the folder so we don't repeat. \n shutil.rmtree(current_subdir)\n\ndef main():\n # Invoke the subdir_maker() function with the current directory at runtime. \n subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))\n # Use a loop to call the daily_github_upload() function for each subdir in\n # the subs list. We keep the subs in case we want to see what was uploaded. \n for i in range(len(subs)):\n # Call the function for each elem of the list. \n daily_github_upload(subs[i])\n # Print what was done. \n print(\"_\"*40 + \"\\n\\n\" + \"Uploaded {0} to Github. \".format(i) + \"\\n\" + \"_\"*40)\n # Sleep for 24 hours then do it again. \n time.sleep(86400)\n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def partial_correlation_loop(solver, x, y, ensemble=None):
e_hat = np.zeros(y.shape[1])
for i in range(y.shape[1]):
y_i = y[:, i].reshape(-1, 1)
y_not_i = np.delete(y, i, axis=1)
r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)
e_hat[i] = np.sum(r ** 2)
return e_hat
class PartialCorrelation(object):
def __init__(self, solver=None, bagging=False):
self.solver = RidgeCV() if solver is None else solver
self.bagging = bagging
def fit(self, X, Y):
ensemble = None
if self.bagging:
cv = ShuffleSplit(test_size=0.5)
ensemble = [(train, test) for train, test in cv.split(X, Y)]
self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)
return self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def correlation(x, y):
a = (x - x.mean(0)) / x.std(0)
b = (y - y.mean(0)) / y.std(0)
return a.T @ b / x.shape[0]
<|reserved_special_token_0|>
def partial_correlation_loop(solver, x, y, ensemble=None):
e_hat = np.zeros(y.shape[1])
for i in range(y.shape[1]):
y_i = y[:, i].reshape(-1, 1)
y_not_i = np.delete(y, i, axis=1)
r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)
e_hat[i] = np.sum(r ** 2)
return e_hat
class PartialCorrelation(object):
def __init__(self, solver=None, bagging=False):
self.solver = RidgeCV() if solver is None else solver
self.bagging = bagging
def fit(self, X, Y):
ensemble = None
if self.bagging:
cv = ShuffleSplit(test_size=0.5)
ensemble = [(train, test) for train, test in cv.split(X, Y)]
self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)
return self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def correlation(x, y):
a = (x - x.mean(0)) / x.std(0)
b = (y - y.mean(0)) / y.std(0)
return a.T @ b / x.shape[0]
def partial_correlation_bagging(solver, x, y, z, ensemble=None):
if ensemble is None:
ensemble = [(range(len(x)), range(len(x)))]
r = []
for set1, set2 in ensemble:
p_x = solver.fit(z[set1], x[set1]).predict(z[set2])
p_y = solver.fit(z[set1], y[set1]).predict(z[set2])
r.append(correlation(x[set2] - p_x, y[set2] - p_y))
return np.mean(r, 0)
def partial_correlation_loop(solver, x, y, ensemble=None):
e_hat = np.zeros(y.shape[1])
for i in range(y.shape[1]):
y_i = y[:, i].reshape(-1, 1)
y_not_i = np.delete(y, i, axis=1)
r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)
e_hat[i] = np.sum(r ** 2)
return e_hat
class PartialCorrelation(object):
def __init__(self, solver=None, bagging=False):
self.solver = RidgeCV() if solver is None else solver
self.bagging = bagging
def fit(self, X, Y):
ensemble = None
if self.bagging:
cv = ShuffleSplit(test_size=0.5)
ensemble = [(train, test) for train, test in cv.split(X, Y)]
self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)
return self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def correlation(x, y):
a = (x - x.mean(0)) / x.std(0)
b = (y - y.mean(0)) / y.std(0)
return a.T @ b / x.shape[0]
def partial_correlation_bagging(solver, x, y, z, ensemble=None):
if ensemble is None:
ensemble = [(range(len(x)), range(len(x)))]
r = []
for set1, set2 in ensemble:
p_x = solver.fit(z[set1], x[set1]).predict(z[set2])
p_y = solver.fit(z[set1], y[set1]).predict(z[set2])
r.append(correlation(x[set2] - p_x, y[set2] - p_y))
return np.mean(r, 0)
def partial_correlation_loop(solver, x, y, ensemble=None):
e_hat = np.zeros(y.shape[1])
for i in range(y.shape[1]):
y_i = y[:, i].reshape(-1, 1)
y_not_i = np.delete(y, i, axis=1)
r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)
e_hat[i] = np.sum(r ** 2)
return e_hat
class PartialCorrelation(object):
def __init__(self, solver=None, bagging=False):
self.solver = RidgeCV() if solver is None else solver
self.bagging = bagging
def fit(self, X, Y):
ensemble = None
if self.bagging:
cv = ShuffleSplit(test_size=0.5)
ensemble = [(train, test) for train, test in cv.split(X, Y)]
self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)
return self
if __name__ == '__main__':
from sklearn.preprocessing import scale
from sklearn.metrics import roc_auc_score
"""Y = F(EX+N)"""
np.random.seed(0)
n = 1000
nE = nX = 10
nY = 10
snr = 25
selected = 0.5
selected = min(int(np.floor(selected * nX)) + 1, nX - 1)
E = np.identity(nX)
E[selected:] = 0
Cx = np.random.randn(nX, nX)
Cx = Cx.dot(Cx.T) / nX
X = np.random.multivariate_normal(np.zeros(nX), Cx, n)
N = np.random.randn(n, nE)
F = np.random.randn(nY, nE)
Y = (X @ E.T * snr + N) @ F.T
X = scale(X)
Y = scale(Y)
partialcorr = PartialCorrelation()
train, test = range(0, n, 2), range(1, n, 2)
E_hat = partialcorr.fit(X[train], Y[train]).E_
print('E_auc', roc_auc_score(np.diag(E), E_hat))
<|reserved_special_token_1|>
"""David's first approach when I exposed the problem.
Reasonable to add in the comparison?
"""
import numpy as np
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import ShuffleSplit
def correlation(x, y):
    """Pearson correlation matrix between the columns of x and those of y.

    Both inputs are standardized column-wise (population std, ddof=0) and
    the cross-product is normalized by the number of samples.
    """
    n_samples = x.shape[0]
    x_std = (x - x.mean(axis=0)) / x.std(axis=0)
    y_std = (y - y.mean(axis=0)) / y.std(axis=0)
    return x_std.T @ y_std / n_samples
def partial_correlation_bagging(solver, x, y, z, ensemble=None):
    """Correlation between x and y after regressing z out of both, averaged
    over the (fit_indices, eval_indices) splits in *ensemble*."""
    if ensemble is None:
        # Degenerate ensemble: fit and evaluate on the full data set.
        full = range(len(x))
        ensemble = [(full, full)]
    correlations = []
    for fit_idx, eval_idx in ensemble:
        # Residualize x, then y, against z (the solver is refit each time).
        residual_x = x[eval_idx] - solver.fit(z[fit_idx], x[fit_idx]).predict(z[eval_idx])
        residual_y = y[eval_idx] - solver.fit(z[fit_idx], y[fit_idx]).predict(z[eval_idx])
        correlations.append(correlation(residual_x, residual_y))
    return np.mean(correlations, 0)
def partial_correlation_loop(solver, x, y, ensemble=None):
    """Score each column of y by its partial correlation with x, given the
    remaining columns of y."""
    n_targets = y.shape[1]
    e_hat = np.zeros(n_targets)
    for col in range(n_targets):
        target = y[:, col].reshape(-1, 1)
        confounds = np.delete(y, col, axis=1)
        r = partial_correlation_bagging(solver, x, target, confounds, ensemble)
        # Collapse the (possibly vector-valued) correlation into one score.
        e_hat[col] = np.sum(r ** 2)
    return e_hat
class PartialCorrelation(object):
    """Estimator scoring each target column by its bagged partial correlation
    with the input features. Scores are stored in ``self.E_`` after fit."""

    def __init__(self, solver=None, bagging=False):
        # Fall back to a cross-validated ridge regression when no solver
        # is supplied.
        if solver is None:
            solver = RidgeCV()
        self.solver = solver
        self.bagging = bagging

    def fit(self, X, Y):
        """Compute per-column partial-correlation scores into ``self.E_``."""
        if self.bagging:
            # Bagging: average over random half/half train-test splits.
            splitter = ShuffleSplit(test_size=0.5)
            ensemble = list(splitter.split(X, Y))
        else:
            ensemble = None
        self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)
        return self
if __name__ == '__main__':
    from sklearn.preprocessing import scale
    from sklearn.metrics import roc_auc_score
    # Simulate data from the linear mixed model Y = F(EX + N): E selects a
    # subset of X's features, N is additive source noise, F linearly mixes
    # the sources into the observed targets.
    """Y = F(EX+N)"""
    # Fix the RNG so the simulation is reproducible.
    np.random.seed(0)
    # Problem dimensionality
    n = 1000
    nE = nX = 10
    nY = 10
    snr = 25 # signal to noise ratio
    selected = .5 # fraction of X features selected by E
    # Convert the fraction into a feature count within [1, nX - 1].
    selected = min(int(np.floor(selected*nX)) + 1, nX-1)
    E = np.identity(nX)
    # Zero out the rows for the unselected features.
    E[selected:] = 0
    # X covariance
    Cx = np.random.randn(nX, nX)
    Cx = Cx.dot(Cx.T) / nX # symmetric positive semi-definite
    X = np.random.multivariate_normal(np.zeros(nX), Cx, n)
    # Noise (homoscedastic in source space)
    N = np.random.randn(n, nE)
    # Forward operator (linear mixture)
    F = np.random.randn(nY, nE)
    Y = ((X @ E.T) * snr + N) @ F.T
    # Standardize features and targets column-wise.
    X = scale(X)
    Y = scale(Y)
    # Fit on the even-indexed half; the odd-indexed half is reserved for a
    # scoring step that is not implemented yet (see TODO below).
    partialcorr = PartialCorrelation()
    train, test = range(0, n, 2), range(1, n, 2)
    E_hat = partialcorr.fit(X[train], Y[train]).E_
    # score = partialcorr.score(X[test], Y[test]) # TODO
    # ROC-AUC of recovering E's diagonal (the ground-truth selection).
    print('E_auc', roc_auc_score(np.diag(E), E_hat))
|
flexible
|
{
"blob_id": "dfd2b515e08f285345c750bf00f6a55f43d60039",
"index": 8379,
"step-1": "<mask token>\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r ** 2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=0.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef correlation(x, y):\n a = (x - x.mean(0)) / x.std(0)\n b = (y - y.mean(0)) / y.std(0)\n return a.T @ b / x.shape[0]\n\n\n<mask token>\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r ** 2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=0.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef correlation(x, y):\n a = (x - x.mean(0)) / x.std(0)\n b = (y - y.mean(0)) / y.std(0)\n return a.T @ b / x.shape[0]\n\n\ndef partial_correlation_bagging(solver, x, y, z, ensemble=None):\n if ensemble is None:\n ensemble = [(range(len(x)), range(len(x)))]\n r = []\n for set1, set2 in ensemble:\n p_x = solver.fit(z[set1], x[set1]).predict(z[set2])\n p_y = solver.fit(z[set1], y[set1]).predict(z[set2])\n r.append(correlation(x[set2] - p_x, y[set2] - p_y))\n return np.mean(r, 0)\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r ** 2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=0.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef correlation(x, y):\n a = (x - x.mean(0)) / x.std(0)\n b = (y - y.mean(0)) / y.std(0)\n return a.T @ b / x.shape[0]\n\n\ndef partial_correlation_bagging(solver, x, y, z, ensemble=None):\n if ensemble is None:\n ensemble = [(range(len(x)), range(len(x)))]\n r = []\n for set1, set2 in ensemble:\n p_x = solver.fit(z[set1], x[set1]).predict(z[set2])\n p_y = solver.fit(z[set1], y[set1]).predict(z[set2])\n r.append(correlation(x[set2] - p_x, y[set2] - p_y))\n return np.mean(r, 0)\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r ** 2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=0.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\nif __name__ == '__main__':\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n \"\"\"Y = F(EX+N)\"\"\"\n np.random.seed(0)\n n = 1000\n nE = nX = 10\n nY = 10\n snr = 25\n selected = 0.5\n selected = min(int(np.floor(selected * nX)) + 1, nX - 1)\n E = np.identity(nX)\n E[selected:] = 0\n Cx = np.random.randn(nX, nX)\n Cx = Cx.dot(Cx.T) / nX\n X = np.random.multivariate_normal(np.zeros(nX), Cx, n)\n N = np.random.randn(n, nE)\n F = np.random.randn(nY, nE)\n Y = (X @ E.T * snr + N) @ F.T\n X = scale(X)\n Y = scale(Y)\n partialcorr = PartialCorrelation()\n train, test = range(0, n, 2), range(1, n, 2)\n E_hat = partialcorr.fit(X[train], Y[train]).E_\n print('E_auc', roc_auc_score(np.diag(E), E_hat))\n",
"step-5": "\"\"\"David's first approach when I exposed the problem.\nReasonable to add in the comparison?\n\"\"\"\nimport numpy as np\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.model_selection import ShuffleSplit\n\n\ndef correlation(x, y):\n a = (x - x.mean(0)) / x.std(0)\n b = (y - y.mean(0)) / y.std(0)\n return a.T @ b / x.shape[0]\n\n\ndef partial_correlation_bagging(solver, x, y, z, ensemble=None):\n if ensemble is None:\n ensemble = [(range(len(x)), range(len(x))), ]\n r = []\n for set1, set2 in ensemble:\n p_x = solver.fit(z[set1], x[set1]).predict(z[set2])\n p_y = solver.fit(z[set1], y[set1]).predict(z[set2])\n r.append(correlation(x[set2] - p_x, y[set2] - p_y))\n return np.mean(r, 0)\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r**2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\nif __name__ == '__main__':\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n # Simulate data\n \"\"\"Y = F(EX+N)\"\"\"\n\n np.random.seed(0)\n\n # Problem dimensionality\n n = 1000\n nE = nX = 10\n nY = 10\n snr = 25 # signal to noise ratio\n selected = .5 # number of X feature selected by E\n\n selected = min(int(np.floor(selected*nX)) + 1, nX-1)\n E = np.identity(nX)\n E[selected:] = 0\n\n # X covariance\n Cx = np.random.randn(nX, nX)\n Cx = Cx.dot(Cx.T) / nX # sym pos-semidefin\n X = 
np.random.multivariate_normal(np.zeros(nX), Cx, n)\n\n # Noise (homosedastic in source space)\n N = np.random.randn(n, nE)\n\n # Forward operator (linear mixture)\n F = np.random.randn(nY, nE)\n\n Y = ((X @ E.T) * snr + N) @ F.T\n\n X = scale(X)\n Y = scale(Y)\n\n # Fit method\n partialcorr = PartialCorrelation()\n train, test = range(0, n, 2), range(1, n, 2)\n E_hat = partialcorr.fit(X[train], Y[train]).E_\n # score = partialcorr.score(X[test], Y[test]) # TODO\n\n print('E_auc', roc_auc_score(np.diag(E), E_hat))\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
def workingDate(start, end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days + 1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(cal.holidays(2020))
def workingDate(start, end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days + 1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
<|reserved_special_token_0|>
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n' * 3)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cal = UnitedKingdom()
print(cal.holidays(2020))
def workingDate(start, end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days + 1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
start = datetime.today()
end = datetime(2020, 12, 23)
r = workingDate(start, end)
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n' * 3)
<|reserved_special_token_1|>
from datetime import date, timedelta, datetime
from workalendar.europe import UnitedKingdom
cal = UnitedKingdom()
print(cal.holidays(2020))
def workingDate(start, end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days + 1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
start = datetime.today()
end = datetime(2020, 12, 23)
r = workingDate(start, end)
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n' * 3)
<|reserved_special_token_1|>
# -*- coding: UTF-8 -*-
# File name: ukWorkingDays
# Created by JKChang
# 29/07/2020, 11:20
# Tag:
# Description:
from datetime import date,timedelta,datetime
from workalendar.europe import UnitedKingdom
cal = UnitedKingdom()
print(cal.holidays(2020))
def workingDate(start,end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days +1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
start = datetime.today()
end = datetime(2020, 12, 23)
r = workingDate(start,end)
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n'*3)
|
flexible
|
{
"blob_id": "feed412278d9e711e49ef209ece0876c1de4a873",
"index": 886,
"step-1": "<mask token>\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\n<mask token>\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-3": "<mask token>\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start, end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-4": "from datetime import date, timedelta, datetime\nfrom workalendar.europe import UnitedKingdom\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start, end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-5": "# -*- coding: UTF-8 -*-\n# File name: ukWorkingDays\n# Created by JKChang\n# 29/07/2020, 11:20\n# Tag:\n# Description:\n\nfrom datetime import date,timedelta,datetime\nfrom workalendar.europe import UnitedKingdom\n\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\ndef workingDate(start,end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days +1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start,end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n'*3)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import os
import random
import string
# Use cryptographic-safe random generator as provided by the OS.
random_generator = random.SystemRandom()
def string_id(length=8):
""" Generate Random ID.
Random ID contains ascii letters and digitis.
Args:
length (int): Character length of id.
Returns:
Random id string.
"""
return ''.join(random.choice(string.ascii_letters +
string.digits)
for _ in range(length))
# Request ID Counter
####################
req_c = None
pid = None
def request_id():
# Using random is pretty slow. This is way quicker.
# It uses cached proc id. Then only does this append counter.
# per request...
#
# It may not be as unique, but highly unlikely to collide
# with recent requet ids.
global req_c, pid
if req_c is None:
req_c = random.randint(1000*1000, 1000*1000*1000)
if pid is None:
pid = str(os.getpid())
req_id = req_c = req_c + 1
req_id = hex(req_id)[2:].zfill(8)[-8:]
return pid + '-' + req_id
|
normal
|
{
"blob_id": "cdbf9427d48f0a5c53b6efe0de7dfea65a8afd83",
"index": 87,
"step-1": "<mask token>\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n",
"step-2": "<mask token>\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\n<mask token>\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n",
"step-3": "<mask token>\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\nreq_c = None\npid = None\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n",
"step-4": "import os\nimport random\nimport string\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\nreq_c = None\npid = None\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\nimport os\nimport random\nimport string\n\n# Use cryptographic-safe random generator as provided by the OS.\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters +\n string.digits)\n for _ in range(length))\n\n\n# Request ID Counter\n####################\n\nreq_c = None\npid = None\n\n\ndef request_id():\n # Using random is pretty slow. This is way quicker.\n # It uses cached proc id. Then only does this append counter.\n # per request...\n #\n # It may not be as unique, but highly unlikely to collide\n # with recent requet ids.\n global req_c, pid\n\n if req_c is None:\n req_c = random.randint(1000*1000, 1000*1000*1000)\n\n if pid is None:\n pid = str(os.getpid())\n\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n\n return pid + '-' + req_id\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
#
# compare-sorts.py
# Copyright (c) 2017 Dylan Brown. All rights reserved.
#
# Use Python 3. Run from within the scripts/ directory.
import os
import sys
import re
import subprocess
# Ensure we don't silently fail by running Python 2.
assert sys.version_info[0] >= 3, "This script requires Python 3.x"
assert os.getcwd().split("/")[-1] == "algorithms-sedgewick-wayne", \
"This script must be run from the project's root directory."
# Number of iterations to average over.
N = 25
# Data file to sort.
# DATA = "./algs4-data/words3.txt"
DATA = "./algs4-data/medTale.txt"
def main():
sorts = ["selection-sort",
"insertion-sort",
"shell-sort"]
for sort in sorts:
exe_path = "./build/{}".format(sort.rstrip())
if not os.path.isfile(exe_path):
raise OSError("The executable {} does not exist.".format(exe_path))
accumulated_time = 0
for i in range(N):
# Note shell=True has security implications. Don't accept external inputs.
b_output = subprocess.check_output(" ".join([exe_path, DATA]), shell=True)
str_output = str(b_output)
# Use regex to extract the number follwing "(ns) =" in the output.
accumulated_time += int(re.findall("\d+", str_output)[0]) # Elapsed time in nanoseconds.
average_time = accumulated_time / N
if "selection-sort" == sort:
print("{:>14} took {:>8} ns on average.".format(sort, int(average_time)))
sel_sort_time = average_time
else:
print("{:>14} took {:>8} ns on average, "
"a {:4.1f}x speedup over selection sort.".format(sort,
int(average_time),
sel_sort_time / average_time))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "501d50fa933f55c178b4b2eba6cfc5b85592beaa",
"index": 8473,
"step-1": "<mask token>\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\n<mask token>\n",
"step-2": "<mask token>\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\n<mask token>\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\nN = 25\nDATA = './algs4-data/medTale.txt'\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nimport re\nimport subprocess\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\nN = 25\nDATA = './algs4-data/medTale.txt'\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n#\n# compare-sorts.py\n# Copyright (c) 2017 Dylan Brown. All rights reserved.\n#\n\n# Use Python 3. Run from within the scripts/ directory.\n\nimport os\nimport sys\nimport re\nimport subprocess\n\n# Ensure we don't silently fail by running Python 2.\nassert sys.version_info[0] >= 3, \"This script requires Python 3.x\"\nassert os.getcwd().split(\"/\")[-1] == \"algorithms-sedgewick-wayne\", \\\n \"This script must be run from the project's root directory.\"\n\n# Number of iterations to average over.\nN = 25\n\n# Data file to sort.\n# DATA = \"./algs4-data/words3.txt\"\nDATA = \"./algs4-data/medTale.txt\"\n\ndef main():\n sorts = [\"selection-sort\",\n \"insertion-sort\",\n \"shell-sort\"]\n\n for sort in sorts:\n exe_path = \"./build/{}\".format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError(\"The executable {} does not exist.\".format(exe_path))\n\n accumulated_time = 0\n for i in range(N):\n # Note shell=True has security implications. Don't accept external inputs.\n b_output = subprocess.check_output(\" \".join([exe_path, DATA]), shell=True)\n str_output = str(b_output)\n # Use regex to extract the number follwing \"(ns) =\" in the output.\n accumulated_time += int(re.findall(\"\\d+\", str_output)[0]) # Elapsed time in nanoseconds.\n average_time = accumulated_time / N\n\n if \"selection-sort\" == sort:\n print(\"{:>14} took {:>8} ns on average.\".format(sort, int(average_time)))\n sel_sort_time = average_time\n else:\n print(\"{:>14} took {:>8} ns on average, \"\n \"a {:4.1f}x speedup over selection sort.\".format(sort,\n int(average_time),\n sel_sort_time / average_time))\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
from math import ceil, log2
def avg(list):
return np.mean(list)
def dispersion(list):
res = 0
for i in list:
res += (i - np.mean(list)) ** 2
return res / len(list)
def variation_coefficient(list):
return (dispersion(list) ** (1/2) / np.mean(list)) * 100
def chi_square(list):
b = sorted(list)
k = ceil(log2(len(list)) + 1)
step = 10000 / k
p = 1 / k
frequency_vector = []
for i in range(k):
counter = 0
for j in b:
if (j > i * step) and (j <= (i + 1) * step):
counter += 1
else:
continue
frequency_vector.append(counter)
chi = 0
for i in range(k):
chi += ((frequency_vector[i] - p * len(list)) ** 2) / (p * len(list))
return 0.8 <= chi <= 16.8
|
normal
|
{
"blob_id": "f2b978b9a4c00469cdd2f5e1e9275df73c7379b8",
"index": 3904,
"step-1": "<mask token>\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\n<mask token>\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n frequency_vector = []\n for i in range(k):\n counter = 0\n for j in b:\n if j > i * step and j <= (i + 1) * step:\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += (frequency_vector[i] - p * len(list)) ** 2 / (p * len(list))\n return 0.8 <= chi <= 16.8\n",
"step-2": "<mask token>\n\n\ndef avg(list):\n return np.mean(list)\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\n<mask token>\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n frequency_vector = []\n for i in range(k):\n counter = 0\n for j in b:\n if j > i * step and j <= (i + 1) * step:\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += (frequency_vector[i] - p * len(list)) ** 2 / (p * len(list))\n return 0.8 <= chi <= 16.8\n",
"step-3": "<mask token>\n\n\ndef avg(list):\n return np.mean(list)\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\ndef variation_coefficient(list):\n return dispersion(list) ** (1 / 2) / np.mean(list) * 100\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n frequency_vector = []\n for i in range(k):\n counter = 0\n for j in b:\n if j > i * step and j <= (i + 1) * step:\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += (frequency_vector[i] - p * len(list)) ** 2 / (p * len(list))\n return 0.8 <= chi <= 16.8\n",
"step-4": "import numpy as np\nfrom math import ceil, log2\n\n\ndef avg(list):\n return np.mean(list)\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\ndef variation_coefficient(list):\n return dispersion(list) ** (1 / 2) / np.mean(list) * 100\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n frequency_vector = []\n for i in range(k):\n counter = 0\n for j in b:\n if j > i * step and j <= (i + 1) * step:\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += (frequency_vector[i] - p * len(list)) ** 2 / (p * len(list))\n return 0.8 <= chi <= 16.8\n",
"step-5": "import numpy as np\nfrom math import ceil, log2\n\n\ndef avg(list):\n return np.mean(list)\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\ndef variation_coefficient(list):\n return (dispersion(list) ** (1/2) / np.mean(list)) * 100\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n\n frequency_vector = []\n\n for i in range(k):\n counter = 0\n for j in b:\n if (j > i * step) and (j <= (i + 1) * step):\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += ((frequency_vector[i] - p * len(list)) ** 2) / (p * len(list))\n\n return 0.8 <= chi <= 16.8\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Generated by Django 3.1.6 on 2021-04-03 20:16
import django.contrib.postgres.fields
from django.db import migrations, models
import enrolments.validators
class Migration(migrations.Migration):
dependencies = [
("enrolments", "0007_merge_20210320_1853"),
]
operations = [
migrations.AddField(
model_name="enrolment",
name="students",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.PositiveIntegerField(),
default=list,
size=None,
validators=[enrolments.validators.validate_student_ids_in_family],
),
),
]
|
normal
|
{
"blob_id": "dbea2b1555368460b7d14369d2dfe4f0a01f9e4f",
"index": 8423,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('enrolments', '0007_merge_20210320_1853')]\n operations = [migrations.AddField(model_name='enrolment', name=\n 'students', field=django.contrib.postgres.fields.ArrayField(\n base_field=models.PositiveIntegerField(), default=list, size=None,\n validators=[enrolments.validators.validate_student_ids_in_family]))]\n",
"step-4": "import django.contrib.postgres.fields\nfrom django.db import migrations, models\nimport enrolments.validators\n\n\nclass Migration(migrations.Migration):\n dependencies = [('enrolments', '0007_merge_20210320_1853')]\n operations = [migrations.AddField(model_name='enrolment', name=\n 'students', field=django.contrib.postgres.fields.ArrayField(\n base_field=models.PositiveIntegerField(), default=list, size=None,\n validators=[enrolments.validators.validate_student_ids_in_family]))]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-04-03 20:16\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\nimport enrolments.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"enrolments\", \"0007_merge_20210320_1853\"),\n ]\n\n operations = [\n migrations.AddField(\n model_name=\"enrolment\",\n name=\"students\",\n field=django.contrib.postgres.fields.ArrayField(\n base_field=models.PositiveIntegerField(),\n default=list,\n size=None,\n validators=[enrolments.validators.validate_student_ids_in_family],\n ),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
g#https://www.acmicpc.net/problem/9461
'''
1. Divide 2 case △ and ▽
d[0] is △ sequence
d[1] is ▽ sequence
2. find a role between d[0] and d[1]
'''
import math
t = int(input())
n = []
for _ in range(t):
n.append(int(input()))
index = math.ceil(max(n)/2)
d = [[0 for _ in range(52)] for _ in range(2)]
d[0][1],d[0][2],d[1][1],d[1][2] = 1,1,1,2
for i in range(3,index + 1):
d[0][i] = d[1][i-1] + d[1][i-3]
d[1][i] = d[0][i] + d[0][i-2]
for k in n:
if k % 2 == 1:
print(d[0][math.ceil(k/2)])
else:
print(d[1][math.ceil(k/2)])
|
normal
|
{
"blob_id": "524b6ebd0be4c2285fac540627bb48baca71452e",
"index": 2989,
"step-1": "<mask token>\n",
"step-2": "g\n<mask token>\nfor _ in range(t):\n n.append(int(input()))\n<mask token>\nfor i in range(3, index + 1):\n d[0][i] = d[1][i - 1] + d[1][i - 3]\n d[1][i] = d[0][i] + d[0][i - 2]\nfor k in n:\n if k % 2 == 1:\n print(d[0][math.ceil(k / 2)])\n else:\n print(d[1][math.ceil(k / 2)])\n",
"step-3": "g\n<mask token>\nt = int(input())\nn = []\nfor _ in range(t):\n n.append(int(input()))\nindex = math.ceil(max(n) / 2)\nd = [[(0) for _ in range(52)] for _ in range(2)]\nd[0][1], d[0][2], d[1][1], d[1][2] = 1, 1, 1, 2\nfor i in range(3, index + 1):\n d[0][i] = d[1][i - 1] + d[1][i - 3]\n d[1][i] = d[0][i] + d[0][i - 2]\nfor k in n:\n if k % 2 == 1:\n print(d[0][math.ceil(k / 2)])\n else:\n print(d[1][math.ceil(k / 2)])\n",
"step-4": "g\n<mask token>\nimport math\nt = int(input())\nn = []\nfor _ in range(t):\n n.append(int(input()))\nindex = math.ceil(max(n) / 2)\nd = [[(0) for _ in range(52)] for _ in range(2)]\nd[0][1], d[0][2], d[1][1], d[1][2] = 1, 1, 1, 2\nfor i in range(3, index + 1):\n d[0][i] = d[1][i - 1] + d[1][i - 3]\n d[1][i] = d[0][i] + d[0][i - 2]\nfor k in n:\n if k % 2 == 1:\n print(d[0][math.ceil(k / 2)])\n else:\n print(d[1][math.ceil(k / 2)])\n",
"step-5": "g#https://www.acmicpc.net/problem/9461\n'''\n1. Divide 2 case △ and ▽\nd[0] is △ sequence\nd[1] is ▽ sequence\n2. find a role between d[0] and d[1]\n'''\nimport math\nt = int(input())\nn = []\nfor _ in range(t):\n n.append(int(input()))\nindex = math.ceil(max(n)/2)\nd = [[0 for _ in range(52)] for _ in range(2)]\nd[0][1],d[0][2],d[1][1],d[1][2] = 1,1,1,2\nfor i in range(3,index + 1):\n d[0][i] = d[1][i-1] + d[1][i-3]\n d[1][i] = d[0][i] + d[0][i-2]\nfor k in n:\n if k % 2 == 1:\n print(d[0][math.ceil(k/2)])\n else:\n print(d[1][math.ceil(k/2)])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# name: Ali
# date: 7/12/2016
# description: uses openweathermap.org's api to get weather data about
# the city that is inputted
# unbreakable? = idk
import json
import urllib2
from collections import OrderedDict
from pprint import pprint
api_key = "&APPID=507e30d896f751513350c41899382d89"
city_name_url = "http://api.openweathermap.org/data/2.5/weather?q="
units = "&units=metric"
general_info = {
"Humidity (%)": 0,
"Pressure": 0,
"Temperature(C)": 0,
"Max. Temp.(C)": 0,
"Min. Temp.(C)": 0
}
def connectapi():
global parsed
global data
urlrequest = city_name_url + city_input + units + api_key
response = urllib2.urlopen(urlrequest)
content = response.read()
data = json.loads(content, object_pairs_hook=OrderedDict)
parsed = json.dumps(data, indent=4, sort_keys=True)
print parsed
def find_data():
global country_name
global city_name
global general_info
global weather_description
global formatted_general_info
city_name = str(data['name'])
country_name = str(data['sys']['country'])
#weather_description = data['weather']['description']
for key, value in data['main'].iteritems():
if key == "humidity":
general_info['Humidity (%)'] = value
elif key == "pressure":
general_info['Pressure'] = value
elif key == "temp":
general_info['Temperature(C)'] = value
elif key == "temp_max":
general_info['Max. Temp.(C)'] = value
elif key == "temp_min":
general_info['Min. Temp.(C)'] = value
else:
continue
print "Weather Lookup\n\nEnter the name of the city that you want\nto look at the weather details of.\n"
while True:
try:
city_input = str(raw_input("What city would you like to look at?"))
except ValueError:
print"Please enter a city name."
connectapi()
if "name" in data:
find_data()
print "\n%r in %r:\n"% (city_name, country_name)
print """General info:"""
pprint(general_info)
print "\nWeather Description:\n\tidk why it doesn't let me take this data so annoying\n"
else:
print "Something went wrong, would you like to try again?"
continue
|
normal
|
{
"blob_id": "94540561ba29d2fc1766dac7b199e0cbbbeecdfc",
"index": 8046,
"step-1": "# name: Ali\n# date: 7/12/2016\n# description: uses openweathermap.org's api to get weather data about\n# the city that is inputted\n\n# unbreakable? = idk\nimport json\nimport urllib2\nfrom collections import OrderedDict\nfrom pprint import pprint\napi_key = \"&APPID=507e30d896f751513350c41899382d89\"\ncity_name_url = \"http://api.openweathermap.org/data/2.5/weather?q=\"\nunits = \"&units=metric\"\n\ngeneral_info = {\n \"Humidity (%)\": 0,\n \"Pressure\": 0,\n \"Temperature(C)\": 0,\n \"Max. Temp.(C)\": 0,\n \"Min. Temp.(C)\": 0\n }\n\ndef connectapi():\n global parsed\n global data\n urlrequest = city_name_url + city_input + units + api_key\n response = urllib2.urlopen(urlrequest)\n content = response.read()\n\n data = json.loads(content, object_pairs_hook=OrderedDict)\n parsed = json.dumps(data, indent=4, sort_keys=True)\n print parsed\n\n\ndef find_data():\n global country_name\n global city_name\n global general_info\n global weather_description\n global formatted_general_info\n city_name = str(data['name'])\n country_name = str(data['sys']['country'])\n #weather_description = data['weather']['description']\n for key, value in data['main'].iteritems():\n if key == \"humidity\":\n general_info['Humidity (%)'] = value\n elif key == \"pressure\":\n general_info['Pressure'] = value\n elif key == \"temp\":\n general_info['Temperature(C)'] = value\n elif key == \"temp_max\":\n general_info['Max. Temp.(C)'] = value\n elif key == \"temp_min\":\n general_info['Min. 
Temp.(C)'] = value\n else:\n continue\n\n\n\n\nprint \"Weather Lookup\\n\\nEnter the name of the city that you want\\nto look at the weather details of.\\n\"\nwhile True:\n\n try:\n city_input = str(raw_input(\"What city would you like to look at?\"))\n except ValueError:\n print\"Please enter a city name.\"\n\n connectapi()\n if \"name\" in data:\n find_data()\n print \"\\n%r in %r:\\n\"% (city_name, country_name)\n print \"\"\"General info:\"\"\"\n pprint(general_info)\n print \"\\nWeather Description:\\n\\tidk why it doesn't let me take this data so annoying\\n\"\n else:\n print \"Something went wrong, would you like to try again?\"\n continue\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#from __future__ import absolute_import
#import os
from celery import Celery
#from django.conf import settings
#os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'learning.settings')
app = Celery('tasks', broker="redis://localhost")
#app.config_from_object('django.conf:settings')
#app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task
def add(x, y):
return x+y
#print('Request:{0!r}'.format(self.request))
|
normal
|
{
"blob_id": "3ef114dd35ef3995ae73bf85bbe38db4fb7045d8",
"index": 7315,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.task\ndef add(x, y):\n return x + y\n",
"step-3": "<mask token>\napp = Celery('tasks', broker='redis://localhost')\n\n\n@app.task\ndef add(x, y):\n return x + y\n",
"step-4": "from celery import Celery\napp = Celery('tasks', broker='redis://localhost')\n\n\n@app.task\ndef add(x, y):\n return x + y\n",
"step-5": "\n#from __future__ import absolute_import\n#import os\nfrom celery import Celery\n#from django.conf import settings\n\n#os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'learning.settings')\napp = Celery('tasks', broker=\"redis://localhost\")\n\n\n#app.config_from_object('django.conf:settings')\n#app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\n\n@app.task\ndef add(x, y):\n return x+y\n #print('Request:{0!r}'.format(self.request))\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cgi
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from models.nutrient import *
class SoilRecord(db.Model):
year=db.DateProperty(auto_now_add=True)
stats=NutrientProfile()
amendments=db.StringProperty()
notes=db.StringProperty()
@property
def plot(self):
Plot.gql("Where soilrecord=:1",self.key())
def create(self, year):
self.year=year
class CropRecord(db.Model):
year=db.DateProperty(auto_now_add=True)
crops=db.ListProperty(db.Key)
notes=db.StringProperty()
@property
def plot(self):
Plot.gql("Where croprecord=:1",self.key())
def create(self, year):
self.year=year
def addCrop(self, crop):
if addByKey(crop, self.crops):
self.put()
|
normal
|
{
"blob_id": "01a6283d2331590082cdf1d409ecdb6f93459882",
"index": 4861,
"step-1": "<mask token>\n\n\nclass CropRecord(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CropRecord(db.Model):\n year = db.DateProperty(auto_now_add=True)\n crops = db.ListProperty(db.Key)\n notes = db.StringProperty()\n\n @property\n def plot(self):\n Plot.gql('Where croprecord=:1', self.key())\n\n def create(self, year):\n self.year = year\n\n def addCrop(self, crop):\n if addByKey(crop, self.crops):\n self.put()\n",
"step-3": "<mask token>\n\n\nclass SoilRecord(db.Model):\n year = db.DateProperty(auto_now_add=True)\n stats = NutrientProfile()\n amendments = db.StringProperty()\n notes = db.StringProperty()\n\n @property\n def plot(self):\n Plot.gql('Where soilrecord=:1', self.key())\n\n def create(self, year):\n self.year = year\n\n\nclass CropRecord(db.Model):\n year = db.DateProperty(auto_now_add=True)\n crops = db.ListProperty(db.Key)\n notes = db.StringProperty()\n\n @property\n def plot(self):\n Plot.gql('Where croprecord=:1', self.key())\n\n def create(self, year):\n self.year = year\n\n def addCrop(self, crop):\n if addByKey(crop, self.crops):\n self.put()\n",
"step-4": "import cgi\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\nfrom models.nutrient import *\n\n\nclass SoilRecord(db.Model):\n year = db.DateProperty(auto_now_add=True)\n stats = NutrientProfile()\n amendments = db.StringProperty()\n notes = db.StringProperty()\n\n @property\n def plot(self):\n Plot.gql('Where soilrecord=:1', self.key())\n\n def create(self, year):\n self.year = year\n\n\nclass CropRecord(db.Model):\n year = db.DateProperty(auto_now_add=True)\n crops = db.ListProperty(db.Key)\n notes = db.StringProperty()\n\n @property\n def plot(self):\n Plot.gql('Where croprecord=:1', self.key())\n\n def create(self, year):\n self.year = year\n\n def addCrop(self, crop):\n if addByKey(crop, self.crops):\n self.put()\n",
"step-5": "import cgi\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\nfrom models.nutrient import *\nclass SoilRecord(db.Model):\n year=db.DateProperty(auto_now_add=True)\n stats=NutrientProfile()\n amendments=db.StringProperty()\n notes=db.StringProperty()\n\n @property\n def plot(self):\n Plot.gql(\"Where soilrecord=:1\",self.key())\n\n def create(self, year):\n self.year=year\n\nclass CropRecord(db.Model):\n year=db.DateProperty(auto_now_add=True)\n crops=db.ListProperty(db.Key)\n notes=db.StringProperty()\n\n @property\n def plot(self):\n Plot.gql(\"Where croprecord=:1\",self.key())\n\n\n def create(self, year):\n self.year=year\n\n def addCrop(self, crop):\n if addByKey(crop, self.crops):\n self.put()\n\n",
"step-ids": [
1,
5,
9,
10,
11
]
}
|
[
1,
5,
9,
10,
11
] |
#https://www.hackerrank.com/challenges/caesar-cipher-1/problem
n=int(input())
stringy=input()
k=int(input())
s=""
for i in stringy:
if ord(i)>=65 and ord(i)<=90:
temp=(ord(i)+k-65)%26
s+=chr(temp+65)
elif ord(i)>=97 and ord(i)<=122:
temp=(ord(i)+k-97)%26
s+=chr(temp+97)
else:
s+=i
print(s)
|
normal
|
{
"blob_id": "acf787885834961a71fb2655b9d8a1eb026942c7",
"index": 4089,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in stringy:\n if ord(i) >= 65 and ord(i) <= 90:\n temp = (ord(i) + k - 65) % 26\n s += chr(temp + 65)\n elif ord(i) >= 97 and ord(i) <= 122:\n temp = (ord(i) + k - 97) % 26\n s += chr(temp + 97)\n else:\n s += i\nprint(s)\n",
"step-3": "n = int(input())\nstringy = input()\nk = int(input())\ns = ''\nfor i in stringy:\n if ord(i) >= 65 and ord(i) <= 90:\n temp = (ord(i) + k - 65) % 26\n s += chr(temp + 65)\n elif ord(i) >= 97 and ord(i) <= 122:\n temp = (ord(i) + k - 97) % 26\n s += chr(temp + 97)\n else:\n s += i\nprint(s)\n",
"step-4": "#https://www.hackerrank.com/challenges/caesar-cipher-1/problem\n\nn=int(input())\nstringy=input()\nk=int(input())\ns=\"\"\nfor i in stringy:\n if ord(i)>=65 and ord(i)<=90:\n temp=(ord(i)+k-65)%26\n s+=chr(temp+65)\n elif ord(i)>=97 and ord(i)<=122:\n temp=(ord(i)+k-97)%26\n s+=chr(temp+97)\n else:\n s+=i\nprint(s)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import json
import requests
from requests.auth import HTTPBasicAuth
if __name__ == "__main__":
auth = HTTPBasicAuth('cisco', 'cisco')
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
url = "https://asav/api/interfaces/physical/GigabitEthernet0_API_SLASH_0"
body = {
"kind": "object#GigabitInterface",
"interfaceDesc": "Configured by Python"
}
requests.packages.urllib3.disable_warnings()
response = requests.patch(url, data=json.dumps(body), auth=auth, headers=headers, verify=False)
|
normal
|
{
"blob_id": "6801d68ebcc6ff52d9be92efeeb8727997a14bbd",
"index": 523,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n auth = HTTPBasicAuth('cisco', 'cisco')\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n url = 'https://asav/api/interfaces/physical/GigabitEthernet0_API_SLASH_0'\n body = {'kind': 'object#GigabitInterface', 'interfaceDesc':\n 'Configured by Python'}\n requests.packages.urllib3.disable_warnings()\n response = requests.patch(url, data=json.dumps(body), auth=auth,\n headers=headers, verify=False)\n",
"step-3": "import json\nimport requests\nfrom requests.auth import HTTPBasicAuth\nif __name__ == '__main__':\n auth = HTTPBasicAuth('cisco', 'cisco')\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n url = 'https://asav/api/interfaces/physical/GigabitEthernet0_API_SLASH_0'\n body = {'kind': 'object#GigabitInterface', 'interfaceDesc':\n 'Configured by Python'}\n requests.packages.urllib3.disable_warnings()\n response = requests.patch(url, data=json.dumps(body), auth=auth,\n headers=headers, verify=False)\n",
"step-4": "#!/usr/bin/env python\n\nimport json\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\nif __name__ == \"__main__\":\n\n auth = HTTPBasicAuth('cisco', 'cisco')\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n\n url = \"https://asav/api/interfaces/physical/GigabitEthernet0_API_SLASH_0\"\n\n body = {\n \"kind\": \"object#GigabitInterface\",\n \"interfaceDesc\": \"Configured by Python\"\n }\n\n requests.packages.urllib3.disable_warnings()\n response = requests.patch(url, data=json.dumps(body), auth=auth, headers=headers, verify=False)\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
TABLE_NAME = 'active_module'
|
flexible
|
{
"blob_id": "ff3962d875da8e3f9e6c3178b1a8191ebb8a7b60",
"index": 3639,
"step-1": "<mask token>\n",
"step-2": "TABLE_NAME = 'active_module'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import json
import argparse
import sys
import os
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-sd","--startdate", help="Date to start scheduling trials, format is MM/DD.", required=True)
ap.add_argument("-r", "--round",help="A number.", required=True)
ap.add_argument("-hs", "--hsched", help="Which high schedule to use (e.g. H1, H2, H3)", required=True)
ap.add_argument("-ls", "--lsched", help="Which low schedule to use (e.g. H1, H2, H3)", required=True)
ap.add_argument("-h1", "--hfish1", help="1st Fish that will be assigned H schedule", required=True)
ap.add_argument("-h2", "--hfish2", help="2nd Fish that will be assigned H schedule", required=True)
ap.add_argument("-h3", "--hfish3", help="3rd Fish that will be assigned H schedule", required=True)
ap.add_argument("-l1", "--lfish1", help="1st Fish that will be assigned L schedule", required=True)
ap.add_argument("-l2", "--lfish2", help="2nd Fish that will be assigned L schedule", required=True)
ap.add_argument("-l3", "--lfish3", help="3rd Fish that will be assigned L schedule", required=True)
args = vars(ap.parse_args())
a_dict = {"startDate": args["startdate"], "round": args["round"], "h_schedule": args["hsched"], "l_schedule": args["lsched"], "mapping": {"H": { "fish1" : args["hfish1"], "fish2": args["hfish2"], "fish3": args["hfish3"]}, "L": { "fish1" : args["lfish1"], "fish2": args["lfish2"], "fish3": args["lfish3"]}}}
#print a_dict
os.remove('top.json')
with open('top.json', 'w') as f:
json.dump(a_dict, f, sort_keys=True, indent=4, separators=(',', ': '))
sys.exit(0)
|
normal
|
{
"blob_id": "e4767d8a4991a1180cc185c4c2d77104d63f9c7a",
"index": 6858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('-sd', '--startdate', help=\n 'Date to start scheduling trials, format is MM/DD.', required=True)\n ap.add_argument('-r', '--round', help='A number.', required=True)\n ap.add_argument('-hs', '--hsched', help=\n 'Which high schedule to use (e.g. H1, H2, H3)', required=True)\n ap.add_argument('-ls', '--lsched', help=\n 'Which low schedule to use (e.g. H1, H2, H3)', required=True)\n ap.add_argument('-h1', '--hfish1', help=\n '1st Fish that will be assigned H schedule', required=True)\n ap.add_argument('-h2', '--hfish2', help=\n '2nd Fish that will be assigned H schedule', required=True)\n ap.add_argument('-h3', '--hfish3', help=\n '3rd Fish that will be assigned H schedule', required=True)\n ap.add_argument('-l1', '--lfish1', help=\n '1st Fish that will be assigned L schedule', required=True)\n ap.add_argument('-l2', '--lfish2', help=\n '2nd Fish that will be assigned L schedule', required=True)\n ap.add_argument('-l3', '--lfish3', help=\n '3rd Fish that will be assigned L schedule', required=True)\n args = vars(ap.parse_args())\n a_dict = {'startDate': args['startdate'], 'round': args['round'],\n 'h_schedule': args['hsched'], 'l_schedule': args['lsched'],\n 'mapping': {'H': {'fish1': args['hfish1'], 'fish2': args['hfish2'],\n 'fish3': args['hfish3']}, 'L': {'fish1': args['lfish1'], 'fish2':\n args['lfish2'], 'fish3': args['lfish3']}}}\n os.remove('top.json')\n with open('top.json', 'w') as f:\n json.dump(a_dict, f, sort_keys=True, indent=4, separators=(',', ': '))\n sys.exit(0)\n",
"step-3": "import json\nimport argparse\nimport sys\nimport os\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('-sd', '--startdate', help=\n 'Date to start scheduling trials, format is MM/DD.', required=True)\n ap.add_argument('-r', '--round', help='A number.', required=True)\n ap.add_argument('-hs', '--hsched', help=\n 'Which high schedule to use (e.g. H1, H2, H3)', required=True)\n ap.add_argument('-ls', '--lsched', help=\n 'Which low schedule to use (e.g. H1, H2, H3)', required=True)\n ap.add_argument('-h1', '--hfish1', help=\n '1st Fish that will be assigned H schedule', required=True)\n ap.add_argument('-h2', '--hfish2', help=\n '2nd Fish that will be assigned H schedule', required=True)\n ap.add_argument('-h3', '--hfish3', help=\n '3rd Fish that will be assigned H schedule', required=True)\n ap.add_argument('-l1', '--lfish1', help=\n '1st Fish that will be assigned L schedule', required=True)\n ap.add_argument('-l2', '--lfish2', help=\n '2nd Fish that will be assigned L schedule', required=True)\n ap.add_argument('-l3', '--lfish3', help=\n '3rd Fish that will be assigned L schedule', required=True)\n args = vars(ap.parse_args())\n a_dict = {'startDate': args['startdate'], 'round': args['round'],\n 'h_schedule': args['hsched'], 'l_schedule': args['lsched'],\n 'mapping': {'H': {'fish1': args['hfish1'], 'fish2': args['hfish2'],\n 'fish3': args['hfish3']}, 'L': {'fish1': args['lfish1'], 'fish2':\n args['lfish2'], 'fish3': args['lfish3']}}}\n os.remove('top.json')\n with open('top.json', 'w') as f:\n json.dump(a_dict, f, sort_keys=True, indent=4, separators=(',', ': '))\n sys.exit(0)\n",
"step-4": "import json\nimport argparse\nimport sys\nimport os\n\nif __name__ == '__main__':\n\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-sd\",\"--startdate\", help=\"Date to start scheduling trials, format is MM/DD.\", required=True)\n ap.add_argument(\"-r\", \"--round\",help=\"A number.\", required=True)\n ap.add_argument(\"-hs\", \"--hsched\", help=\"Which high schedule to use (e.g. H1, H2, H3)\", required=True)\n ap.add_argument(\"-ls\", \"--lsched\", help=\"Which low schedule to use (e.g. H1, H2, H3)\", required=True)\n ap.add_argument(\"-h1\", \"--hfish1\", help=\"1st Fish that will be assigned H schedule\", required=True)\n ap.add_argument(\"-h2\", \"--hfish2\", help=\"2nd Fish that will be assigned H schedule\", required=True)\n ap.add_argument(\"-h3\", \"--hfish3\", help=\"3rd Fish that will be assigned H schedule\", required=True)\n ap.add_argument(\"-l1\", \"--lfish1\", help=\"1st Fish that will be assigned L schedule\", required=True)\n ap.add_argument(\"-l2\", \"--lfish2\", help=\"2nd Fish that will be assigned L schedule\", required=True)\n ap.add_argument(\"-l3\", \"--lfish3\", help=\"3rd Fish that will be assigned L schedule\", required=True)\n\n args = vars(ap.parse_args())\n\n a_dict = {\"startDate\": args[\"startdate\"], \"round\": args[\"round\"], \"h_schedule\": args[\"hsched\"], \"l_schedule\": args[\"lsched\"], \"mapping\": {\"H\": { \"fish1\" : args[\"hfish1\"], \"fish2\": args[\"hfish2\"], \"fish3\": args[\"hfish3\"]}, \"L\": { \"fish1\" : args[\"lfish1\"], \"fish2\": args[\"lfish2\"], \"fish3\": args[\"lfish3\"]}}}\n\n #print a_dict\n\n os.remove('top.json')\n\n with open('top.json', 'w') as f:\n json.dump(a_dict, f, sort_keys=True, indent=4, separators=(',', ': '))\n\n sys.exit(0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Link(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Link(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Link(models.Model):
text = models.CharField(max_length=100)
link = models.URLField()
def __str__(self):
return self.text
<|reserved_special_token_1|>
from django.db import models
class Link(models.Model):
text = models.CharField(max_length=100)
link = models.URLField()
def __str__(self):
return self.text
|
flexible
|
{
"blob_id": "61a58b934c6663e87824e4f9f9ffd92c3236947c",
"index": 7930,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Link(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Link(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.text\n",
"step-4": "<mask token>\n\n\nclass Link(models.Model):\n text = models.CharField(max_length=100)\n link = models.URLField()\n\n def __str__(self):\n return self.text\n",
"step-5": "from django.db import models\n\n\nclass Link(models.Model):\n text = models.CharField(max_length=100)\n link = models.URLField()\n\n def __str__(self):\n return self.text\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
config = {
'name': 'beziers',
'author': 'Simon Cozens',
'author_email': 'simon@simon-cozens.org',
'url': 'https://github.com/simoncozens/beziers.py',
'description': 'Bezier curve manipulation library',
'long_description': open('README.rst', 'r').read(),
'license': 'MIT',
'version': '0.5.0',
'install_requires': [
'pyclipper'
],
'classifiers': [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta"
],
'packages': find_packages(),
}
if __name__ == '__main__':
setup(**config)
|
normal
|
{
"blob_id": "98ddf0be2c38cd9b10dfa9cc09f53907b34c1287",
"index": 7728,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n setup(**config)\n",
"step-3": "<mask token>\nconfig = {'name': 'beziers', 'author': 'Simon Cozens', 'author_email':\n 'simon@simon-cozens.org', 'url':\n 'https://github.com/simoncozens/beziers.py', 'description':\n 'Bezier curve manipulation library', 'long_description': open(\n 'README.rst', 'r').read(), 'license': 'MIT', 'version': '0.5.0',\n 'install_requires': ['pyclipper'], 'classifiers': [\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta'], 'packages': find_packages()}\nif __name__ == '__main__':\n setup(**config)\n",
"step-4": "from setuptools import setup, find_packages\nconfig = {'name': 'beziers', 'author': 'Simon Cozens', 'author_email':\n 'simon@simon-cozens.org', 'url':\n 'https://github.com/simoncozens/beziers.py', 'description':\n 'Bezier curve manipulation library', 'long_description': open(\n 'README.rst', 'r').read(), 'license': 'MIT', 'version': '0.5.0',\n 'install_requires': ['pyclipper'], 'classifiers': [\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta'], 'packages': find_packages()}\nif __name__ == '__main__':\n setup(**config)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\nconfig = {\n 'name': 'beziers',\n 'author': 'Simon Cozens',\n 'author_email': 'simon@simon-cozens.org',\n 'url': 'https://github.com/simoncozens/beziers.py',\n 'description': 'Bezier curve manipulation library',\n 'long_description': open('README.rst', 'r').read(),\n 'license': 'MIT',\n 'version': '0.5.0',\n 'install_requires': [\n 'pyclipper'\n ],\n 'classifiers': [\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 4 - Beta\"\n\n ],\n 'packages': find_packages(),\n}\n\nif __name__ == '__main__':\n setup(**config)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def emphasize(sentence):
words = sentence.split(' ')
for i, word in enumerate(words):
words[i] = word[0].upper() + word[1:].lower()
return ' '.join(words)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def emphasize(sentence):
words = sentence.split(' ')
for i, word in enumerate(words):
words[i] = word[0].upper() + word[1:].lower()
return ' '.join(words)
<|reserved_special_token_0|>
assert ans1 == exp1, f'expected {exp1}, got {ans1}'
<|reserved_special_token_0|>
assert ans2 == exp2, f'expected {exp2}, got {ans2}'
<|reserved_special_token_0|>
assert ans3 == exp3, f'expected {exp3}, got {ans3}'
print('everything okay')
<|reserved_special_token_1|>
def emphasize(sentence):
words = sentence.split(' ')
for i, word in enumerate(words):
words[i] = word[0].upper() + word[1:].lower()
return ' '.join(words)
exp1 = 'Hello World'
ans1 = emphasize('hello world')
assert ans1 == exp1, f'expected {exp1}, got {ans1}'
exp2 = 'Good Morning'
ans2 = emphasize('GOOD MORNING')
assert ans2 == exp2, f'expected {exp2}, got {ans2}'
exp3 = '99 Red Balloons!'
ans3 = emphasize('99 red balloons!')
assert ans3 == exp3, f'expected {exp3}, got {ans3}'
print('everything okay')
<|reserved_special_token_1|>
def emphasize(sentence):
words = sentence.split(" ")
for i, word in enumerate(words):
words[i] = word[0].upper() + word[1:].lower()
return " ".join(words)
exp1 = "Hello World"
ans1 = emphasize("hello world")
assert ans1 == exp1, f"expected {exp1}, got {ans1}"
exp2 = "Good Morning"
ans2 = emphasize("GOOD MORNING")
assert ans2 == exp2, f"expected {exp2}, got {ans2}"
exp3 = "99 Red Balloons!"
ans3 = emphasize("99 red balloons!")
assert ans3 == exp3, f"expected {exp3}, got {ans3}"
print("everything okay")
|
flexible
|
{
"blob_id": "518dcdca8f5e6b42624083e4327143dfba59b2ba",
"index": 9785,
"step-1": "<mask token>\n",
"step-2": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\n<mask token>\n",
"step-3": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\n<mask token>\nassert ans1 == exp1, f'expected {exp1}, got {ans1}'\n<mask token>\nassert ans2 == exp2, f'expected {exp2}, got {ans2}'\n<mask token>\nassert ans3 == exp3, f'expected {exp3}, got {ans3}'\nprint('everything okay')\n",
"step-4": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\nexp1 = 'Hello World'\nans1 = emphasize('hello world')\nassert ans1 == exp1, f'expected {exp1}, got {ans1}'\nexp2 = 'Good Morning'\nans2 = emphasize('GOOD MORNING')\nassert ans2 == exp2, f'expected {exp2}, got {ans2}'\nexp3 = '99 Red Balloons!'\nans3 = emphasize('99 red balloons!')\nassert ans3 == exp3, f'expected {exp3}, got {ans3}'\nprint('everything okay')\n",
"step-5": "def emphasize(sentence):\n words = sentence.split(\" \")\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return \" \".join(words)\n\n\nexp1 = \"Hello World\"\nans1 = emphasize(\"hello world\")\nassert ans1 == exp1, f\"expected {exp1}, got {ans1}\"\n\nexp2 = \"Good Morning\"\nans2 = emphasize(\"GOOD MORNING\")\nassert ans2 == exp2, f\"expected {exp2}, got {ans2}\"\n\nexp3 = \"99 Red Balloons!\"\nans3 = emphasize(\"99 red balloons!\")\nassert ans3 == exp3, f\"expected {exp3}, got {ans3}\"\n\nprint(\"everything okay\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import torch
import argparse
from DialogGenerator import DialogGenerator
from DialogDataset import DialogDataset
from DialogDiscriminator import DialogDiscriminator
from transformers import GPT2Tokenizer
import os
def prep_folder(args):
""" Append to slash to filepath if needed, and generate folder if it doesn't exist"""
if(args.save_folder[-1]!='/'):
args.save_folder += '/'
if(not os.path.isdir(args.save_folder)):
os.mkdir(args.save_folder)
if(__name__=="__main__"):
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=3, dest="epochs", help='Number of epochs to run')
parser.add_argument('--batch-size', type=int, default=50, dest="batch_size", help='Batch size')
parser.add_argument('--max-out-length', type=int, default=128, dest="max_out_length", help='Maximum output length (outputs truncated if longer)')
parser.add_argument('--adversarial-model', type=str, default=None, dest="adv_model", help='Type of adversarial model to use. Will use traditional teacher forcing if None.')
parser.add_argument('--train-disc-only-steps', type=int, default=0, dest="train_disc_only_steps", help='Number of steps for which to train discriminator only (without updating generator)')
parser.add_argument('--gen_weight_decay', type=float, default=0, dest="gen_weight_decay", help='Weight decay for the generator\'s training scheduler')
parser.add_argument('--gen_lr', type=float, default=2e-5, dest="gen_lr", help='Learning rate for generator')
parser.add_argument('--gen_epsilon', type=float, default=1e-8, dest="gen_epsilon", help='Epsilon parameter for generator optimizer')
parser.add_argument('--gen_warmup_steps', type=int, default=0, dest="gen_warmup_steps", help='Number of warmup steps for training generator')
parser.add_argument('--disc_weight_decay', type=float, default=0, dest="disc_weight_decay", help='Weight decay for the discriminator\'s training scheduler')
parser.add_argument('--disc_lr', type=float, default=2e-5, dest="disc_lr", help='Learning rate for discriminator')
parser.add_argument('--disc_epsilon', type=float, default=1e-8, dest="disc_epsilon", help='Epsilon parameter for discriminator optimizer')
parser.add_argument('--disc_warmup_steps', type=int, default=0, dest="disc_warmup_steps", help='Number of warmup steps for training discriminator')
parser.add_argument('--train-data-path', type=str, dest="train_data_path", help="Filepath to preprocessed data")
parser.add_argument('--save-folder', type=str, dest="save_folder", help="Filepath to folder where checkpoints should be saved")
parser.add_argument('--pretrained-gen', type=str, default=None, dest="pretrained_gen", help="Filepath to trained generator. If None, will instantiate a default pretrained generator.")
parser.add_argument('--pretrained-disc', type=str, default=None, dest="pretrained_disc", help="Filepath to trained discriminator. If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.")
args = parser.parse_args()
assert args.train_data_path is not None
assert args.save_folder is not None
prep_folder(args)
eos_token_id = GPT2Tokenizer.from_pretrained("gpt2").eos_token_id
train_dataset = DialogDataset(args.train_data_path, eos_token_id)
train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)
gen_opt_params = {"weight_decay": args.gen_weight_decay,
"lr": args.gen_lr,
"warmup_steps": args.gen_warmup_steps,
"epsilon": args.gen_epsilon,
"total_steps": int(len(train_dataset) / args.batch_size) * args.epochs }
generator = DialogGenerator(args.pretrained_gen, args.save_folder, gen_opt_params)
if(args.adv_model is not None):
disc_opt_params = {"weight_decay": args.disc_weight_decay,
"lr": args.disc_lr,
"warmup_steps": args.disc_warmup_steps,
"epsilon": args.disc_epsilon,
"total_steps": int(len(train_dataset) / args.batch_size) * args.epochs }
discriminator = DialogDiscriminator(args.adv_model, args.pretrained_disc, args.save_folder, disc_opt_params)
generator.train_adversarial(train_loader, args.epochs, args.max_out_length, discriminator, args.train_disc_only_steps)
else:
generator.train_traditional(train_loader, args.epochs, args.max_out_length)
|
normal
|
{
"blob_id": "18be97061c65185fcebf10c628e0e51bb08522cf",
"index": 3609,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef prep_folder(args):\n \"\"\" Append to slash to filepath if needed, and generate folder if it doesn't exist\"\"\"\n if args.save_folder[-1] != '/':\n args.save_folder += '/'\n if not os.path.isdir(args.save_folder):\n os.mkdir(args.save_folder)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef prep_folder(args):\n \"\"\" Append to slash to filepath if needed, and generate folder if it doesn't exist\"\"\"\n if args.save_folder[-1] != '/':\n args.save_folder += '/'\n if not os.path.isdir(args.save_folder):\n os.mkdir(args.save_folder)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=3, dest='epochs',\n help='Number of epochs to run')\n parser.add_argument('--batch-size', type=int, default=50, dest=\n 'batch_size', help='Batch size')\n parser.add_argument('--max-out-length', type=int, default=128, dest=\n 'max_out_length', help=\n 'Maximum output length (outputs truncated if longer)')\n parser.add_argument('--adversarial-model', type=str, default=None, dest\n ='adv_model', help=\n 'Type of adversarial model to use. Will use traditional teacher forcing if None.'\n )\n parser.add_argument('--train-disc-only-steps', type=int, default=0,\n dest='train_disc_only_steps', help=\n 'Number of steps for which to train discriminator only (without updating generator)'\n )\n parser.add_argument('--gen_weight_decay', type=float, default=0, dest=\n 'gen_weight_decay', help=\n \"Weight decay for the generator's training scheduler\")\n parser.add_argument('--gen_lr', type=float, default=2e-05, dest=\n 'gen_lr', help='Learning rate for generator')\n parser.add_argument('--gen_epsilon', type=float, default=1e-08, dest=\n 'gen_epsilon', help='Epsilon parameter for generator optimizer')\n parser.add_argument('--gen_warmup_steps', type=int, default=0, dest=\n 'gen_warmup_steps', help=\n 'Number of warmup steps for training generator')\n parser.add_argument('--disc_weight_decay', type=float, default=0, dest=\n 'disc_weight_decay', help=\n \"Weight decay for the discriminator's training scheduler\")\n parser.add_argument('--disc_lr', type=float, default=2e-05, dest=\n 'disc_lr', help='Learning rate for discriminator')\n parser.add_argument('--disc_epsilon', type=float, 
default=1e-08, dest=\n 'disc_epsilon', help='Epsilon parameter for discriminator optimizer')\n parser.add_argument('--disc_warmup_steps', type=int, default=0, dest=\n 'disc_warmup_steps', help=\n 'Number of warmup steps for training discriminator')\n parser.add_argument('--train-data-path', type=str, dest=\n 'train_data_path', help='Filepath to preprocessed data')\n parser.add_argument('--save-folder', type=str, dest='save_folder', help\n ='Filepath to folder where checkpoints should be saved')\n parser.add_argument('--pretrained-gen', type=str, default=None, dest=\n 'pretrained_gen', help=\n 'Filepath to trained generator. If None, will instantiate a default pretrained generator.'\n )\n parser.add_argument('--pretrained-disc', type=str, default=None, dest=\n 'pretrained_disc', help=\n 'Filepath to trained discriminator. If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.'\n )\n args = parser.parse_args()\n assert args.train_data_path is not None\n assert args.save_folder is not None\n prep_folder(args)\n eos_token_id = GPT2Tokenizer.from_pretrained('gpt2').eos_token_id\n train_dataset = DialogDataset(args.train_data_path, eos_token_id)\n train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)\n gen_opt_params = {'weight_decay': args.gen_weight_decay, 'lr': args.\n gen_lr, 'warmup_steps': args.gen_warmup_steps, 'epsilon': args.\n gen_epsilon, 'total_steps': int(len(train_dataset) / args.\n batch_size) * args.epochs}\n generator = DialogGenerator(args.pretrained_gen, args.save_folder,\n gen_opt_params)\n if args.adv_model is not None:\n disc_opt_params = {'weight_decay': args.disc_weight_decay, 'lr':\n args.disc_lr, 'warmup_steps': args.disc_warmup_steps, 'epsilon':\n args.disc_epsilon, 'total_steps': int(len(train_dataset) / args\n .batch_size) * args.epochs}\n discriminator = DialogDiscriminator(args.adv_model, args.\n pretrained_disc, args.save_folder, disc_opt_params)\n 
generator.train_adversarial(train_loader, args.epochs, args.\n max_out_length, discriminator, args.train_disc_only_steps)\n else:\n generator.train_traditional(train_loader, args.epochs, args.\n max_out_length)\n",
"step-4": "import torch\nimport argparse\nfrom DialogGenerator import DialogGenerator\nfrom DialogDataset import DialogDataset\nfrom DialogDiscriminator import DialogDiscriminator\nfrom transformers import GPT2Tokenizer\nimport os\n\n\ndef prep_folder(args):\n \"\"\" Append to slash to filepath if needed, and generate folder if it doesn't exist\"\"\"\n if args.save_folder[-1] != '/':\n args.save_folder += '/'\n if not os.path.isdir(args.save_folder):\n os.mkdir(args.save_folder)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=3, dest='epochs',\n help='Number of epochs to run')\n parser.add_argument('--batch-size', type=int, default=50, dest=\n 'batch_size', help='Batch size')\n parser.add_argument('--max-out-length', type=int, default=128, dest=\n 'max_out_length', help=\n 'Maximum output length (outputs truncated if longer)')\n parser.add_argument('--adversarial-model', type=str, default=None, dest\n ='adv_model', help=\n 'Type of adversarial model to use. 
Will use traditional teacher forcing if None.'\n )\n parser.add_argument('--train-disc-only-steps', type=int, default=0,\n dest='train_disc_only_steps', help=\n 'Number of steps for which to train discriminator only (without updating generator)'\n )\n parser.add_argument('--gen_weight_decay', type=float, default=0, dest=\n 'gen_weight_decay', help=\n \"Weight decay for the generator's training scheduler\")\n parser.add_argument('--gen_lr', type=float, default=2e-05, dest=\n 'gen_lr', help='Learning rate for generator')\n parser.add_argument('--gen_epsilon', type=float, default=1e-08, dest=\n 'gen_epsilon', help='Epsilon parameter for generator optimizer')\n parser.add_argument('--gen_warmup_steps', type=int, default=0, dest=\n 'gen_warmup_steps', help=\n 'Number of warmup steps for training generator')\n parser.add_argument('--disc_weight_decay', type=float, default=0, dest=\n 'disc_weight_decay', help=\n \"Weight decay for the discriminator's training scheduler\")\n parser.add_argument('--disc_lr', type=float, default=2e-05, dest=\n 'disc_lr', help='Learning rate for discriminator')\n parser.add_argument('--disc_epsilon', type=float, default=1e-08, dest=\n 'disc_epsilon', help='Epsilon parameter for discriminator optimizer')\n parser.add_argument('--disc_warmup_steps', type=int, default=0, dest=\n 'disc_warmup_steps', help=\n 'Number of warmup steps for training discriminator')\n parser.add_argument('--train-data-path', type=str, dest=\n 'train_data_path', help='Filepath to preprocessed data')\n parser.add_argument('--save-folder', type=str, dest='save_folder', help\n ='Filepath to folder where checkpoints should be saved')\n parser.add_argument('--pretrained-gen', type=str, default=None, dest=\n 'pretrained_gen', help=\n 'Filepath to trained generator. If None, will instantiate a default pretrained generator.'\n )\n parser.add_argument('--pretrained-disc', type=str, default=None, dest=\n 'pretrained_disc', help=\n 'Filepath to trained discriminator. 
If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.'\n )\n args = parser.parse_args()\n assert args.train_data_path is not None\n assert args.save_folder is not None\n prep_folder(args)\n eos_token_id = GPT2Tokenizer.from_pretrained('gpt2').eos_token_id\n train_dataset = DialogDataset(args.train_data_path, eos_token_id)\n train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)\n gen_opt_params = {'weight_decay': args.gen_weight_decay, 'lr': args.\n gen_lr, 'warmup_steps': args.gen_warmup_steps, 'epsilon': args.\n gen_epsilon, 'total_steps': int(len(train_dataset) / args.\n batch_size) * args.epochs}\n generator = DialogGenerator(args.pretrained_gen, args.save_folder,\n gen_opt_params)\n if args.adv_model is not None:\n disc_opt_params = {'weight_decay': args.disc_weight_decay, 'lr':\n args.disc_lr, 'warmup_steps': args.disc_warmup_steps, 'epsilon':\n args.disc_epsilon, 'total_steps': int(len(train_dataset) / args\n .batch_size) * args.epochs}\n discriminator = DialogDiscriminator(args.adv_model, args.\n pretrained_disc, args.save_folder, disc_opt_params)\n generator.train_adversarial(train_loader, args.epochs, args.\n max_out_length, discriminator, args.train_disc_only_steps)\n else:\n generator.train_traditional(train_loader, args.epochs, args.\n max_out_length)\n",
"step-5": "import torch\nimport argparse\nfrom DialogGenerator import DialogGenerator\nfrom DialogDataset import DialogDataset\nfrom DialogDiscriminator import DialogDiscriminator\nfrom transformers import GPT2Tokenizer\nimport os\n\ndef prep_folder(args):\n \"\"\" Append to slash to filepath if needed, and generate folder if it doesn't exist\"\"\"\n if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)\n\nif(__name__==\"__main__\"):\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=3, dest=\"epochs\", help='Number of epochs to run')\n parser.add_argument('--batch-size', type=int, default=50, dest=\"batch_size\", help='Batch size')\n parser.add_argument('--max-out-length', type=int, default=128, dest=\"max_out_length\", help='Maximum output length (outputs truncated if longer)')\n parser.add_argument('--adversarial-model', type=str, default=None, dest=\"adv_model\", help='Type of adversarial model to use. 
Will use traditional teacher forcing if None.')\n parser.add_argument('--train-disc-only-steps', type=int, default=0, dest=\"train_disc_only_steps\", help='Number of steps for which to train discriminator only (without updating generator)')\n\n parser.add_argument('--gen_weight_decay', type=float, default=0, dest=\"gen_weight_decay\", help='Weight decay for the generator\\'s training scheduler')\n parser.add_argument('--gen_lr', type=float, default=2e-5, dest=\"gen_lr\", help='Learning rate for generator')\n parser.add_argument('--gen_epsilon', type=float, default=1e-8, dest=\"gen_epsilon\", help='Epsilon parameter for generator optimizer')\n parser.add_argument('--gen_warmup_steps', type=int, default=0, dest=\"gen_warmup_steps\", help='Number of warmup steps for training generator')\n\n parser.add_argument('--disc_weight_decay', type=float, default=0, dest=\"disc_weight_decay\", help='Weight decay for the discriminator\\'s training scheduler')\n parser.add_argument('--disc_lr', type=float, default=2e-5, dest=\"disc_lr\", help='Learning rate for discriminator')\n parser.add_argument('--disc_epsilon', type=float, default=1e-8, dest=\"disc_epsilon\", help='Epsilon parameter for discriminator optimizer')\n parser.add_argument('--disc_warmup_steps', type=int, default=0, dest=\"disc_warmup_steps\", help='Number of warmup steps for training discriminator')\n\n parser.add_argument('--train-data-path', type=str, dest=\"train_data_path\", help=\"Filepath to preprocessed data\")\n parser.add_argument('--save-folder', type=str, dest=\"save_folder\", help=\"Filepath to folder where checkpoints should be saved\")\n parser.add_argument('--pretrained-gen', type=str, default=None, dest=\"pretrained_gen\", help=\"Filepath to trained generator. If None, will instantiate a default pretrained generator.\")\n parser.add_argument('--pretrained-disc', type=str, default=None, dest=\"pretrained_disc\", help=\"Filepath to trained discriminator. 
If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.\")\n\n args = parser.parse_args()\n\n assert args.train_data_path is not None\n assert args.save_folder is not None\n\n prep_folder(args)\n \n eos_token_id = GPT2Tokenizer.from_pretrained(\"gpt2\").eos_token_id\n train_dataset = DialogDataset(args.train_data_path, eos_token_id)\n train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)\n\n gen_opt_params = {\"weight_decay\": args.gen_weight_decay, \n \"lr\": args.gen_lr, \n \"warmup_steps\": args.gen_warmup_steps,\n \"epsilon\": args.gen_epsilon,\n \"total_steps\": int(len(train_dataset) / args.batch_size) * args.epochs }\n\n generator = DialogGenerator(args.pretrained_gen, args.save_folder, gen_opt_params)\n\n if(args.adv_model is not None):\n disc_opt_params = {\"weight_decay\": args.disc_weight_decay, \n \"lr\": args.disc_lr, \n \"warmup_steps\": args.disc_warmup_steps,\n \"epsilon\": args.disc_epsilon,\n \"total_steps\": int(len(train_dataset) / args.batch_size) * args.epochs }\n discriminator = DialogDiscriminator(args.adv_model, args.pretrained_disc, args.save_folder, disc_opt_params)\n \n generator.train_adversarial(train_loader, args.epochs, args.max_out_length, discriminator, args.train_disc_only_steps)\n else:\n generator.train_traditional(train_loader, args.epochs, args.max_out_length)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" sed_thermal.py
Author: Joshua Lande <joshualande@gmail.com>
"""
import numpy as np
from scipy import integrate
from . sed_integrate import logsimps
from . sed_spectrum import Spectrum
from . import sed_config
from . import units as u
class ThermalSpectrum(Spectrum):
    """ A photon field with the spectral shape of a blackbody but an
        arbitrarily normalizable energy density.

        The thermal spectrum is

            n(E) = 15*U/(pi*kT)^4 * E^2/(exp(E/kT)-1)

        where
          * n(E) is the number of photons per unit energy per unit volume,
          * U is the total energy per unit volume,
          * kT is the temperature of the photons.

        This formula is equation 33 from Sturner et al 1997
        http://iopscience.iop.org/0004-637X/490/2/619/pdf/35841.pdf
    """

    # The spectrum accepts array-valued energies (see Spectrum).
    vectorized = True

    def __init__(self, energy_density, kT=None, T=None):
        """ Build the spectrum.

            Parameters:
              energy_density: total photon energy per unit volume
                              (a quantity with units).
              kT: photon temperature in energy units, or
              T:  photon temperature in temperature units.
                  Exactly one of kT/T must be supplied.

            For example, in XXX et al, the infrared photon
            field has temperature kT=3e-3 eV and energy
            density U=0.9 eV/cm^3

            >>> infrared=ThermalSpectrum(kT=3e-3*u.eV, energy_density=0.9*u.eV/u.cm**3)

            To convince yourself that this code correctly normalized
            the spectrum, you can explicity integrate E*dN/dE = total energy per unit volume:

            >>> print u.repr(infrared.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')
            0.90 eV/cm^3
        """
        if kT is None:
            if T is None:
                raise Exception("kT or T must be passed to ThermalSpectrum")
            # Convert a temperature to an energy.  (Bug fix: the
            # original referenced the undefined name kwargs here.)
            kT = u.boltzmann*T

        self.kT = float(kT/u.erg)

        # The integrand is essentially 0 outside of this energy range.
        self.emin = 1e-4*self.kT
        self.emax = 1e2*self.kT

        # Equation 33 in Sturner et al 1997.
        # Note, prefactor*E^2/(exp(E/kT)-1) has units of
        # photons/energy/volume, so the prefactor has units of
        # photons/energy^3/volume.
        self.pref = 15*energy_density/(np.pi*kT)**4
        self.pref = float(self.pref/(u.erg**-3*u.cm**-3))

    @staticmethod
    def occupation_number(x):
        """ Mean photon occupation number 1/(exp(x)-1) for x = E/kT.
            This is equation 1.49 in R&L. """
        return 1/(np.exp(x)-1)

    def _spectrum(self, energy):
        """ Return the energy density in units of [1/erg/cm^-3]."""
        return self.pref*energy**2*self.occupation_number(energy/self.kT)

    @staticmethod
    def units_string():
        """ Units of the value returned by _spectrum. """
        return '1/erg/cm^3'

    def integrate(self, units=True, e_weight=0):
        """ Integrate e^e_weight * n(e) over the thermal spectrum from
            emin to emax (e_weight=0 gives the photon number density,
            e_weight=1 the energy density).

            Returns the integral in units of [erg^e_weight/cm^-3]. """
        # Renamed from 'int' to avoid shadowing the builtin.
        result = logsimps(lambda e: e**e_weight*self(e, units=False),
                          self.emin, self.emax, sed_config.PER_DECADE)
        return result*(u.erg**(e_weight+1)*self.units() if units else 1)
class BlackBody(ThermalSpectrum):
    """ A true blackbody photon field: a ThermalSpectrum whose energy
        density is fixed entirely by its temperature. """

    @staticmethod
    def compute_energy_density(kT):
        """ Total energy density of a blackbody at temperature kT.

            Comparing the formula for a blackbody spectrum
            with prefactor

                pref = 8pi/(hc)^3

            to the fomrula for a general thermal spectrum:

                pref = 15*U/(pi*kT)^4,

            we find that for a blackbody spectrum,
            we have a thermal spectrum with

                U = (8*pi/(hc)^3)*(pi*kT)^4/15. """
        h = u.planck
        c = u.speed_of_light
        pi = np.pi
        return (8*pi/(h*c)**3)*((pi*kT)**4/15)

    def __init__(self, kT=None, T=None):
        """ Implement a blackbody spectrum.

            The formula for the blackbody spectrum is

                n(E)=((8pi)/(hc)^3)*E^2/(exp(E/kT)-1)

            where
              * n(E) is the number of photons per unit energy per unit volume,
              * kT is the temperature of the photons.

            This formula is on the top of page 208 in R&L.

            Exactly one of 'kT' (energy units) or 'T' (temperature
            units) must be supplied.
        """
        if kT is None:
            if T is None:
                # Bug fix: the message previously said "kT or k".
                raise Exception("kT or T must be passed to BlackBody")
            kT = u.boltzmann*T

        energy_density = BlackBody.compute_energy_density(kT)
        super(BlackBody, self).__init__(energy_density=energy_density, kT=kT)
class CMB(BlackBody):
    """ Blackbody photon field at the cosmic microwave background
        temperature of 2.725 K.

        Note, the energy density for a CMB spectrum is 0.26 eV/cm^3:

        >>> cmb = CMB()
        >>> print u.repr(cmb.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')
        0.26 eV/cm^3
    """

    def __init__(self):
        # The CMB is fully specified by its temperature.
        super(CMB, self).__init__(T=2.725*u.kelvin)
# Run the doctests embedded in the docstrings above when this module
# is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
normal
|
{
"blob_id": "8560c0068eff894e5aa1d0788bd9e5ad05c14997",
"index": 2262,
"step-1": "<mask token>\n\n\nclass ThermalSpectrum(Spectrum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def units_string():\n return '1/erg/cm^3'\n\n def integrate(self, units=True, e_weight=0):\n \"\"\" Integrate the thermal spectrum from emin to emax.\n \n Returns the integral in units of [erg^e_weight/cm^-3] \"\"\"\n int = logsimps(lambda e: e ** e_weight * self(e, units=False), self\n .emin, self.emax, sed_config.PER_DECADE)\n return int * (u.erg ** (e_weight + 1) * self.units() if units else 1)\n\n\nclass BlackBody(ThermalSpectrum):\n\n @staticmethod\n def compute_energy_density(kT):\n \"\"\" Comparing the formula for a blackbody spectrum\n with prefactor \n\n pref = 8pi/(hc)^3\n\n to the fomrula for a general thermal spectrum:\n\n pref = 15*U/(pi*kT)^4,\n\n we find that for a blackbody spectrum,\n we have a thermal spectrum with\n\n U = (8*pi/(hc)^3)*(pi*kT)^4/15. \"\"\"\n h = u.planck\n c = u.speed_of_light\n pi = np.pi\n return 8 * pi / (h * c) ** 3 * ((pi * kT) ** 4 / 15)\n\n def __init__(self, kT=None, T=None):\n \"\"\" Implement a blackbody spectrum.\n\n The formula for the blackbody spectrum is \n \n n(E)=((8pi)/(hc)^3)*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * kT is the temperature of the photons\n\n This formula is on the top of page 208 in R&L\n \"\"\"\n if kT is not None:\n kT = kT\n elif T is not None:\n kT = u.boltzmann * T\n else:\n raise Exception('kT or k must be passed to BlackBody')\n energy_density = BlackBody.compute_energy_density(kT)\n super(BlackBody, self).__init__(energy_density=energy_density, kT=kT)\n\n\nclass CMB(BlackBody):\n \"\"\" The CMB is a blackbody spectrum with temperature 2.725K.\n\n Note, the energy density for a CMB spectrum is 0.26 eV/cm^3:\n \n >>> cmb = CMB()\n >>> print u.repr(cmb.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.26 eV/cm^3\n \"\"\"\n\n def __init__(self):\n super(CMB, self).__init__(T=2.725 * 
u.kelvin)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ThermalSpectrum(Spectrum):\n <mask token>\n\n def __init__(self, energy_density, kT=None, T=None):\n \"\"\" A thermal spectrum has the sameself):\n spectral shape as the blackbody\n spectrum but has an arbitrarily \n normalizable energy density.\n\n The thermal spectrum is\n\n n(E) = 15*U/(pi*kT)^4*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * U is the total energy per unit volume.\n * kT is the temperature of the photons\n\n This formula is equation 33 from Sturner et al 1997\n http://iopscience.iop.org/0004-637X/490/2/619/pdf/35841.pdf\n \n Input can be either 'kT' in energy units or\n 'T' in temperature units.\n\n For example, in XXX et al, the infrared photon\n field has temperature kT=3e-3 eV and energy\n density U=0.9 eV/cm^3\n\n >>> infrared=ThermalSpectrum(kT=3e-3*u.eV, energy_density=0.9*u.eV/u.cm**3)\n\n To convince yourself that this code correctly normalized\n the spectrum, you can explicity integrate E*dN/dE = total energy per unit volume:\n\n >>> print u.repr(infrared.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.90 eV/cm^3\n \"\"\"\n if kT is not None:\n kT = kT\n elif T is not None:\n kT = u.boltzmann * kwargs.pop('T')\n else:\n raise Exception('kT or k must be passed to ThermalSpectrum')\n self.kT = float(kT / u.erg)\n self.emin = 0.0001 * self.kT\n self.emax = 100.0 * self.kT\n self.pref = 15 * energy_density / (np.pi * kT) ** 4\n self.pref = float(self.pref / (u.erg ** -3 * u.cm ** -3))\n\n @staticmethod\n def occupation_number(x):\n \"\"\" This is equation 1.49 in R&L. 
\"\"\"\n return 1 / (np.exp(x) - 1)\n\n def _spectrum(self, energy):\n \"\"\" Return the energy density in units of [1/erg/cm^-3].\"\"\"\n return self.pref * energy ** 2 * self.occupation_number(energy /\n self.kT)\n\n @staticmethod\n def units_string():\n return '1/erg/cm^3'\n\n def integrate(self, units=True, e_weight=0):\n \"\"\" Integrate the thermal spectrum from emin to emax.\n \n Returns the integral in units of [erg^e_weight/cm^-3] \"\"\"\n int = logsimps(lambda e: e ** e_weight * self(e, units=False), self\n .emin, self.emax, sed_config.PER_DECADE)\n return int * (u.erg ** (e_weight + 1) * self.units() if units else 1)\n\n\nclass BlackBody(ThermalSpectrum):\n\n @staticmethod\n def compute_energy_density(kT):\n \"\"\" Comparing the formula for a blackbody spectrum\n with prefactor \n\n pref = 8pi/(hc)^3\n\n to the fomrula for a general thermal spectrum:\n\n pref = 15*U/(pi*kT)^4,\n\n we find that for a blackbody spectrum,\n we have a thermal spectrum with\n\n U = (8*pi/(hc)^3)*(pi*kT)^4/15. 
\"\"\"\n h = u.planck\n c = u.speed_of_light\n pi = np.pi\n return 8 * pi / (h * c) ** 3 * ((pi * kT) ** 4 / 15)\n\n def __init__(self, kT=None, T=None):\n \"\"\" Implement a blackbody spectrum.\n\n The formula for the blackbody spectrum is \n \n n(E)=((8pi)/(hc)^3)*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * kT is the temperature of the photons\n\n This formula is on the top of page 208 in R&L\n \"\"\"\n if kT is not None:\n kT = kT\n elif T is not None:\n kT = u.boltzmann * T\n else:\n raise Exception('kT or k must be passed to BlackBody')\n energy_density = BlackBody.compute_energy_density(kT)\n super(BlackBody, self).__init__(energy_density=energy_density, kT=kT)\n\n\nclass CMB(BlackBody):\n \"\"\" The CMB is a blackbody spectrum with temperature 2.725K.\n\n Note, the energy density for a CMB spectrum is 0.26 eV/cm^3:\n \n >>> cmb = CMB()\n >>> print u.repr(cmb.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.26 eV/cm^3\n \"\"\"\n\n def __init__(self):\n super(CMB, self).__init__(T=2.725 * u.kelvin)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ThermalSpectrum(Spectrum):\n vectorized = True\n\n def __init__(self, energy_density, kT=None, T=None):\n \"\"\" A thermal spectrum has the sameself):\n spectral shape as the blackbody\n spectrum but has an arbitrarily \n normalizable energy density.\n\n The thermal spectrum is\n\n n(E) = 15*U/(pi*kT)^4*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * U is the total energy per unit volume.\n * kT is the temperature of the photons\n\n This formula is equation 33 from Sturner et al 1997\n http://iopscience.iop.org/0004-637X/490/2/619/pdf/35841.pdf\n \n Input can be either 'kT' in energy units or\n 'T' in temperature units.\n\n For example, in XXX et al, the infrared photon\n field has temperature kT=3e-3 eV and energy\n density U=0.9 eV/cm^3\n\n >>> infrared=ThermalSpectrum(kT=3e-3*u.eV, energy_density=0.9*u.eV/u.cm**3)\n\n To convince yourself that this code correctly normalized\n the spectrum, you can explicity integrate E*dN/dE = total energy per unit volume:\n\n >>> print u.repr(infrared.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.90 eV/cm^3\n \"\"\"\n if kT is not None:\n kT = kT\n elif T is not None:\n kT = u.boltzmann * kwargs.pop('T')\n else:\n raise Exception('kT or k must be passed to ThermalSpectrum')\n self.kT = float(kT / u.erg)\n self.emin = 0.0001 * self.kT\n self.emax = 100.0 * self.kT\n self.pref = 15 * energy_density / (np.pi * kT) ** 4\n self.pref = float(self.pref / (u.erg ** -3 * u.cm ** -3))\n\n @staticmethod\n def occupation_number(x):\n \"\"\" This is equation 1.49 in R&L. 
\"\"\"\n return 1 / (np.exp(x) - 1)\n\n def _spectrum(self, energy):\n \"\"\" Return the energy density in units of [1/erg/cm^-3].\"\"\"\n return self.pref * energy ** 2 * self.occupation_number(energy /\n self.kT)\n\n @staticmethod\n def units_string():\n return '1/erg/cm^3'\n\n def integrate(self, units=True, e_weight=0):\n \"\"\" Integrate the thermal spectrum from emin to emax.\n \n Returns the integral in units of [erg^e_weight/cm^-3] \"\"\"\n int = logsimps(lambda e: e ** e_weight * self(e, units=False), self\n .emin, self.emax, sed_config.PER_DECADE)\n return int * (u.erg ** (e_weight + 1) * self.units() if units else 1)\n\n\nclass BlackBody(ThermalSpectrum):\n\n @staticmethod\n def compute_energy_density(kT):\n \"\"\" Comparing the formula for a blackbody spectrum\n with prefactor \n\n pref = 8pi/(hc)^3\n\n to the fomrula for a general thermal spectrum:\n\n pref = 15*U/(pi*kT)^4,\n\n we find that for a blackbody spectrum,\n we have a thermal spectrum with\n\n U = (8*pi/(hc)^3)*(pi*kT)^4/15. 
\"\"\"\n h = u.planck\n c = u.speed_of_light\n pi = np.pi\n return 8 * pi / (h * c) ** 3 * ((pi * kT) ** 4 / 15)\n\n def __init__(self, kT=None, T=None):\n \"\"\" Implement a blackbody spectrum.\n\n The formula for the blackbody spectrum is \n \n n(E)=((8pi)/(hc)^3)*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * kT is the temperature of the photons\n\n This formula is on the top of page 208 in R&L\n \"\"\"\n if kT is not None:\n kT = kT\n elif T is not None:\n kT = u.boltzmann * T\n else:\n raise Exception('kT or k must be passed to BlackBody')\n energy_density = BlackBody.compute_energy_density(kT)\n super(BlackBody, self).__init__(energy_density=energy_density, kT=kT)\n\n\nclass CMB(BlackBody):\n \"\"\" The CMB is a blackbody spectrum with temperature 2.725K.\n\n Note, the energy density for a CMB spectrum is 0.26 eV/cm^3:\n \n >>> cmb = CMB()\n >>> print u.repr(cmb.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.26 eV/cm^3\n \"\"\"\n\n def __init__(self):\n super(CMB, self).__init__(T=2.725 * u.kelvin)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ThermalSpectrum(Spectrum):\n vectorized = True\n\n def __init__(self, energy_density, kT=None, T=None):\n \"\"\" A thermal spectrum has the sameself):\n spectral shape as the blackbody\n spectrum but has an arbitrarily \n normalizable energy density.\n\n The thermal spectrum is\n\n n(E) = 15*U/(pi*kT)^4*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * U is the total energy per unit volume.\n * kT is the temperature of the photons\n\n This formula is equation 33 from Sturner et al 1997\n http://iopscience.iop.org/0004-637X/490/2/619/pdf/35841.pdf\n \n Input can be either 'kT' in energy units or\n 'T' in temperature units.\n\n For example, in XXX et al, the infrared photon\n field has temperature kT=3e-3 eV and energy\n density U=0.9 eV/cm^3\n\n >>> infrared=ThermalSpectrum(kT=3e-3*u.eV, energy_density=0.9*u.eV/u.cm**3)\n\n To convince yourself that this code correctly normalized\n the spectrum, you can explicity integrate E*dN/dE = total energy per unit volume:\n\n >>> print u.repr(infrared.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.90 eV/cm^3\n \"\"\"\n if kT is not None:\n kT = kT\n elif T is not None:\n kT = u.boltzmann * kwargs.pop('T')\n else:\n raise Exception('kT or k must be passed to ThermalSpectrum')\n self.kT = float(kT / u.erg)\n self.emin = 0.0001 * self.kT\n self.emax = 100.0 * self.kT\n self.pref = 15 * energy_density / (np.pi * kT) ** 4\n self.pref = float(self.pref / (u.erg ** -3 * u.cm ** -3))\n\n @staticmethod\n def occupation_number(x):\n \"\"\" This is equation 1.49 in R&L. 
\"\"\"\n return 1 / (np.exp(x) - 1)\n\n def _spectrum(self, energy):\n \"\"\" Return the energy density in units of [1/erg/cm^-3].\"\"\"\n return self.pref * energy ** 2 * self.occupation_number(energy /\n self.kT)\n\n @staticmethod\n def units_string():\n return '1/erg/cm^3'\n\n def integrate(self, units=True, e_weight=0):\n \"\"\" Integrate the thermal spectrum from emin to emax.\n \n Returns the integral in units of [erg^e_weight/cm^-3] \"\"\"\n int = logsimps(lambda e: e ** e_weight * self(e, units=False), self\n .emin, self.emax, sed_config.PER_DECADE)\n return int * (u.erg ** (e_weight + 1) * self.units() if units else 1)\n\n\nclass BlackBody(ThermalSpectrum):\n\n @staticmethod\n def compute_energy_density(kT):\n \"\"\" Comparing the formula for a blackbody spectrum\n with prefactor \n\n pref = 8pi/(hc)^3\n\n to the fomrula for a general thermal spectrum:\n\n pref = 15*U/(pi*kT)^4,\n\n we find that for a blackbody spectrum,\n we have a thermal spectrum with\n\n U = (8*pi/(hc)^3)*(pi*kT)^4/15. 
\"\"\"\n h = u.planck\n c = u.speed_of_light\n pi = np.pi\n return 8 * pi / (h * c) ** 3 * ((pi * kT) ** 4 / 15)\n\n def __init__(self, kT=None, T=None):\n \"\"\" Implement a blackbody spectrum.\n\n The formula for the blackbody spectrum is \n \n n(E)=((8pi)/(hc)^3)*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * kT is the temperature of the photons\n\n This formula is on the top of page 208 in R&L\n \"\"\"\n if kT is not None:\n kT = kT\n elif T is not None:\n kT = u.boltzmann * T\n else:\n raise Exception('kT or k must be passed to BlackBody')\n energy_density = BlackBody.compute_energy_density(kT)\n super(BlackBody, self).__init__(energy_density=energy_density, kT=kT)\n\n\nclass CMB(BlackBody):\n \"\"\" The CMB is a blackbody spectrum with temperature 2.725K.\n\n Note, the energy density for a CMB spectrum is 0.26 eV/cm^3:\n \n >>> cmb = CMB()\n >>> print u.repr(cmb.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.26 eV/cm^3\n \"\"\"\n\n def __init__(self):\n super(CMB, self).__init__(T=2.725 * u.kelvin)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n",
"step-5": "\"\"\" sed_thermal.py\n\n Author: Joshua Lande <joshualande@gmail.com>\n\"\"\"\nimport numpy as np\nfrom scipy import integrate\n\nfrom . sed_integrate import logsimps\nfrom . sed_spectrum import Spectrum\nfrom . import sed_config\nfrom . import units as u\n\nclass ThermalSpectrum(Spectrum):\n\n vectorized = True\n\n def __init__(self, energy_density, kT=None, T=None):\n \"\"\" A thermal spectrum has the sameself):\n spectral shape as the blackbody\n spectrum but has an arbitrarily \n normalizable energy density.\n\n The thermal spectrum is\n\n n(E) = 15*U/(pi*kT)^4*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * U is the total energy per unit volume.\n * kT is the temperature of the photons\n\n This formula is equation 33 from Sturner et al 1997\n http://iopscience.iop.org/0004-637X/490/2/619/pdf/35841.pdf\n \n Input can be either 'kT' in energy units or\n 'T' in temperature units.\n\n For example, in XXX et al, the infrared photon\n field has temperature kT=3e-3 eV and energy\n density U=0.9 eV/cm^3\n\n >>> infrared=ThermalSpectrum(kT=3e-3*u.eV, energy_density=0.9*u.eV/u.cm**3)\n\n To convince yourself that this code correctly normalized\n the spectrum, you can explicity integrate E*dN/dE = total energy per unit volume:\n\n >>> print u.repr(infrared.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.90 eV/cm^3\n \"\"\"\n if kT is not None: kT = kT\n elif T is not None: kT = u.boltzmann*kwargs.pop('T')\n else: raise Exception(\"kT or k must be passed to ThermalSpectrum\")\n\n self.kT = float(kT/u.erg)\n\n # function is essentially 0 outside of this energy range.\n self.emin=1e-4*self.kT\n self.emax=1e2*self.kT\n\n # equation 33 in Sturner et al 1997\n # Note, prefactor*E^2/(exp(E/kT)-1) has units\n # of photons/energy/volume, so prefactor has units\n # of photons/energy^3/volume.\n self.pref = 15*energy_density/(np.pi*kT)**4\n self.pref = float(self.pref/(u.erg**-3*u.cm**-3))\n\n @staticmethod\n 
def occupation_number(x):\n \"\"\" This is equation 1.49 in R&L. \"\"\"\n return 1/(np.exp(x)-1)\n\n def _spectrum(self, energy):\n \"\"\" Return the energy density in units of [1/erg/cm^-3].\"\"\"\n return self.pref*energy**2*self.occupation_number(energy/self.kT)\n\n @staticmethod \n def units_string(): return '1/erg/cm^3'\n\n def integrate(self, units=True, e_weight=0):\n \"\"\" Integrate the thermal spectrum from emin to emax.\n \n Returns the integral in units of [erg^e_weight/cm^-3] \"\"\"\n int = logsimps(lambda e: e**e_weight*self(e, units=False), self.emin, self.emax, sed_config.PER_DECADE)\n return int*(u.erg**(e_weight+1)*self.units() if units else 1)\n\nclass BlackBody(ThermalSpectrum):\n\n @staticmethod\n def compute_energy_density(kT):\n \"\"\" Comparing the formula for a blackbody spectrum\n with prefactor \n\n pref = 8pi/(hc)^3\n\n to the fomrula for a general thermal spectrum:\n\n pref = 15*U/(pi*kT)^4,\n\n we find that for a blackbody spectrum,\n we have a thermal spectrum with\n\n U = (8*pi/(hc)^3)*(pi*kT)^4/15. 
\"\"\"\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)\n\n\n def __init__(self,kT=None,T=None):\n \"\"\" Implement a blackbody spectrum.\n\n The formula for the blackbody spectrum is \n \n n(E)=((8pi)/(hc)^3)*E^2/(exp(E/kT)-1)\n\n where \n * n(E) is the number of photons per unit energy per unit volume,\n * kT is the temperature of the photons\n\n This formula is on the top of page 208 in R&L\n \"\"\"\n if kT is not None: kT = kT\n elif T is not None: kT = u.boltzmann*T\n else: raise Exception(\"kT or k must be passed to BlackBody\")\n\n energy_density=BlackBody.compute_energy_density(kT)\n super(BlackBody,self).__init__(energy_density=energy_density, kT=kT)\n\n\nclass CMB(BlackBody):\n \"\"\" The CMB is a blackbody spectrum with temperature 2.725K.\n\n Note, the energy density for a CMB spectrum is 0.26 eV/cm^3:\n \n >>> cmb = CMB()\n >>> print u.repr(cmb.integrate(units=True,e_weight=1),'eV/cm^3','%.2f')\n 0.26 eV/cm^3\n \"\"\"\n def __init__(self): super(CMB,self).__init__(T=2.725*u.kelvin)\n\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n\n",
"step-ids": [
9,
12,
13,
14,
16
]
}
|
[
9,
12,
13,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_word_score(word_1, n_1):
    """Return the word-game score of *word_1*.

    Each letter contributes its fixed letter value; the letter sum is
    multiplied by the word length, and a 50-point bonus is added when the
    word is exactly ``n_1`` letters long (the whole hand was used).

    :param word_1: word to score (expected lowercase a-z).
    :param n_1: hand size; the bonus applies when ``len(word_1) == n_1``.
    :return: the integer score.
    """
    letter_values = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,
        'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p':
        3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,
        'y': 4, 'z': 10}
    length_1 = len(word_1)
    # Characters outside the table (digits, punctuation) score 0, matching
    # the original membership check.
    sum_1 = sum(letter_values.get(ch, 0) for ch in word_1) * length_1
    if n_1 == length_1:
        sum_1 += 50
    return sum_1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_word_score(word_1, n_1):
    """Score *word_1*: letter values summed, scaled by the word length,
    plus a 50-point bonus when the word length equals *n_1*."""
    dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,
        'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1,
        'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4,
        'x': 8, 'y': 4, 'z': 10}
    word_length = len(word_1)
    letter_total = 0
    for letter in word_1:
        # Unknown characters contribute nothing, as in the membership check.
        letter_total += dictionary_.get(letter, 0)
    score = letter_total * word_length
    return score + 50 if word_length == n_1 else score
def main():
    """Read "<word> <n>" from stdin and print the word's score."""
    tokens = input().split(' ')
    word = tokens[0]
    hand_size = int(tokens[1])
    print(get_word_score(word, hand_size))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_word_score(word_1, n_1):
    """Compute the game score for *word_1* given hand size *n_1*: sum of
    letter values times word length, +50 when all *n_1* letters were used."""
    dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,
        'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1,
        'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4,
        'x': 8, 'y': 4, 'z': 10}
    count = len(word_1)
    base = sum(dictionary_[c] for c in word_1 if c in dictionary_)
    total = base * count
    if count == n_1:
        total += 50
    return total
def main():
    """CLI driver: parse "<word> <n>" from one stdin line, print the score."""
    line = input()
    parts = line.split(' ')
    score = get_word_score(parts[0], int(parts[1]))
    print(score)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
"""game"""
def get_word_score(word_1, n_1):
    """Return the word-game score of *word_1*.

    The score is the sum of the letter values of ``word_1`` multiplied by
    the word's length, plus a 50-point bonus when the word uses exactly
    ``n_1`` letters (the whole hand).

    :param word_1: word to score (expected lowercase a-z).
    :param n_1: hand size; the bonus applies when ``len(word_1) == n_1``.
    :return: the integer score.
    """
    dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10}
    length_1 = len(word_1)
    # Characters outside the table score 0, matching the original
    # membership check; dead commented-out scaffolding removed.
    sum_1 = sum(dictionary_.get(ch, 0) for ch in word_1) * length_1
    if n_1 == length_1:
        sum_1 += 50
    return sum_1
def main():
    """Entry point: read "<word> <n>" from stdin and print the score."""
    fields = input().split(" ")
    print(get_word_score(fields[0], int(fields[1])))
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "325708d5e8b71bad4806b59f3f86a737c1baef8d",
"index": 3976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_word_score(word_1, n_1):\n \"\"\"string\"\"\"\n sum_1 = 0\n dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p':\n 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,\n 'y': 4, 'z': 10}\n length_1 = len(word_1)\n for i in word_1:\n if i in dictionary_.keys():\n sum_1 = sum_1 + dictionary_[i]\n sum_1 = sum_1 * length_1\n if n_1 == length_1:\n sum_1 += 50\n return sum_1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_word_score(word_1, n_1):\n \"\"\"string\"\"\"\n sum_1 = 0\n dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p':\n 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,\n 'y': 4, 'z': 10}\n length_1 = len(word_1)\n for i in word_1:\n if i in dictionary_.keys():\n sum_1 = sum_1 + dictionary_[i]\n sum_1 = sum_1 * length_1\n if n_1 == length_1:\n sum_1 += 50\n return sum_1\n\n\ndef main():\n \"\"\"\n Main function for the given problem\n \"\"\"\n data = input()\n data = data.split(' ')\n print(get_word_score(data[0], int(data[1])))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_word_score(word_1, n_1):\n \"\"\"string\"\"\"\n sum_1 = 0\n dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p':\n 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,\n 'y': 4, 'z': 10}\n length_1 = len(word_1)\n for i in word_1:\n if i in dictionary_.keys():\n sum_1 = sum_1 + dictionary_[i]\n sum_1 = sum_1 * length_1\n if n_1 == length_1:\n sum_1 += 50\n return sum_1\n\n\ndef main():\n \"\"\"\n Main function for the given problem\n \"\"\"\n data = input()\n data = data.split(' ')\n print(get_word_score(data[0], int(data[1])))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"game\"\"\"\ndef get_word_score(word_1, n_1):\n \"\"\"string\"\"\"\n # import string\n # key = list(string.ascii_lowercase)\n # value = []\n # x=1\n sum_1 = 0\n # for i in range(0, 26):\n # value.append(x)\n # x+=1\n # dictionary_ = dict(zip(key, value))\n # print(dictionary_)\n dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10}\n length_1 = len(word_1)\n # if length_1 <= n_1:\n for i in word_1:\n if i in dictionary_.keys():\n sum_1 = sum_1 + dictionary_[i]\n sum_1 = sum_1*length_1\n if n_1 == length_1:\n sum_1 += 50\n return sum_1\n # print(\"worng inputs\")\ndef main():\n '''\n Main function for the given problem\n '''\n data = input()\n data = data.split(\" \")\n print(get_word_score(data[0], int(data[1])))\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Bookings(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Airlines(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Users(models.Model):
user_id = models.CharField(max_length=16)
email = models.EmailField(max_length=50, unique=True)
password = models.CharField(max_length=20)
phone_number = models.IntegerField()
gender = models.CharField(max_length=10)
def __str__(self):
return self.email
class Bookings(models.Model):
booking_id = models.AutoField(primary_key=True)
email = models.ForeignKey(Users, on_delete=models.CASCADE)
flight_num = models.ForeignKey(Airlines, on_delete=models.CASCADE,
default='00000', editable=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Airlines(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.flight_number
class Users(models.Model):
user_id = models.CharField(max_length=16)
email = models.EmailField(max_length=50, unique=True)
password = models.CharField(max_length=20)
phone_number = models.IntegerField()
gender = models.CharField(max_length=10)
def __str__(self):
return self.email
class Bookings(models.Model):
booking_id = models.AutoField(primary_key=True)
email = models.ForeignKey(Users, on_delete=models.CASCADE)
flight_num = models.ForeignKey(Airlines, on_delete=models.CASCADE,
default='00000', editable=True)
<|reserved_special_token_1|>
from django.db import models
class Airlines(models.Model):
    """A scheduled flight: identifying codes, route, times and base fare."""
    flight_number = models.CharField(max_length=8, unique=True)  # unique flight code (natural key for display)
    airlines_id = models.CharField(max_length=10)  # carrier identifier (free-form text)
    source = models.CharField(max_length=20)  # origin, free-form text
    destination = models.CharField(max_length=20)  # destination, free-form text
    departure = models.TimeField()  # time of day only — no date component stored
    arrival = models.TimeField()  # time of day only — no date component stored
    base_price = models.DecimalField(decimal_places=2, max_digits=10)  # fare, 2 decimal places

    def __str__(self):
        # Flights are listed by their flight code.
        return self.flight_number
class Users(models.Model):
    """A registered user account."""
    user_id = models.CharField(max_length=16)  # user identifier (not the primary key)
    email = models.EmailField(max_length=50, unique=True)  # unique login/contact address
    # NOTE(review): password stored as a plain CharField — confirm hashing
    # happens elsewhere before save.
    password = models.CharField(max_length=20)
    phone_number = models.IntegerField()  # stored numerically; leading zeros are lost
    gender = models.CharField(max_length=10)

    def __str__(self):
        # Accounts are listed by email address.
        return self.email
class Bookings(models.Model):
    """A booking linking a user to a flight; cascades on either deletion."""
    booking_id = models.AutoField(primary_key=True)  # surrogate key
    email = models.ForeignKey(Users, on_delete=models.CASCADE)  # booking owner
    # NOTE(review): default='00000' must match an existing Airlines PK for a
    # save without an explicit flight to succeed — confirm such a row exists.
    flight_num = models.ForeignKey(Airlines, on_delete=models.CASCADE,
        default='00000', editable=True)
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class Airlines(models.Model):
    """A scheduled flight: identifying codes, route, times and base fare."""

    flight_number = models.CharField(max_length=8, unique=True)
    airlines_id = models.CharField(max_length=10)
    source = models.CharField(max_length=20)
    destination = models.CharField(max_length=20)
    departure = models.TimeField()
    arrival = models.TimeField()
    base_price = models.DecimalField(decimal_places=2, max_digits=10)

    def __str__(self):
        """List flights by their flight code."""
        return self.flight_number
class Users(models.Model):
    """A registered user account, identified externally by email."""

    user_id = models.CharField(max_length=16)
    email = models.EmailField(max_length=50, unique=True)
    password = models.CharField(max_length=20)
    phone_number = models.IntegerField()
    gender = models.CharField(max_length=10)

    def __str__(self):
        """List accounts by email address."""
        return self.email
class Bookings(models.Model):
    """A booking joining one user to one flight; cascades on deletion."""

    booking_id = models.AutoField(primary_key=True)
    email = models.ForeignKey(Users, on_delete=models.CASCADE)
    flight_num = models.ForeignKey(
        Airlines, on_delete=models.CASCADE, default='00000', editable=True)
|
flexible
|
{
"blob_id": "e57b30a7a1cf987918abfb3cb7d612bdead2ddcd",
"index": 406,
"step-1": "<mask token>\n\n\nclass Bookings(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Airlines(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Users(models.Model):\n user_id = models.CharField(max_length=16)\n email = models.EmailField(max_length=50, unique=True)\n password = models.CharField(max_length=20)\n phone_number = models.IntegerField()\n gender = models.CharField(max_length=10)\n\n def __str__(self):\n return self.email\n\n\nclass Bookings(models.Model):\n booking_id = models.AutoField(primary_key=True)\n email = models.ForeignKey(Users, on_delete=models.CASCADE)\n flight_num = models.ForeignKey(Airlines, on_delete=models.CASCADE,\n default='00000', editable=True)\n",
"step-3": "<mask token>\n\n\nclass Airlines(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.flight_number\n\n\nclass Users(models.Model):\n user_id = models.CharField(max_length=16)\n email = models.EmailField(max_length=50, unique=True)\n password = models.CharField(max_length=20)\n phone_number = models.IntegerField()\n gender = models.CharField(max_length=10)\n\n def __str__(self):\n return self.email\n\n\nclass Bookings(models.Model):\n booking_id = models.AutoField(primary_key=True)\n email = models.ForeignKey(Users, on_delete=models.CASCADE)\n flight_num = models.ForeignKey(Airlines, on_delete=models.CASCADE,\n default='00000', editable=True)\n",
"step-4": "from django.db import models\n\n\nclass Airlines(models.Model):\n flight_number = models.CharField(max_length=8, unique=True)\n airlines_id = models.CharField(max_length=10)\n source = models.CharField(max_length=20)\n destination = models.CharField(max_length=20)\n departure = models.TimeField()\n arrival = models.TimeField()\n base_price = models.DecimalField(decimal_places=2, max_digits=10)\n\n def __str__(self):\n return self.flight_number\n\n\nclass Users(models.Model):\n user_id = models.CharField(max_length=16)\n email = models.EmailField(max_length=50, unique=True)\n password = models.CharField(max_length=20)\n phone_number = models.IntegerField()\n gender = models.CharField(max_length=10)\n\n def __str__(self):\n return self.email\n\n\nclass Bookings(models.Model):\n booking_id = models.AutoField(primary_key=True)\n email = models.ForeignKey(Users, on_delete=models.CASCADE)\n flight_num = models.ForeignKey(Airlines, on_delete=models.CASCADE,\n default='00000', editable=True)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Airlines(models.Model):\n\tflight_number=models.CharField(max_length=8,unique=True)\n\tairlines_id=models.CharField(max_length=10)\n\tsource=models.CharField(max_length=20)\n\tdestination=models.CharField(max_length=20)\n\tdeparture=models.TimeField()\n\tarrival=models.TimeField()\n\tbase_price=models.DecimalField(decimal_places=2,max_digits=10)\n\n\tdef __str__(self):\n\t\treturn self.flight_number\n\nclass Users(models.Model):\n\tuser_id=models.CharField(max_length=16)\n\temail=models.EmailField(max_length=50,unique=True)\n\tpassword=models.CharField(max_length=20)\n\tphone_number=models.IntegerField()\n\tgender=models.CharField(max_length=10)\n\tdef __str__(self):\n\t\treturn self.email\n\nclass Bookings(models.Model):\n\tbooking_id=models.AutoField(primary_key=True)\n\temail=models.ForeignKey(Users,on_delete=models.CASCADE)\n\tflight_num=models.ForeignKey(Airlines,on_delete=models.CASCADE,default='00000',editable=True)\n\n",
"step-ids": [
1,
6,
7,
9,
10
]
}
|
[
1,
6,
7,
9,
10
] |
Relevance
Thus, designing an automatic MWP (math word problem) solver with semantic
understanding and inference capability has been considered a crucial step
towards general AI. Solving a math problem manually involves many steps,
so an automatic MWP solver would reduce that manual effort.
Attachment final.pdf added.Conversation opened. 1 read message.
Skip to content
Using Gmail with screen readers
jithin
5 of about 62
Code
Inbox
x
jithin p <jithinappu.p6@gmail.com>
Attachments
Tue, 6 Mar, 23:44
to me
Attachments area
import wx
import MySQLdb
import nltk
import string
from string import punctuation
from nltk.corpus import stopwords
from nltk.corpus import wordnet as w
from wx import grid as gr
from itertools import chain
from nltk.corpus.reader import NOUN
from scipy import spatial
import os
import sys
# Module-level shared state, read and written by the GUI windows below.
dbc=''            # MySQLdb connection handle, set once "Connect" succeeds
database_name=''  # database chosen in the selector window
#natural_query=''
query=''          # generated SQL string, consumed by the result window
path=os.path.dirname(os.path.realpath(sys.argv[0]))  # directory containing this script
class MainWindow(wx.Frame) :
    """Launcher frame: title text plus a single Translator button that
    opens the translator window."""
    def __init__(self,parent,id) :
        """Lay out the title, the Translator button, a status bar and menus."""
        wx.Frame.__init__(self,parent,id,'Natural Query To SQL Translator',size=(500,400))
        panel = wx.Panel(self)
        panel.SetBackgroundColour(wx.Colour(200,200,225))
        font1 = wx.Font(30, wx.DEFAULT, wx.MODERN, wx.FONTWEIGHT_BOLD)
        #name_top = wx.StaticText(panel, -1, "Natural Query To", (42,50), (360,-1),wx.ALIGN_CENTER)
        #name_top.SetFont(font)
        name = wx.StaticText(panel, -1, "Query Translator", (59,50), (360,-1),wx.ALIGN_CENTER)
        name.SetFont(font1)
        font2 = wx.Font(12, wx.DEFAULT, wx.MODERN, wx.FONTWEIGHT_BOLD)
        name.SetForegroundColour('blue')
        translator_button = wx.Button(panel,label="Translator",pos=(160,200),size=(175,60))
        translator_button.SetBackgroundColour(wx.Colour(220,220,230))
        translator_button.SetFont(font2)
        self.Bind(wx.EVT_BUTTON, self.translating_window, translator_button)
        #self.Bind(wx.EVT_BUTTON, self.database_window, translator_button)
        statusbar = self.CreateStatusBar()
        menubar = wx.MenuBar()
        first = wx.Menu()
        second = wx.Menu()
        first.Append(wx.NewId(),"New Window","This is new window")
        second.Append(wx.NewId(),"Open...","Open new window")
        menubar.Append(first,"File")
        menubar.Append(second,"Edit")
        self.SetMenuBar(menubar)
    '''
    def databse_window(self,event):
        dtbase_window = create_databse_window(parent=None,id=-1)
        dtbase_window.Show()
    '''
    def translating_window(self,event):
        """Button handler: open the translator window."""
        translate_window = create_translate_window(parent=None,id=-1)
        translate_window.Show()
'''
class create_databse_window(wx.Frame) :
def __init__(self,parent,id) :
wx.Frame.__init__(self,parent,id,'Query Translator',size=(500,300))
self.panel = wx.Panel(self)
self.panel.SetBackgroundColour(wx.Colour(200,200,225))
font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)
self.database_name = wx.StaticText(self.panel, -3, "Database Name", (42,85), (360,-1))
self.database_name.SetFont(font)
self.database_name_text = wx.TextCtrl(self.panel, -1, "", pos=(200,75), size=(400,42))
self.natural_query_text.SetInsertionPoint(0)
'''
class create_translate_window(wx.Frame) :
    """Translator window: connect to MySQL, choose a database, enter an
    English query, generate its SQL translation and show the result."""
    global dbc
    global database_name
    global natural_query
    global query
    def __init__(self,parent,id) :
        """Build the frame: Connect/Select buttons, query boxes, Result button."""
        wx.Frame.__init__(self,parent,id,'Query Translator',size=(650,600))
        self.panel = wx.Panel(self)
        self.panel.SetBackgroundColour(wx.Colour(200,200,225))
        font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)
        database_connect_button = wx.Button(self.panel, label="Connect", pos=(262,50), size=(120,40))
        database_connect_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.connect_database, database_connect_button)
        database_button = wx.Button(self.panel, label="Select Database", pos=(250,130), size=(150,40))
        database_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.select_database, database_button)
        self.selected_dtname = wx.StaticText(self.panel, -3, "Database", (42,215), (360,-1))
        self.selected_dtname.SetFont(font)
        # Text box that displays the currently selected database name.
        self.sel_dtname = wx.TextCtrl(self.panel, -1, pos=(207,210), size=(250,-1))
        self.sel_dtname.SetInsertionPoint(0)
        self.natural_query = wx.StaticText(self.panel, -3, "English query", (42,290), (360,-1))
        self.natural_query.SetFont(font)
        self.natural_query_text = wx.TextCtrl(self.panel, -1, pos=(185,280), size=(300,42), style=wx.TE_MULTILINE)
        self.natural_query_text.SetInsertionPoint(0)
        generate_button = wx.Button(self.panel, label="Generate", pos=(265,360), size=(120,40))
        generate_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.generate_query, generate_button)
        self.sql_query = wx.StaticText(self.panel, -3, "SQL query", (42,450), (360,-1))
        self.sql_query.SetFont(font)
        self.sql_query_text = wx.TextCtrl(self.panel, -1, pos=(185,440), size=(300,42), style=wx.TE_MULTILINE)
        self.sql_query_text.SetInsertionPoint(0)
        result_button = wx.Button(self.panel, label="Result", pos=(265,519), size=(120,40))
        result_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.show_result, result_button)
    def connect_database(self,event):
        """Open a MySQL connection as root@localhost, publish it via the
        module-level `dbc`, and report success/failure in a dialog."""
        global dbc
        try:
            self.dbc=MySQLdb.connect("localhost","root","")
            dbc=self.dbc
            #print dbc
            box=wx.MessageDialog(None,"Connection Established",'Alert',wx.OK)
            ans=box.ShowModal()
            box.Destroy()
        # NOTE(review): bare except hides the real failure; narrow to
        # MySQLdb errors and log the exception.
        except:
            box=wx.MessageDialog(None,"Error occured while establishing connection",'Alert',wx.OK)
            ans=box.ShowModal()
            box.Destroy()
    #def generate_query(self,event):
    #    t=self.natural_query_text.GetValue()
    #    print t
    def select_database(self,event):
        """Open the database-selector child window unless one is already open."""
        #lobal dbc
        try:
            # GetSize() succeeding means the selector window already exists.
            temp=self.dtbase_window.GetSize()
        except:
            self.dtbase_window = self.create_databse_window(parent=None,id=1)
            self.dtbase_window.Show()
            # When the selector closes, copy the chosen name into this window.
            self.dtbase_window.Bind(wx.EVT_CLOSE,self.addDatabase,self.dtbase_window)
        #print dbc
    def addDatabase(self,event):
        """EVT_CLOSE handler: show the selected database name, then destroy
        the selector window."""
        try:
            global database_name
            #print database_name
            self.dt_name=database_name
            self.sel_dtname.SetValue(self.dt_name)
            self.dtbase_window.Destroy()
        except:
            self.dtbase_window.Destroy()
    def generate_query(self,event):
        """Run the typed English query through the `feature` pipeline and
        show the generated SQL in the SQL text box."""
        global query
        self.n_query_feature_file=[]
        #global natural_query
        #print "hdgfhgf"
        t=self.natural_query_text.GetValue()
        self.natural_queryy=t
        #print self.natural_queryy
        self.n_query_feature_file.append(feature(self.natural_queryy))
        #print self.n_query_feature_file
        for f in self.n_query_feature_file:
            # Pipeline stages; mapping() presumably sets the module-level
            # `query` — csv_file/mapping are defined outside this view.
            f.extract_feature()
            f.csv_file()
            f.mapping()
        print "query"
        print query
        self.queryy=query
        if len(self.queryy) != 0:
            self.sql_query_text.SetValue(self.queryy)
    def show_result(self,event):
        """Open the result-grid child window unless one is already open."""
        #global query
        try:
            temp=self.reslt_window.GetSize()
        except:
            self.reslt_window = self.create_result_window(parent=None,id=1)
            self.reslt_window.Show()
            #self.reslt_window.Bind(wx.EVT_CLOSE,self.addDatabase,self.dtbase_window)
#self.reslt_window.Bind(wx.EVT_CLOSE,self.addDatabase,self.dtbase_window)
    class create_databse_window(wx.Frame):
        """Child window listing the server's databases; the chosen name is
        published through the module-level `database_name`."""
        global dbc
        global database_name
        def __init__(self,parent,id) :
            """Build the chooser: a wx.Choice plus Refresh/Select buttons."""
            wx.Frame.__init__(self,parent,id,'Select Database',size=(590,350))
            self.panel = wx.Panel(self)
            self.panel.SetBackgroundColour(wx.Colour(200,200,225))
            font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)
            self.sel_dtbase = wx.StaticText(self.panel, -3, "Select Database", (42,100), (360,-1))
            self.sel_dtbase.SetFont(font)
            self.dt_choice=wx.Choice(self.panel,-1,pos=(190,95),size=(250,30))
            self.dt_choice.SetSelection(0)
            refresh_button = wx.Button(self.panel, label="Refresh", pos=(450,95), size=(90,30))
            refresh_button.SetFont(font)
            self.Bind(wx.EVT_BUTTON, self.list_dt_base, refresh_button)
            select_button = wx.Button(self.panel, label="Select", pos=(250,200), size=(95,30))
            select_button.SetFont(font)
            self.Bind(wx.EVT_BUTTON, self.database_return, select_button)
            #t = self.dt_choice.GetSelection()
            #print t
            #print dbc
        def list_dt_base(self,event):
            """Refresh handler: run SHOW DATABASES and fill the choice list."""
            global dbc
            global database_name
            #try:
            self.list_dtnames=[]
            self.dbc=dbc
            #print dbc
            cursor=self.dbc.cursor()
            cursor.execute("SHOW DATABASES")
            self.dt_names=cursor.fetchall()
            #print self.dt_names
            # fetchall() yields one-element tuples; keep just the name.
            for i in self.dt_names:
                #cursor.execute("DESC "+i[0])
                name_t=i[0]
                #t=(i[0],det)
                self.list_dtnames.append(name_t)
                #self.dt_choice.SetItems(name_t)
            #database_name=self.list_dtnames
            self.dt_choice.SetItems(self.list_dtnames)
            #print self.list_dtnames
            #except:
            #    box=wx.MessageDialog(None,"Error occured. Connect database",'Alert',wx.OK)
            #    ans=box.ShowModal()
            #    box.Destroy()
        def database_return(self,event):
            """Select handler: USE the chosen database, publish its name via
            `database_name`, then close this window."""
            try:
                global dbc
                global database_name
                self.dbc=dbc
                t = self.dt_choice.GetSelection()
                #print self.list_dtnames[t]
                cursor=self.dbc.cursor()
                # NOTE(review): string-built SQL; names come from the server
                # here, but proper identifier quoting would be safer.
                cursor.execute("USE "+self.list_dtnames[t])
                dt_choose=cursor.fetchall()
                print dt_choose
                database_name=self.list_dtnames[t]
                #self.sel_dtname.SetValue(database_name)
                self.Close()
            except:
                box=wx.MessageDialog(None,"Database no longer exist. Hit the refresh button",'Alert',wx.OK)
                ans=box.ShowModal()
                box.Destroy()
    class create_result_window(wx.Frame):
        """Child window that executes the generated SQL (module-level
        `query`) and displays the rows in a wx.grid table."""
        global dbc
        global database_name
        global query
        def __init__(self,parent,id) :
            """Parse the column list out of the SQL, run it, fill the grid."""
            wx.Frame.__init__(self,parent,id,'Result',size=(500,600))
            self.panel = wx.Panel(self)
            self.panel.SetBackgroundColour(wx.Colour(200,200,225))
            font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)
            self.queryy=query
            self.dbc=dbc
            attribute_name=[]
            # Tokenize the SQL on spaces, then split comma-joined column lists.
            t=self.queryy.split(' ')
            tt=[]
            for i in t:
                tt.append(i.split(','))
            print tt
            # Find the FROM keyword; tokens between SELECT and FROM are the
            # selected columns.  NOTE(review): `s` stays unbound if the query
            # has no 'FROM' token.
            for i in range(len(tt)):
                if 'FROM' in tt[i]:
                    s=i
            #s=tt.index('FROM')
            #if len(tt) > 0:
            #for i in range(len(tt)):
            #    attribute_name.append(tt[i])
            #else:
            for i in tt[1:s]:
                for j in i:
                    attribute_name.append(j)
            # For SELECT *, fetch the real column names via DESC <table>.
            if '*' in attribute_name:
                cursor=self.dbc.cursor()
                cursor.execute("DESC "+tt[s+1][0])
                det=cursor.fetchall()
                attribute_name=[]
                for i in range(len(det)):
                    attribute_name.append(det[i][0])
            #try:
            cursor=self.dbc.cursor()
            cursor.execute(self.queryy)
            result=cursor.fetchall()
            print result
            # NOTE(review): result[0] raises IndexError on an empty result set.
            n_rows=len(result)
            n_cols=len(result[0])
            table=gr.Grid(self.panel, -1, size=(500,600))
            #print attribute_name
            table.CreateGrid(n_rows,n_cols)
            for i in range(len(attribute_name)):
                table.SetColLabelValue(i,attribute_name[i])
            for i in range(len(result)):
                for j in range(len(result[i])):
                    table.SetCellValue(i,j,str(result[i][j]))
            #except:
            #print "grid error"
class feature():
global dbc
global database_name
global query
    def __init__(self,query):
        """Store the raw English query and tokenize it into words with NLTK."""
        self.natural_query=query
        self.token=nltk.tokenize.word_tokenize(self.natural_query)
        print self.token  # debug: show the token list
def extract_feature(self):
global query
self.natural_query_features=[]
self.list1=self.token
#Removing punctuations
remov_p=[]
for i in self.list1:
if i in punctuation:
remov_p.append(self.list1.index(i))
remov_p.reverse()
for j in remov_p[:]:
#print j
del(self.list1[j])
#print self.list2
self.featuress=self.list1
#print self.featuress
#word co-occurrence matrix
self.occurr=[]
self.words=[]
self.list2=self.featuress
for i in self.list2:
if i not in self.words:
self.words.append(i)
w=5
#self.list4 = self.list3
self.occurr_val=[]
for i in range(len(self.list2)):
self.occurr=[0 for x in range(len(self.words)+1)]
self.occurr[0]=self.list2[i]
j=i
#while (j+w) <= (len(self.list1)-1):
if (j+w+1) <= (len(self.list2)-1):
j=j+w+1
else:
j=len(self.list2)
#print "j"
#print j
for k in range(i+1,j):
#print "i"
#print i
#self.occurr_val.append(self.occurr)
self.word=self.list2[k]
try:
for p in range(len(self.words)):
if self.words[p] == self.list2[i]:
ind_row_word=p
if self.list2[k] == self.list2[i]:
occ=w-(k-i-1)
ind=self.words.index(self.word)
#self.occurr[ind]+=occ
self.occurr_val[ind_row_word][ind+1]+=occ
else:
occ=w-(k-i-1)
#print k
ind=self.words.index(self.word)
#self.occurr[ind]+=occ
self.occurr_val[ind_row_word][ind+1]+=occ
except:
if self.list2[k] == self.list2[i]:
occ=w-(k-i-1)
ind=self.words.index(self.word)
self.occurr[ind+1]+=occ
#if k+1 > j-1:
# self.occurr[k]+=0
else:
occ=w-(k-i-1)
#print k
ind=self.words.index(self.word)
self.occurr[ind+1]+=occ
#self.occurr_val.append(self.occurr)
#print self.words[i]
if len(self.occurr_val) != len(self.words):
self.occurr_val.append(self.occurr)
print self.occurr_val
#Postagging
self.list3=self.featuress
tagged_string=nltk.pos_tag(self.list3)
self.featuress=tagged_string
print self.featuress
#Noun clause extracting
self.noun_clause_list=[]
self.list4=self.featuress
for i in range(len(self.list4)):
if self.list4[i][1] == 'NN' or self.list4[i][1] == 'NNS':
self.noun_clause_list.append(self.list4[i][0])
print self.noun_clause_list
'''
#Removing stopwords
self.list5=self.featuress
remov_s=[]
stop_words = set(stopwords.words('english'))
for i in range(len(self.list5)):
if self.list5[i][0] in stop_words:
remov_s.append(i)
#print remov_s
remov_s.reverse()
#print remov_s
for j in range(len(remov_s)):
#print self.list4
t=remov_s[j]
#print self.list5[t]
del(self.list5[t])
#print self.list5
self.featuress=self.list5
print self.featuress
'''
#Finding Cosine-similarity of noun-pro noun
self.list6=self.featuress
self.list7=self.occurr_val
self.list_pro_noun=[]
#self.temp_occ_val=[]
self.occ_values_n_p=[]
for i in range(len(self.list6)):
self.list_noun=[]
if self.list6[i][1] == 'NN' or self.list6[i][1] == 'NNS':
ind=self.words.index(self.list6[i][0])
for j in self.list7[ind][1:]:
self.list_noun.append(j)
for k in self.list7:
self.list_noun.append(k[ind+1])
#print self.list_noun
for j in range(i+1,len(self.list6)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list6[j][1] == 'NNP':
ind1=self.words.index(self.list6[j][0])
for l in self.list7[ind1][1:]:
self.list_pro_noun.append(l)
for m in self.list7:
self.list_pro_noun.append(m[ind1+1])
#print self.list_pro_noun
#self.list_pro_noun=[]
occ_value=1-spatial.distance.cosine(self.list_noun,self.list_pro_noun)
self.temp_occ_val.append(self.list6[i][0])
self.temp_occ_val.append(self.list6[j][0])
self.temp_occ_val.append(occ_value)
#print occ_value
self.list_pro_noun=[]
self.occ_values_n_p.append(self.temp_occ_val)
self.occ_values_n_p.sort()
#Remove empty lists
del_list=[]
#self.occ_values_n_p
for i in range(len(self.occ_values_n_p)):
if len(self.occ_values_n_p[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list[:]:
print del_list
del(self.occ_values_n_p[j])
#self.occ_values_n_p.sort(reverse=True)
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_n_p
self.occ_values_n_p=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_n_p.append(j)
print self.occ_values_n_p
#Finding cosine similarity of verb-noun
self.list8=self.featuress
self.list9=self.occurr_val
#self.list_noun=[]
self.list_noun1=[]
#self.temp_occ_val=[]
self.occ_values_v_n=[]
for i in range(len(self.list8)):
self.list_verb=[]
if self.list8[i][1] == 'VB' or self.list8[i][1] == 'VBP':
ind=self.words.index(self.list8[i][0])
for j in self.list9[ind][1:]:
self.list_verb.append(j)
for k in self.list9:
self.list_verb.append(k[ind+1])
#print self.list_verb
for j in range(i+1,len(self.list8)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list8[j][1] == 'NN' or self.list8[j][1]=='NNS' or self.list8[j][1]=='NNP':
ind1=self.words.index(self.list8[j][0])
for l in self.list9[ind1][1:]:
self.list_noun1.append(l)
for m in self.list9:
self.list_noun1.append(m[ind1+1])
#print self.list_noun1
#self.list_pro_noun=[]
occ_value=1-spatial.distance.cosine(self.list_verb,self.list_noun1)
self.temp_occ_val.append(self.list8[i][0])
self.temp_occ_val.append(self.list8[j][0])
self.temp_occ_val.append(occ_value)
#print self.temp_occ_val
self.list_noun1=[]
self.occ_values_v_n.append(self.temp_occ_val)
self.occ_values_v_n.sort()
#Remove empty lists
del_list=[]
for i in range(len(self.occ_values_v_n)):
if len(self.occ_values_v_n[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list:
del(self.occ_values_v_n[j])
#self.occ_values_v_n.sort(reverse=True)
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_v_n
self.occ_values_v_n=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_v_n.append(j)
print self.occ_values_v_n
#Finding cosine-similarity of noun-number
self.list10=self.featuress
self.list11=self.occurr_val
#self.list_noun=[]
self.list_number=[]
#self.temp_occ_val=[]
self.occ_values_n_num=[]
for i in range(len(self.list10)):
self.list_noun2=[]
if self.list10[i][1] == 'NN' or self.list10[i][1] == 'NNS':
ind=self.words.index(self.list10[i][0])
for j in self.list11[ind][1:]:
self.list_noun2.append(j)
for k in self.list11:
self.list_noun2.append(k[ind+1])
#print self.list_noun
for j in range(i+1,len(self.list10)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list10[j][1] == 'CD':
ind1=self.words.index(self.list10[j][0])
for l in self.list11[ind1][1:]:
self.list_number.append(l)
for m in self.list11:
self.list_number.append(m[ind1+1])
#print self.list_pro_noun
#self.list_pro_noun=[]
occ_value=1-spatial.distance.cosine(self.list_noun2,self.list_number)
self.temp_occ_val.append(self.list10[i][0])
self.temp_occ_val.append(self.list10[j][0])
self.temp_occ_val.append(occ_value)
#print occ_value
self.list_number=[]
self.occ_values_n_num.append(self.temp_occ_val)
self.occ_values_n_num.sort()
#Remove empty lists
del_list=[]
for i in range(len(self.occ_values_n_num)):
if len(self.occ_values_n_num[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list:
del(self.occ_values_n_num[j])
#self.occ_values_n_num.sort(reverse=True)
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_n_num
self.occ_values_n_num=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_n_num.append(j)
print self.occ_values_n_num
#Find cosine-similarity of noun-noun
self.list12=self.featuress
self.list13=self.occurr_val
#self.list_nounN=[]
self.list_nounn=[]
#self.temp_occ_val=[]
self.occ_values_n_n=[]
for i in range(len(self.list12)):
self.list_noun3=[]
if self.list12[i][1] == 'NN' or self.list12[i][1] == 'NNS':
ind=self.words.index(self.list12[i][0])
for j in self.list13[ind][1:]:
self.list_noun3.append(j)
for k in self.list13:
self.list_noun3.append(k[ind+1])
#print self.list_noun
for j in range(i+1,len(self.list12)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list12[j][1] == 'NN' or self.list12[j][1] == 'NNS':
ind1=self.words.index(self.list12[j][0])
for l in self.list13[ind1][1:]:
self.list_nounn.append(l)
for m in self.list13:
self.list_nounn.append(m[ind1+1])
occ_value=1-spatial.distance.cosine(self.list_noun3,self.list_nounn)
self.temp_occ_val.append(self.list12[i][0])
self.temp_occ_val.append(self.list12[j][0])
self.temp_occ_val.append(occ_value)
#print self.temp_occ_val
self.list_nounn=[]
self.occ_values_n_n.append(self.temp_occ_val)
self.occ_values_n_n.sort()
#Remove empty lists
del_list=[]
for i in range(len(self.occ_values_n_n)):
if len(self.occ_values_n_n[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list:
del(self.occ_values_n_n[j])
#self.occ_values_n_n.sort(reverse=True)
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_n_n
self.occ_values_n_n=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_n_n.append(j)
print self.occ_values_n_n
#Find cosine values of wh-noun
self.list15=self.featuress
self.list16=self.occurr_val
self.list_Noun=[]
#self.temp_occ_val=[]
self.occ_values_w_n=[]
for i in range(len(self.list15)):
self.list_wh=[]
if self.list15[i][1] == 'WDT' or self.list15[i][1] == 'WP' or self.list15[i][1] == 'WP$' or self.list15[i][1] == 'WRB':
ind=self.words.index(self.list15[i][0])
for j in self.list16[ind][1:]:
self.list_wh.append(j)
for k in self.list16:
self.list_wh.append(k[ind+1])
#print self.list_noun
for j in range(i+1,len(self.list15)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list15[j][1] == 'NN' or self.list15[j][1] == 'NNS' or self.list15[j][1] == 'NNP':
ind1=self.words.index(self.list15[j][0])
for l in self.list16[ind1][1:]:
self.list_Noun.append(l)
for m in self.list16:
self.list_Noun.append(m[ind1+1])
occ_value=1-spatial.distance.cosine(self.list_wh,self.list_Noun)
self.temp_occ_val.append(self.list15[i][0])
self.temp_occ_val.append(self.list15[j][0])
self.temp_occ_val.append(occ_value)
#print self.temp_occ_val
self.list_Noun=[]
self.occ_values_w_n.append(self.temp_occ_val)
self.occ_values_w_n.sort()
#Remove empty lists
del_list=[]
for i in range(len(self.occ_values_w_n)):
if len(self.occ_values_w_n[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list:
del(self.occ_values_w_n[j])
#self.occ_values_w_n.sort(reverse=True)
#print self.occ_values_w_n
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_w_n
self.occ_values_w_n=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_w_n.append(j)
print self.occ_values_w_n
	def mapping(self):
		"""Map the parsed natural-language question onto the connected database.

		Uses the state produced by extract_feature() (self.featuress POS tags,
		self.noun_clause_list, and the self.occ_values_* cosine-similarity
		lists) to match nouns to table/attribute names, directly and via
		WordNet synonyms, then assembles SELECT/FROM/WHERE clauses and
		publishes the generated SQL in the module-level ``query`` variable.
		NOTE(review): only the first mapped table is ever used when building
		the query -- multi-table questions appear to be out of scope; confirm.
		"""
		global dbc
		global database_name
		global query
		self.dbc=dbc
		self.table_names=[]
		name_synonyms=[]
		syn_set=[]
		syn_set_noun_t=[]
		self.extract_table_name=[]
		self.table_names_t=[]
		syn_set_table_t=[]
		self.lower_noun=[]
		syn_set_table=[]
		self.maped_table_names=[]
		self.query=[]
		self.select_clause='SELECT'
		self.from_clause='FROM'
		self.where_clause=''
		self.nouns=self.noun_clause_list
		# Enumerate the tables of the currently selected database.
		cursor=self.dbc.cursor()
		cursor.execute("SHOW TABLES")
		table_name=cursor.fetchall()
		#print table_name
		#Finding table names
		for i in range(len(table_name)):
			self.table_names.append(table_name[i][0])
		print self.table_names
		# Cache every table's column description (DESC) for attribute matching later.
		table_det=[]
		cursor=self.dbc.cursor()
		for i in range(len(self.table_names)):
			cursor.execute("DESC "+self.table_names[i])
			det=cursor.fetchall()
			t=(self.table_names[i],det)
			table_det.append(t)
		print table_det
		'''
		#Finding synonyms and tables
		for i in range(len(self.nouns)):
			if self.nouns[i] not in self.table_names:
				syns=w.synsets(self.nouns[i])
				#print syns
				#print syns[0].name()
				for j in syns:
					syn_set=list(chain.from_iterable([j.lemma_names()]))
					#print syn_set
					for k in range(len(syn_set)):
						if syn_set[k] in self.table_names:
							self.extract_table_name.append(syn_set[k])
							#print "found"
		'''
		#Converting to lower case
		for i in range(len(self.table_names)):
			l_name=self.table_names[i].lower()
			self.table_names_t.append(l_name)
		for j in range(len(self.nouns)):
			l_noun=self.nouns[j].lower()
			self.lower_noun.append(l_noun)
		# WordNet noun synonym sets for every table name (parallel to self.table_names_t).
		for i in range(len(self.table_names_t)):
			syns_table=w.synsets(self.table_names_t[i],NOUN)
			syn_set_table_t=[]
			for j in syns_table:
				syn_set_table_t.append(list(chain.from_iterable([j.lemma_names()])))
			syn_set_table.append(syn_set_table_t)
		#print syn_set_table
		#print self.table_names_t
		#Finding synonyms and tables
		for i in range(len(self.lower_noun)):
			#lower_case_name=self.noun[i].lower()
			if self.lower_noun[i] not in self.table_names_t:
				syns_noun=w.synsets(self.nouns[i],NOUN)
				#print syns
				#print syns[0].name()
				for j in syns_noun:
					syn_set_noun=list(chain.from_iterable([j.lemma_names()]))
					print syn_set_noun
					for k in range(len(syn_set_noun)):
						for l in range(len(syn_set_table)):
							for m in range(len(syn_set_table[l])):
								if syn_set_noun[k] in syn_set_table[l][m]:
									try:
										self.noun_table=self.lower_noun[i]
										self.extract_table_name.append(self.table_names[l])
										#print self.table_names[l]
										#print self.extract_table_name
										#print "found"
									except:
										pass
			else:
				# Exact (case-insensitive) match between noun and table name.
				self.noun_table=self.lower_noun[i]
				ind=self.table_names_t.index(self.lower_noun[i])
				self.extract_table_name.append(self.table_names[ind])
		#print self.extract_table_name
		# De-duplicate while preserving order of first appearance.
		for i in self.extract_table_name:
			if i not in self.maped_table_names:
				self.maped_table_names.append(i)
		#print self.maped_table_names
		#print self.noun_table
		#Attribute mapping
		syn_set_attribute=[]
		table_attr=[]
		self.extract_table_attr=[]
		self.mapped_attr=[]
		self.list14=[]
		self.from_clause+=' '
		self.from_clause+=self.maped_table_names[0]
		if len(self.maped_table_names) == 1:
			'''
			self.list14=self.featuress
			for i in range(len(self.list14)):
				if self.list14[i][1] == 'WDT' or self.list14[i][1] == 'WP' or self.list14[i][1] == 'WP$' or self.list14[i][1] == 'WRB':
					attribute_name=self.occ_values_w_n[0][1]
					for i in table_det:
						if i[0] == self.maped_table_names[0]:
							for j in i[1]:
								table_attr.append(j[0])
								#print table_attr
								syns_attribute=w.synsets(j[0],NOUN)
								syn_set_attribute_t=[]
								for k in syns_attribute:
									syn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))
								syn_set_attribute.append(syn_set_attribute_t)
					#print syn_set_attribute
					attr_l=attribute_name.lower()
					if attr_l not in table_attr:
						syns_attr=w.synsets(attr_l,NOUN)
						for k in syns_attr:
							syn_set_attr=list(chain.from_iterable([k.lemma_names()]))
							#print syn_set_attr
							for l in range(len(syn_set_attr)):
								for m in range(len(syn_set_attribute)):
									for n in range(len(syn_set_attribute[m])):
										#print syn_set_attr[l]
										#print syn_set_attribute[m][n]
										if syn_set_attr[l] in syn_set_attribute[m][n]:
											#print syn_set_attribute[m][n]
											#print m
											try:
												self.extract_table_attr.append(table_attr[m])
											except:
												pass
					for i in self.extract_table_attr:
						if i not in self.mapped_attr:
							self.mapped_attr.append(i)
					print self.mapped_attr
					self.where_clause+=' '
					self.where_clause+=self.mapped_attr[0]
					self.where_clause+='='
					self.where_clause=self.where_clause+"'"+self.list14[i+1][1]+"'"
			'''
			#attribute_name=self.occ_values_v_n[0][1]
			#self.select_clause+=self.self.occ_values_v_n[0]
			#print attribute_name
			#self.from_clause+=' '
			#self.from_clause+=self.maped_table_names[0]
			#Converting to lower case
			# wh-question path: if a wh-word (WDT/WP/WP$/WRB) is present, build the
			# WHERE clause from the attribute most co-occurrent with the wh-word.
			try:
				self.list14=self.featuress
				for wh in range(len(self.list14)):
					if self.list14[wh][1] == 'WDT' or self.list14[wh][1] == 'WP' or self.list14[wh][1] == 'WP$' or self.list14[wh][1] == 'WRB':
						self.where_clause+='WHERE'
						attribute_name=self.occ_values_w_n[0][1]
						print "attribute_name"
						print attribute_name
						for i in table_det:
							if i[0] == self.maped_table_names[0]:
								for j in i[1]:
									table_attr.append(j[0])
									#print table_attr
									syns_attribute=w.synsets(j[0],NOUN)
									syn_set_attribute_t=[]
									for k in syns_attribute:
										syn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))
									syn_set_attribute.append(syn_set_attribute_t)
						print syn_set_attribute
						attr_l=attribute_name.lower()
						if attr_l not in table_attr:
							syns_attr=w.synsets(attr_l,NOUN)
							for k in syns_attr:
								syn_set_attr=list(chain.from_iterable([k.lemma_names()]))
								print syn_set_attr
								for l in range(len(syn_set_attr)):
									for m in range(len(syn_set_attribute)):
										for n in range(len(syn_set_attribute[m])):
											#print syn_set_attr[l]
											#print syn_set_attribute[m][n]
											if syn_set_attr[l] in syn_set_attribute[m][n]:
												#print syn_set_attribute[m][n]
												#print m
												try:
													self.extract_table_attr.append(table_attr[m])
												except:
													pass
						for i in self.extract_table_attr:
							#print i
							#print self.mapped_attr
							if i not in self.mapped_attr:
								self.mapped_attr.append(i)
								#print "i"
								#print i
								#print "self.mapped_attr"
								#print self.mapped_attr
						print "list"
						print self.list14
						# Pick the comparison value for the WHERE clause: the noun,
						# number or proper noun with the highest cosine score against
						# the wh-attribute (scanning n-n, n-num and n-p lists).
						occ_val_temp=0
						for val in self.occ_values_n_n:
							#print "333333"
							if val[0] == self.occ_values_w_n[0][1]:
								if val[2] >= occ_val_temp:
									occ_val_temp=val[2]
									val_temp=val[1]
									#print val_temp
									#print "val_temp"
						for val in self.occ_values_n_num:
							#print "333333"
							if val[0] == self.occ_values_w_n[0][1]:
								if val[2] >= occ_val_temp:
									occ_val_temp=val[2]
									val_temp=val[1]
									#print val_temp
									#print "val_temp"
						for val in self.occ_values_n_p:
							#print "333333"
							if val[0] == self.occ_values_w_n[0][1]:
								if val[2] >= occ_val_temp:
									occ_val_temp=val[2]
									val_temp=val[1]
						print val_temp
						print "val_temp"
						print self.mapped_attr[0]
						if not self.mapped_attr:
							box=wx.MessageDialog(None,"Invalid Attribute name",'Alert',wx.OK)
							ans=box.ShowModal()
							box.Destroy()
						else:
							self.where_clause+=' '
							self.where_clause+=self.mapped_attr[0]
							#print "mapped_attr"
							#print self.mapped_attr
							#print self.where_clause
							#self.where_clause+='='
						print self.list14[wh+3][0]
						#Finding where clause condition
						# Choose the comparison operator from WordNet synonyms of the
						# word three tokens after the wh-word ('lesser' family -> '<',
						# 'greater' family -> '>', with 'equal' widening to <=/>=);
						# any failure falls back to '='.
						try:
							syn_set_con_t=[]
							syn_set_con_q=[]
							syn_set_con_q_g=[]
							syns_con=w.synsets(self.list14[wh+3][0])
							for c in syns_con:
								syn_set_con_t=list(chain.from_iterable([c.lemma_names()]))
							print syn_set_con_t
							syns_q_con=w.synsets('lesser')
							for c in syns_q_con:
								syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
							syns_q_con=w.synsets('below')
							for c in syns_q_con:
								syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
							syns_q_con=w.synsets('lower')
							for c in syns_q_con:
								syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
							syns_q_con=w.synsets('fewer')
							for c in syns_q_con:
								syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
							syns_q_con=w.synsets('smaller')
							for c in syns_q_con:
								syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
							#print "error"
							print syn_set_con_q
							syns_q_con=w.synsets('greater')
							for c in syns_q_con:
								syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
							#print syn_set_con_q_g
							syns_q_con=w.synsets('larger')
							for c in syns_q_con:
								syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
							syns_q_con=w.synsets('above')
							for c in syns_q_con:
								syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
							#print syn_set_con_q_g
							syns_q_con=w.synsets('higher')
							for c in syns_q_con:
								syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
							#print syn_set_con_q_g
							syns_q_con=w.synsets('more')
							for c in syns_q_con:
								syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
							#print syn_set_con_q_g
							#print "condition entered"
							#print self.list14
							#print syn_set_con_q
							for c in range(len(syn_set_con_t)):
								#print syn_set_con_t[c]
								for x in range(len(syn_set_con_q)):
									#print syn_set_con_q[x]
									for y in range(len(syn_set_con_q[x])):
										#print "dgfjhdjfhjdhfjhfjdfhjhqqqqqqqq"
										#print syn_set_con_t[c]
										#print syn_set_con_q[x][y]
										if syn_set_con_t[c] in syn_set_con_q[x][y]:
											#print syn_set_con_t[c]
											try:
												print "try"
												print self.list14[wh+6][0]
												if self.list14[wh+6][0] == 'equal':
													#self.where_clause+='<='
													condition='<='
													print condition
													print "condition"
												#else:
												#self.where_clause+='<'
												#	condition='<'
											except:
												condition='<'
											#print condition
											#print "condition"
							#else:
							#	condition='='
							for c in range(len(syn_set_con_t)):
								for x in range(len(syn_set_con_q_g)):
									for y in range(len(syn_set_con_q_g[x])):
										if syn_set_con_t[c] in syn_set_con_q_g[x][y]:
											print syn_set_con_q_g[x][y]
											print syn_set_con_t[c]
											#print self.list14[wh+6][0]
											try:
												if self.list14[wh+6][0] == 'equal':
													#self.where_clause+='<='
													condition='>='
												#else:
												#self.where_clause+='<'
												#	condition='>'
											except:
												condition='>'
											#print condition
											#print "condition"
							#else:
							#	condition='='
							if len(condition) < 1:
								condition='='
						except:
							condition='='
						#print "condition"
						#print condition
						self.where_clause+=condition
						self.where_clause=self.where_clause+"'"+str(val_temp)+"'"
						#print self.list14
						#print "where clause"
						print self.where_clause
				# Build the SELECT list from verb-noun co-occurrence candidates,
				# excluding the noun that mapped to the table itself.
				syn_set_attribute=[]
				table_attr=[]
				self.extract_table_attr=[]
				self.mapped_attr=[]
				self.list14=[]
				attribute_name_t=[]
				attribute_name=[]
				attr_l=[]
				#self.from_clause+=' '
				#attribute_name=self.occ_values_v_n[0][1]
				for i in self.occ_values_v_n[:]:
					attribute_name_t.append(i[1])
				print attribute_name_t
				#print "attribute_name_t"
				print self.noun_table[0]
				#print len(attribute_name_t)
				if len(attribute_name_t) > 1:
					#print "entered"
					for i in attribute_name_t:
						#print i
						if i != self.noun_table:
							#print i
							attribute_name.append(i)
				print attribute_name
				#print "ghfggfhgefhgehfehfghefgehfg"
				#Removing nouns after wh from attributes list
				try:
					del_ind=[]
					for d in range(len(attribute_name)):
						if attribute_name[d] == self.occ_values_w_n[0][1]:
							del_ind.append(d)
							#del(attribute_name[del_ind])
							print attribute_name[d]
					# Delete from the end so earlier indices stay valid.
					del_ind.reverse()
					print del_ind
					for d in del_ind:
						del(attribute_name[d])
				except:
					print "pass"
					pass
				#Removing table names if other attributes present
				#self.select_clause+=self.self.occ_values_v_n[0]
				#print "attribute_name 111"
				#print attribute_name
				for i in table_det:
					if i[0] == self.maped_table_names[0]:
						for j in i[1]:
							table_attr.append(j[0])
							#print table_attr
							syns_attribute=w.synsets(j[0],NOUN)
							syn_set_attribute_t=[]
							for k in syns_attribute:
								syn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))
							syn_set_attribute.append(syn_set_attribute_t)
				#print syn_set_attribute
				for atn in attribute_name:
					attr_l.append(atn.lower())
				for atn in attr_l:
					if atn not in table_attr:
						syns_attr=w.synsets(atn,NOUN)
						for k in syns_attr:
							syn_set_attr=list(chain.from_iterable([k.lemma_names()]))
							#print syn_set_attr
							for l in range(len(syn_set_attr)):
								for m in range(len(syn_set_attribute)):
									for n in range(len(syn_set_attribute[m])):
										#print syn_set_attr[l]
										#print syn_set_attribute[m][n]
										if syn_set_attr[l] in syn_set_attribute[m][n]:
											#print syn_set_attribute[m][n]
											#print m
											try:
												self.extract_table_attr.append(table_attr[m])
											except:
												pass
				#print "self.extract_table_attr"
				#print self.extract_table_attr
				if len(self.extract_table_attr) < 1:
					#print "fgvfhhfghfjghfjghfu"
					# No attribute matched: 'details'-style requests (or a noun that
					# is itself a synonym of the table) become SELECT *.
					select_attr=self.occ_values_v_n[0][1]
					#print select_attr
					if select_attr == 'details' or select_attr == 'contents' or select_attr == 'detail' or select_attr == 'content':
						self.select_clause+=' '
						self.select_clause+='*'
						self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
						print self.query
					else:
						syns_tb=w.synsets(select_attr,NOUN)
						for i in syns_tb:
							syns_tbb=list(chain.from_iterable([i.lemma_names()]))
						syns_tb_q=w.synsets(self.maped_table_names[0],NOUN)
						#print self.maped_table_names[0]
						#print syns_tb_q
						for i in syns_tb_q:
							syns_tbb_q=list(chain.from_iterable([i.lemma_names()]))
						#print syns_tbb
						#print syns_tbb_q
						for i in range(len(syns_tbb)):
							#for j in range(len(sysns_tbb_q)):
							if syns_tbb[i] in syns_tbb_q:
								#print "hgfhg"
								self.select_clause+=' '
								self.select_clause+='*'
								self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
								print self.query
								break
				else:
					for i in self.extract_table_attr:
						if i not in self.mapped_attr:
							self.mapped_attr.append(i)
					print self.mapped_attr
					self.select_clause+=' '
					for i in range(len(self.mapped_attr)):
						self.select_clause+=self.mapped_attr[i]
						if i < (len(self.mapped_attr)-1):
							self.select_clause+=','
					#print self.select_clause
					self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
					print self.query
			except:
				# Any failure in the wh path lands here: retry using noun-noun
				# co-occurrence with the table noun to pick SELECT attributes.
				syn_set_attribute=[]
				table_attr=[]
				self.extract_table_attr=[]
				self.mapped_attr=[]
				self.list14=[]
				attribute_name=[]
				attr_l=[]
				#print self.where_clause
				#pass
				#print "bgjgjshfcjhj"
				#for i in self.occ_values_n_n:
					#if self.maped_table_names[0] in i:
						#print i
						#attribute_name=i[1]
				#attribute_name=attribute_name.lower()
				try:
					print "self.noun_table"
					print self.noun_table
					#attribute_name=self.occ_values_n_n[0][1]
					for i in self.occ_values_n_n:
						if self.noun_table in i:
							for j in i:
								if j != self.noun_table and isinstance(j,float) == False:
									attribute_name.append(j)
					try:
						del_ind=attribute_name.index(self.occ_values_w_n[0][1])
						del(attribute_name[del_ind])
					except:
						pass
					if attribute_name[0] == 'details' or attribute_name[0] == 'detail' or attribute_name[0] == 'contents' or attribute_name[0] == 'content':
						self.select_clause+=' '
						self.select_clause+='*'
						self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
						print self.query
					else:
						for i in table_det:
							if i[0] == self.maped_table_names[0]:
								for j in i[1]:
									table_attr.append(j[0])
									#print table_attr
									syns_attribute=w.synsets(j[0],NOUN)
									syn_set_attribute_t=[]
									for k in syns_attribute:
										syn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))
									syn_set_attribute.append(syn_set_attribute_t)
						#print syn_set_attribute
						for atn in attribute_name:
							attr_l.append(atn.lower())
						for atn in attr_l:
							if atn not in table_attr:
								syns_attr=w.synsets(atn,NOUN)
								for k in syns_attr:
									syn_set_attr=list(chain.from_iterable([k.lemma_names()]))
									print syn_set_attr
									for l in range(len(syn_set_attr)):
										for m in range(len(syn_set_attribute)):
											for n in range(len(syn_set_attribute[m])):
												#print syn_set_attr[l]
												#print syn_set_attribute[m][n]
												if syn_set_attr[l] in syn_set_attribute[m][n]:
													#print syn_set_attribute[m][n]
													#print m
													try:
														self.extract_table_attr.append(table_attr[m])
													except:
														pass
						for i in self.extract_table_attr:
							if i not in self.mapped_attr:
								self.mapped_attr.append(i)
						print self.mapped_attr
						self.select_clause+=' '
						if not self.mapped_attr:
							box=wx.MessageDialog(None,"Invalid Attribute name",'Alert',wx.OK)
							ans=box.ShowModal()
							box.Destroy()
						else :
							#self.select_clause+=' '
							#self.select_clause+=self.mapped_attr[0]
							for i in range(len(self.mapped_attr)):
								self.select_clause+=self.mapped_attr[i]
								if i < (len(self.mapped_attr)-1):
									self.select_clause+=','
							self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
							print self.query
				except:
					# Last resort: SELECT * from the mapped table.
					self.select_clause+=' '
					self.select_clause+='*'
					self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
					print self.query
		# Publish the generated SQL for the GUI via the module-level variable.
		query=self.query
def csv_file(self):
#global path
if not os.path.exists(os.path.dirname("matrix/matrix.csv")):
os.makedirs(os.path.dirname("matrix/matrix.csv"))
try:
os.remove("./matrix/matrix.csv")
file1 = open("./matrix/matrix.csv","a+")
except:
file1 = open("./matrix/matrix.csv","a+")
t = ","
for i in self.words:
t += i
t +=","
t+="\n"
file1.write(t)
for l in range(len(self.occurr_val)):
tt=''
for m in range(len(self.occurr_val[l])):
tt+=str(self.occurr_val[l][m])
tt+=','
tt+='\n'
file1.write(tt)
file1.close()
# Script entry point: create the main frame and hand control to the wx event loop.
if __name__ == '__main__':
	application = wx.PySimpleApp()
	frame = MainWindow(parent=None, id=-1)
	frame.Show()
	application.MainLoop()
1.txt
Displaying 1.txt. This reduces the manual work and time. As for children and
adults, people are most challenged by word problem solving
not because of their mathematical skills but because of text
comprehension. Often, incorrect answers to word
problems result from correct calculations applied to an
incorrect problem representation.
|
normal
|
{
"blob_id": "eb6a4170e5427f10eda4d650996c2cbd8a34ca21",
"index": 2667,
"step-1": "Relevance\r\n\r\nThus, designing an automatic MWP solver, with semantic understanding and\r\n inference capability, has been considered as a crucial step towards general AI. \r\n Solving a math problem manually involves too many steps. So MWP will reduc\r\nAttachment final.pdf added.Conversation opened. 1 read message.\r\n\r\nSkip to content\r\nUsing Gmail with screen readers\r\njithin \r\n\r\n5 of about 62\r\nCode\r\nInbox\r\nx\r\n\r\njithin p <jithinappu.p6@gmail.com>\r\nAttachments\r\nTue, 6 Mar, 23:44\r\nto me\r\n\r\nAttachments area\r\n\r\nimport wx\r\nimport MySQLdb\r\nimport nltk\r\nimport string\r\nfrom string import punctuation\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.corpus import wordnet as w\r\nfrom wx import grid as gr\r\nfrom itertools import chain\r\nfrom nltk.corpus.reader import NOUN\r\nfrom scipy import spatial\r\nimport os\r\nimport sys\r\n\r\n\r\n\r\n\r\n\r\ndbc=''\r\ndatabase_name=''\r\n#natural_query=''\r\nquery=''\r\npath=os.path.dirname(os.path.realpath(sys.argv[0]))\r\nclass MainWindow(wx.Frame) :\r\n\t\r\n\tdef __init__(self,parent,id) :\r\n\t\twx.Frame.__init__(self,parent,id,'Natural Query To SQL Translator',size=(500,400))\r\n\t\tpanel = wx.Panel(self)\r\n\t\tpanel.SetBackgroundColour(wx.Colour(200,200,225))\r\n\t\tfont1 = wx.Font(30, wx.DEFAULT, wx.MODERN, wx.FONTWEIGHT_BOLD)\r\n\t\t#name_top = wx.StaticText(panel, -1, \"Natural Query To\", (42,50), (360,-1),wx.ALIGN_CENTER)\r\n\t\t#name_top.SetFont(font)\r\n\t\tname = wx.StaticText(panel, -1, \"Query Translator\", (59,50), (360,-1),wx.ALIGN_CENTER)\r\n name.SetFont(font1)\r\n\t\tfont2 = wx.Font(12, wx.DEFAULT, wx.MODERN, wx.FONTWEIGHT_BOLD)\r\n\t\tname.SetForegroundColour('blue')\r\n\t\ttranslator_button = wx.Button(panel,label=\"Translator\",pos=(160,200),size=(175,60))\r\n\t\ttranslator_button.SetBackgroundColour(wx.Colour(220,220,230))\r\n\t\ttranslator_button.SetFont(font2)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.translating_window, 
translator_button)\r\n\t\t#self.Bind(wx.EVT_BUTTON, self.database_window, translator_button)\r\n\t\tstatusbar = self.CreateStatusBar()\r\n\t\tmenubar = wx.MenuBar()\r\n\t\tfirst = wx.Menu()\r\n\t\tsecond = wx.Menu()\r\n\t\tfirst.Append(wx.NewId(),\"New Window\",\"This is new window\")\r\n\t\tsecond.Append(wx.NewId(),\"Open...\",\"Open new window\")\r\n\t\tmenubar.Append(first,\"File\")\r\n\t\tmenubar.Append(second,\"Edit\")\r\n\t\tself.SetMenuBar(menubar)\r\n\r\n\r\n\t'''\r\n\tdef databse_window(self,event):\r\n\t\tdtbase_window = create_databse_window(parent=None,id=-1)\r\n\t\tdtbase_window.Show()\r\n\t'''\r\n\r\n\r\n\t\r\n\tdef translating_window(self,event):\r\n\t\ttranslate_window = create_translate_window(parent=None,id=-1)\r\n\t\ttranslate_window.Show()\r\n\t\r\n\r\n\r\n\r\n'''\r\nclass create_databse_window(wx.Frame) :\r\n\tdef __init__(self,parent,id) :\r\n\t\twx.Frame.__init__(self,parent,id,'Query Translator',size=(500,300))\r\n\t\tself.panel = wx.Panel(self)\r\n self.panel.SetBackgroundColour(wx.Colour(200,200,225))\r\n font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)\r\n\t\tself.database_name = wx.StaticText(self.panel, -3, \"Database Name\", (42,85), (360,-1))\r\n\t\tself.database_name.SetFont(font)\r\n\t\tself.database_name_text = wx.TextCtrl(self.panel, -1, \"\", pos=(200,75), size=(400,42))\r\n self.natural_query_text.SetInsertionPoint(0)\r\n'''\t\t\r\n\r\n\t \r\n\r\n\r\n\t\r\nclass create_translate_window(wx.Frame) :\r\n\tglobal dbc\r\n\tglobal database_name\r\n\tglobal natural_query\r\n\tglobal query\r\n\tdef __init__(self,parent,id) :\r\n\t\twx.Frame.__init__(self,parent,id,'Query Translator',size=(650,600))\r\n\t\tself.panel = wx.Panel(self)\r\n\t\tself.panel.SetBackgroundColour(wx.Colour(200,200,225))\r\n\t\tfont = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)\r\n\t\tdatabase_connect_button = wx.Button(self.panel, label=\"Connect\", pos=(262,50), size=(120,40))\r\n 
database_connect_button.SetFont(font)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.connect_database, database_connect_button)\r\n\t\tdatabase_button = wx.Button(self.panel, label=\"Select Database\", pos=(250,130), size=(150,40))\r\n\t\tdatabase_button.SetFont(font)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.select_database, database_button)\r\n\t\tself.selected_dtname = wx.StaticText(self.panel, -3, \"Database\", (42,215), (360,-1))\r\n\t\tself.selected_dtname.SetFont(font)\r\n\t\tself.sel_dtname = wx.TextCtrl(self.panel, -1, pos=(207,210), size=(250,-1))\r\n self.sel_dtname.SetInsertionPoint(0)\r\n\t\tself.natural_query = wx.StaticText(self.panel, -3, \"English query\", (42,290), (360,-1))\r\n\t\tself.natural_query.SetFont(font)\r\n\t\tself.natural_query_text = wx.TextCtrl(self.panel, -1, pos=(185,280), size=(300,42), style=wx.TE_MULTILINE)\r\n\t\tself.natural_query_text.SetInsertionPoint(0)\r\n\t\tgenerate_button = wx.Button(self.panel, label=\"Generate\", pos=(265,360), size=(120,40))\r\n\t\tgenerate_button.SetFont(font)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.generate_query, generate_button)\r\n\t\tself.sql_query = wx.StaticText(self.panel, -3, \"SQL query\", (42,450), (360,-1))\r\n\t\tself.sql_query.SetFont(font)\r\n\t\tself.sql_query_text = wx.TextCtrl(self.panel, -1, pos=(185,440), size=(300,42), style=wx.TE_MULTILINE)\r\n\t\tself.sql_query_text.SetInsertionPoint(0)\r\n\t\tresult_button = wx.Button(self.panel, label=\"Result\", pos=(265,519), size=(120,40))\r\n result_button.SetFont(font)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.show_result, result_button)\r\n\t\t\r\n\r\n\r\n\r\n\r\n\r\n\tdef connect_database(self,event):\r\n\t\tglobal dbc\r\n\t\ttry:\r\n\t\t\tself.dbc=MySQLdb.connect(\"localhost\",\"root\",\"\")\r\n\t\t\tdbc=self.dbc\r\n\t\t\t#print dbc\r\n\t\t\tbox=wx.MessageDialog(None,\"Connection Established\",'Alert',wx.OK)\r\n\t\t\tans=box.ShowModal()\r\n \t\tbox.Destroy()\r\n\t\texcept:\r\n\t\t\tbox=wx.MessageDialog(None,\"Error occured while establishing 
connection\",'Alert',wx.OK)\r\n ans=box.ShowModal()\r\n box.Destroy()\r\n\r\n\r\n\r\n\t#def generate_query(self,event):\r\n\t#\tt=self.natural_query_text.GetValue()\r\n\t#\tprint t\r\n\r\n\r\n\r\n\tdef select_database(self,event):\r\n\t\t#lobal dbc\r\n\t\ttry:\r\n\t\t\ttemp=self.dtbase_window.GetSize()\r\n\t\texcept:\r\n\t\t\tself.dtbase_window = self.create_databse_window(parent=None,id=1)\r\n \tself.dtbase_window.Show()\r\n\t\t\tself.dtbase_window.Bind(wx.EVT_CLOSE,self.addDatabase,self.dtbase_window)\r\n\t\t\t#print dbc\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef addDatabase(self,event):\r\n\t\ttry:\r\n\t\t\tglobal database_name\r\n\t\t\t#print database_name\r\n\t\t\tself.dt_name=database_name\r\n\t\t\tself.sel_dtname.SetValue(self.dt_name)\r\n\t\t\tself.dtbase_window.Destroy()\r\n\t\texcept:\r\n\t\t\tself.dtbase_window.Destroy()\r\n\r\n\r\n\r\n\r\n\r\n\tdef generate_query(self,event):\r\n\t\tglobal query\r\n\t\tself.n_query_feature_file=[]\r\n\t\t#global natural_query\r\n\t\t#print \"hdgfhgf\"\r\n \tt=self.natural_query_text.GetValue()\r\n\t\tself.natural_queryy=t\r\n \t#print self.natural_queryy\r\n\t\tself.n_query_feature_file.append(feature(self.natural_queryy))\r\n\t\t#print self.n_query_feature_file\r\n\t\tfor f in self.n_query_feature_file:\r\n\t\t\tf.extract_feature()\r\n\t\t\tf.csv_file()\r\n\t\t\tf.mapping()\r\n\t\tprint \"query\"\r\n\t\tprint query\r\n\t\tself.queryy=query\r\n\t\tif len(self.queryy) != 0:\r\n\t\t\tself.sql_query_text.SetValue(self.queryy)\r\n\t\r\n\t\r\n\t\r\n\t\r\n\tdef show_result(self,event):\r\n\t\t#global query\r\n\t\ttry:\r\n temp=self.reslt_window.GetSize()\r\n except:\r\n self.reslt_window = self.create_result_window(parent=None,id=1)\r\n self.reslt_window.Show()\r\n #self.reslt_window.Bind(wx.EVT_CLOSE,self.addDatabase,self.dtbase_window)\r\n\r\n\t\t\r\n\t\t\t\r\n\r\n\r\n\t\r\n\t\r\n\r\n\r\n\r\n\tclass create_databse_window(wx.Frame):\r\n\t\tglobal dbc\r\n\t\tglobal database_name\r\n\t\tdef __init__(self,parent,id) :\r\n 
\twx.Frame.__init__(self,parent,id,'Select Database',size=(590,350))\r\n \tself.panel = wx.Panel(self)\r\n \tself.panel.SetBackgroundColour(wx.Colour(200,200,225))\r\n \tfont = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)\r\n\t\t\tself.sel_dtbase = wx.StaticText(self.panel, -3, \"Select Database\", (42,100), (360,-1))\r\n \tself.sel_dtbase.SetFont(font)\r\n\t\t\tself.dt_choice=wx.Choice(self.panel,-1,pos=(190,95),size=(250,30))\r\n\t\t\tself.dt_choice.SetSelection(0)\r\n\t\t\trefresh_button = wx.Button(self.panel, label=\"Refresh\", pos=(450,95), size=(90,30))\r\n \trefresh_button.SetFont(font)\r\n\t\t\tself.Bind(wx.EVT_BUTTON, self.list_dt_base, refresh_button)\r\n\t\t\tselect_button = wx.Button(self.panel, label=\"Select\", pos=(250,200), size=(95,30))\r\n select_button.SetFont(font)\r\n\t\t\tself.Bind(wx.EVT_BUTTON, self.database_return, select_button)\r\n\t\t\t\r\n\t\t\t#t = self.dt_choice.GetSelection()\r\n\t\t\t#print t\r\n\t\t\t#print dbc\r\n\r\n\r\n\r\n\r\n\r\n\t\t\r\n\t\tdef list_dt_base(self,event):\r\n\t\t\tglobal dbc\r\n\t\t\tglobal database_name\r\n\t\t\t#try:\r\n\t\t\tself.list_dtnames=[]\r\n\t\t\tself.dbc=dbc\r\n\t\t\t#print dbc\r\n\t\t\tcursor=self.dbc.cursor()\r\n\t\t\tcursor.execute(\"SHOW DATABASES\")\r\n\t\t\tself.dt_names=cursor.fetchall()\r\n\t\t\t#print self.dt_names\r\n\t\t\tfor i in self.dt_names:\r\n\t\t\t#cursor.execute(\"DESC \"+i[0])\r\n\t\t\t\tname_t=i[0]\r\n\t\t\t\t#t=(i[0],det)\r\n\t\t\t\tself.list_dtnames.append(name_t)\r\n\t\t\t\t#self.dt_choice.SetItems(name_t)\r\n\t\t\t#database_name=self.list_dtnames\r\n\t\t\tself.dt_choice.SetItems(self.list_dtnames)\r\n\t\t\t#print self.list_dtnames\r\n\t\t\t#except:\r\n\t\t\t#\tbox=wx.MessageDialog(None,\"Error occured. 
Connect database\",'Alert',wx.OK)\r\n #\tans=box.ShowModal()\r\n #\tbox.Destroy()\r\n\r\n\r\n\r\n\r\n\r\n\t\tdef database_return(self,event):\r\n\t\t\ttry:\r\n\t\t\t\tglobal dbc\r\n\t\t\t\tglobal database_name\r\n\t\t\t\tself.dbc=dbc\r\n\t\t\t\tt = self.dt_choice.GetSelection()\r\n\t\t\t\t#print self.list_dtnames[t]\r\n\t\t\t\tcursor=self.dbc.cursor()\r\n \tcursor.execute(\"USE \"+self.list_dtnames[t])\r\n\t\t\t\tdt_choose=cursor.fetchall()\r\n\t\t\t\tprint dt_choose\r\n\t\t\t\tdatabase_name=self.list_dtnames[t]\r\n\t\t\t\t#self.sel_dtname.SetValue(database_name)\r\n\t\t\t\tself.Close()\r\n\t\t\texcept:\r\n\t\t\t\tbox=wx.MessageDialog(None,\"Database no longer exist. Hit the refresh button\",'Alert',wx.OK)\r\n \tans=box.ShowModal()\r\n \tbox.Destroy()\r\n\r\n\r\n\r\n\r\n\tclass create_result_window(wx.Frame):\r\n\t\tglobal dbc\r\n global database_name\r\n\t\tglobal query\r\n def __init__(self,parent,id) :\r\n\t\t\twx.Frame.__init__(self,parent,id,'Result',size=(500,600))\r\n self.panel = wx.Panel(self)\r\n self.panel.SetBackgroundColour(wx.Colour(200,200,225))\r\n font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)\r\n\t\t\tself.queryy=query\r\n\t\t\tself.dbc=dbc\r\n\t\t\tattribute_name=[]\r\n\t\t\tt=self.queryy.split(' ')\r\n\t\t\ttt=[]\r\n\t\t\tfor i in t:\r\n\t\t\t\ttt.append(i.split(','))\r\n\t\t\tprint tt\r\n\t\t\tfor i in range(len(tt)):\r\n\t\t\t\tif 'FROM' in tt[i]:\r\n\t\t\t\t\ts=i\r\n\t\t\t#s=tt.index('FROM')\r\n\t\t\t#if len(tt) > 0:\r\n\t\t\t#for i in range(len(tt)):\r\n\t\t\t#\tattribute_name.append(tt[i])\r\n\t\t\t#else:\r\n\t\t\tfor i in tt[1:s]:\r\n\t\t\t\tfor j in i:\r\n\t\t\t\t\tattribute_name.append(j)\r\n\t\t\tif '*' in attribute_name:\r\n\t\t\t\tcursor=self.dbc.cursor()\r\n\t\t\t\tcursor.execute(\"DESC \"+tt[s+1][0])\r\n\t\t\t\tdet=cursor.fetchall()\r\n\t\t\t\tattribute_name=[]\r\n\t\t\t\tfor i in 
range(len(det)):\r\n\t\t\t\t\tattribute_name.append(det[i][0])\r\n\t\t\t\r\n\t\t\t#try:\r\n\t\t\tcursor=self.dbc.cursor()\r\n\t\t\tcursor.execute(self.queryy)\r\n\t\t\tresult=cursor.fetchall()\r\n\t\t\tprint result\r\n\t\t\tn_rows=len(result)\r\n\t\t\tn_cols=len(result[0])\r\n\t\t\ttable=gr.Grid(self.panel, -1, size=(500,600))\r\n\t\t\t#print attribute_name\r\n\t\t\ttable.CreateGrid(n_rows,n_cols)\r\n\t\t\tfor i in range(len(attribute_name)):\r\n\t\t\t\ttable.SetColLabelValue(i,attribute_name[i])\r\n\r\n\t\t\tfor i in range(len(result)):\r\n\t\t\t\tfor j in range(len(result[i])):\r\n\t\t\t\t\ttable.SetCellValue(i,j,str(result[i][j]))\r\n\t\t\t#except:\r\n\t\t\t#print \"grid error\"\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass feature():\r\n\tglobal dbc\r\n\tglobal database_name\r\n\tglobal query\r\n\tdef __init__(self,query):\r\n\t\tself.natural_query=query\r\n\t\tself.token=nltk.tokenize.word_tokenize(self.natural_query)\r\n\t\tprint self.token\r\n\r\n\r\n\r\n\t\r\n\r\n\r\n\tdef extract_feature(self):\r\n\t\tglobal query\r\n\t\tself.natural_query_features=[]\r\n\t\tself.list1=self.token\r\n\r\n\r\n\t\t#Removing punctuations\r\n\t\tremov_p=[]\r\n\t\tfor i in self.list1:\r\n\t\t\tif i in punctuation:\r\n\t\t\t\tremov_p.append(self.list1.index(i))\r\n\t\tremov_p.reverse()\r\n\t\tfor j in remov_p[:]:\r\n\t\t\t#print j\r\n\t\t\tdel(self.list1[j])\r\n\t\t#print self.list2\r\n\t\tself.featuress=self.list1\r\n\t\t#print self.featuress\r\n\r\n\t\t\r\n\t\t#word co-occurrence matrix\r\n\t\tself.occurr=[]\r\n\t\tself.words=[]\r\n\t\tself.list2=self.featuress\r\n\t\tfor i in self.list2:\r\n\t\t\tif i not in self.words:\r\n\t\t\t\tself.words.append(i)\r\n\t\t\r\n\t\tw=5\r\n\t\t#self.list4 = self.list3\r\n\t\tself.occurr_val=[]\r\n\t\tfor i in range(len(self.list2)):\r\n\t\t\tself.occurr=[0 for x in range(len(self.words)+1)]\r\n\t\t\tself.occurr[0]=self.list2[i]\r\n\t\t\tj=i\r\n\t\t\t#while (j+w) <= (len(self.list1)-1):\r\n\t\t\tif (j+w+1) 
<= (len(self.list2)-1):\r\n\t\t\t\tj=j+w+1\r\n\t\t\telse:\r\n\t\t\t\tj=len(self.list2)\r\n\t\t\t#print \"j\"\r\n\t\t\t#print j\r\n\t\t\t\r\n\t\t\tfor k in range(i+1,j):\r\n\t\t\t\t#print \"i\"\r\n\t\t\t\t#print i\r\n\t\t\t\t#self.occurr_val.append(self.occurr)\r\n\t\t\t\tself.word=self.list2[k]\r\n\t\t\t\ttry:\r\n\t\t\t\t\tfor p in range(len(self.words)):\r\n\t\t\t\t\t\tif self.words[p] == self.list2[i]:\r\n\t\t\t\t\t\t\tind_row_word=p\r\n\r\n\t\t\t\t\tif self.list2[k] == self.list2[i]:\r\n\t \tocc=w-(k-i-1)\r\n \t\t \tind=self.words.index(self.word)\r\n \t \t#self.occurr[ind]+=occ\r\n\t\t\t\t\t\tself.occurr_val[ind_row_word][ind+1]+=occ\r\n\t\t\t\t\telse:\r\n \tocc=w-(k-i-1)\r\n \t#print k\r\n \t\tind=self.words.index(self.word)\r\n \t\t#self.occurr[ind]+=occ\r\n\t\t\t\t\t\tself.occurr_val[ind_row_word][ind+1]+=occ\r\n\t\t\t\t\r\n\t\t\t\texcept:\r\n\t\t\t\t\tif self.list2[k] == self.list2[i]:\r\n\t\t\t\t\t\tocc=w-(k-i-1)\r\n\t\t\t\t\t\tind=self.words.index(self.word)\r\n\t\t\t\t\t\tself.occurr[ind+1]+=occ\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\t#if k+1 > j-1:\r\n\t\t\t\t\t#\tself.occurr[k]+=0\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tocc=w-(k-i-1)\r\n\t\t\t\t\t\t#print k\r\n\t\t\t\t\t\tind=self.words.index(self.word)\r\n\t\t\t\t\t\tself.occurr[ind+1]+=occ\r\n\t\t\t\t\t\t#self.occurr_val.append(self.occurr)\r\n\t\t\t\t\t\t#print self.words[i]\r\n\t\t\t\r\n\t\t\tif len(self.occurr_val) != len(self.words):\r\n\t\t\t\tself.occurr_val.append(self.occurr)\r\n\t\t\t\r\n\t\t\tprint self.occurr_val\r\n\r\n\t\t\t\r\n\t\t#Postagging\r\n\t\tself.list3=self.featuress\r\n\t\ttagged_string=nltk.pos_tag(self.list3)\r\n\t\tself.featuress=tagged_string\r\n print self.featuress\r\n\r\n\r\n\t\t#Noun clause extracting\r\n\t\tself.noun_clause_list=[]\r\n\t\tself.list4=self.featuress\r\n\t\tfor i in range(len(self.list4)):\r\n\t\t\tif self.list4[i][1] == 'NN' or self.list4[i][1] == 'NNS':\r\n\t\t\t\tself.noun_clause_list.append(self.list4[i][0])\r\n\t\tprint 
self.noun_clause_list\r\n\r\n\t\t'''\r\n\t\t#Removing stopwords\r\n\t\tself.list5=self.featuress\r\n remov_s=[]\r\n stop_words = set(stopwords.words('english'))\r\n for i in range(len(self.list5)):\r\n if self.list5[i][0] in stop_words:\r\n remov_s.append(i)\r\n\t\t#print remov_s\r\n remov_s.reverse()\r\n\t\t#print remov_s\r\n for j in range(len(remov_s)):\r\n #print self.list4\r\n\t\t\tt=remov_s[j]\r\n\t\t\t#print self.list5[t]\r\n del(self.list5[t])\r\n #print self.list5\r\n self.featuress=self.list5\r\n\t\tprint self.featuress\r\n\t\t'''\r\n\t\t\r\n\t\t#Finding Cosine-similarity of noun-pro noun\r\n\t\tself.list6=self.featuress\r\n\t\tself.list7=self.occurr_val\r\n\t\tself.list_pro_noun=[]\r\n\t\t#self.temp_occ_val=[]\r\n\t\tself.occ_values_n_p=[]\r\n\t\tfor i in range(len(self.list6)):\r\n\t\t\tself.list_noun=[]\r\n\t\t\tif self.list6[i][1] == 'NN' or self.list6[i][1] == 'NNS':\r\n\t\t\t\tind=self.words.index(self.list6[i][0])\r\n\t\t\t\tfor j in self.list7[ind][1:]:\r\n\t\t\t\t\tself.list_noun.append(j)\t\r\n\t\t\t\tfor k in self.list7:\r\n\t\t\t\t\tself.list_noun.append(k[ind+1])\r\n\t\t\t\t#print self.list_noun\r\n\t\t\t\tfor j in range(i+1,len(self.list6)):\r\n\t\t\t\t\tself.temp_occ_val=[]\r\n\t\t\t\t#self.list_pro_noun=[]\r\n\t\t\t\t\tif self.list6[j][1] == 'NNP':\r\n\t\t\t\t\t\tind1=self.words.index(self.list6[j][0])\r\n\t\t\t\t\t\tfor l in self.list7[ind1][1:]:\r\n\t\t\t\t\t\t\tself.list_pro_noun.append(l)\r\n\t\t\t\t\t\tfor m in self.list7:\r\n\t\t\t\t\t\t\tself.list_pro_noun.append(m[ind1+1])\r\n\t\t\t\t\t\t#print self.list_pro_noun\r\n\t\t\t\t\t\t#self.list_pro_noun=[]\r\n\t\t\t\t\t\tocc_value=1-spatial.distance.cosine(self.list_noun,self.list_pro_noun)\r\n\t\t\t\t\t\tself.temp_occ_val.append(self.list6[i][0])\r\n\t\t\t\t\t\tself.temp_occ_val.append(self.list6[j][0])\r\n\t\t\t\t\t\tself.temp_occ_val.append(occ_value)\r\n\t\t\t\t\t\t#print 
occ_value\r\n\t\t\t\t\t\tself.list_pro_noun=[]\r\n\t\t\t\t\tself.occ_values_n_p.append(self.temp_occ_val)\r\n\t\tself.occ_values_n_p.sort()\r\n\t\t#Remove empty lists\r\n\t\tdel_list=[]\r\n\t\t#self.occ_values_n_p\r\n\t\tfor i in range(len(self.occ_values_n_p)):\r\n\t\t\tif len(self.occ_values_n_p[i]) == 0:\r\n\t\t\t\tdel_list.append(i)\r\n\t\tdel_list.reverse()\r\n\t\tfor j in del_list[:]:\r\n\t\t\tprint del_list\r\n\t\t\tdel(self.occ_values_n_p[j])\r\n\t\t#self.occ_values_n_p.sort(reverse=True)\r\n\t\t#Sorting the list\r\n sort_t=[]\r\n sort_tt=self.occ_values_n_p\r\n self.occ_values_n_p=[]\r\n for i in sort_tt:\r\n sort_t.append(i[2])\r\n sort_t.sort(reverse=True)\r\n for i in sort_t:\r\n for j in sort_tt:\r\n if i == j[2]:\r\n self.occ_values_n_p.append(j)\r\n\r\n\t\tprint self.occ_values_n_p\r\n\r\n\r\n\t\t#Finding cosine similarity of verb-noun\r\n\t\tself.list8=self.featuress\r\n self.list9=self.occurr_val\r\n #self.list_noun=[]\r\n self.list_noun1=[]\r\n #self.temp_occ_val=[]\r\n self.occ_values_v_n=[]\r\n for i in range(len(self.list8)):\r\n\t\t\tself.list_verb=[]\r\n if self.list8[i][1] == 'VB' or self.list8[i][1] == 'VBP':\r\n ind=self.words.index(self.list8[i][0])\r\n for j in self.list9[ind][1:]:\r\n self.list_verb.append(j)\r\n for k in self.list9:\r\n self.list_verb.append(k[ind+1])\r\n #print self.list_verb\r\n for j in range(i+1,len(self.list8)):\r\n self.temp_occ_val=[]\r\n #self.list_pro_noun=[]\r\n\t\t\t\t\tif self.list8[j][1] == 'NN' or self.list8[j][1]=='NNS' or self.list8[j][1]=='NNP':\r\n ind1=self.words.index(self.list8[j][0])\r\n for l in self.list9[ind1][1:]:\r\n self.list_noun1.append(l)\r\n for m in self.list9:\r\n self.list_noun1.append(m[ind1+1])\r\n #print self.list_noun1\r\n #self.list_pro_noun=[]\r\n occ_value=1-spatial.distance.cosine(self.list_verb,self.list_noun1)\r\n self.temp_occ_val.append(self.list8[i][0])\r\n self.temp_occ_val.append(self.list8[j][0])\r\n self.temp_occ_val.append(occ_value)\r\n #print self.temp_occ_val\r\n 
self.list_noun1=[]\r\n \tself.occ_values_v_n.append(self.temp_occ_val)\r\n\t\tself.occ_values_v_n.sort()\r\n\t\t#Remove empty lists\r\n del_list=[]\r\n for i in range(len(self.occ_values_v_n)):\r\n if len(self.occ_values_v_n[i]) == 0:\r\n del_list.append(i)\r\n\t\tdel_list.reverse()\r\n for j in del_list:\r\n \tdel(self.occ_values_v_n[j])\r\n\r\n\t\t#self.occ_values_v_n.sort(reverse=True)\r\n\t\t#Sorting the list\r\n sort_t=[]\r\n sort_tt=self.occ_values_v_n\r\n self.occ_values_v_n=[]\r\n for i in sort_tt:\r\n sort_t.append(i[2])\r\n sort_t.sort(reverse=True)\r\n for i in sort_t:\r\n for j in sort_tt:\r\n if i == j[2]:\r\n self.occ_values_v_n.append(j)\r\n\r\n print self.occ_values_v_n\r\n\r\n\r\n\t\t#Finding cosine-similarity of noun-number\r\n\t\tself.list10=self.featuress\r\n self.list11=self.occurr_val\r\n #self.list_noun=[]\r\n self.list_number=[]\r\n #self.temp_occ_val=[]\r\n self.occ_values_n_num=[]\r\n for i in range(len(self.list10)):\r\n\t\t\tself.list_noun2=[]\r\n if self.list10[i][1] == 'NN' or self.list10[i][1] == 'NNS':\r\n ind=self.words.index(self.list10[i][0])\r\n for j in self.list11[ind][1:]:\r\n self.list_noun2.append(j)\r\n for k in self.list11:\r\n self.list_noun2.append(k[ind+1])\r\n #print self.list_noun\r\n for j in range(i+1,len(self.list10)):\r\n self.temp_occ_val=[]\r\n #self.list_pro_noun=[]\r\n\t\t\t\t\tif self.list10[j][1] == 'CD':\r\n ind1=self.words.index(self.list10[j][0])\r\n for l in self.list11[ind1][1:]:\r\n self.list_number.append(l)\r\n for m in self.list11:\r\n self.list_number.append(m[ind1+1])\r\n #print self.list_pro_noun\r\n #self.list_pro_noun=[]\r\n occ_value=1-spatial.distance.cosine(self.list_noun2,self.list_number)\r\n self.temp_occ_val.append(self.list10[i][0])\r\n self.temp_occ_val.append(self.list10[j][0])\r\n self.temp_occ_val.append(occ_value)\r\n #print occ_value\r\n self.list_number=[]\r\n self.occ_values_n_num.append(self.temp_occ_val)\r\n\t\tself.occ_values_n_num.sort()\r\n\t\t#Remove empty lists\r\n 
del_list=[]\r\n for i in range(len(self.occ_values_n_num)):\r\n if len(self.occ_values_n_num[i]) == 0:\r\n del_list.append(i)\r\n\t\tdel_list.reverse()\r\n for j in del_list:\r\n \tdel(self.occ_values_n_num[j])\r\n\r\n\t\t#self.occ_values_n_num.sort(reverse=True)\r\n\t\t#Sorting the list\r\n sort_t=[]\r\n sort_tt=self.occ_values_n_num\r\n self.occ_values_n_num=[]\r\n for i in sort_tt:\r\n sort_t.append(i[2])\r\n sort_t.sort(reverse=True)\r\n for i in sort_t:\r\n for j in sort_tt:\r\n if i == j[2]:\r\n self.occ_values_n_num.append(j)\r\n\r\n print self.occ_values_n_num\r\n\t\t\r\n\t\t#Find cosine-similarity of noun-noun\r\n\t\tself.list12=self.featuress\r\n self.list13=self.occurr_val\r\n #self.list_nounN=[]\r\n self.list_nounn=[]\r\n #self.temp_occ_val=[]\r\n self.occ_values_n_n=[]\r\n for i in range(len(self.list12)):\r\n self.list_noun3=[]\r\n if self.list12[i][1] == 'NN' or self.list12[i][1] == 'NNS':\r\n ind=self.words.index(self.list12[i][0])\r\n for j in self.list13[ind][1:]:\r\n self.list_noun3.append(j)\r\n for k in self.list13:\r\n self.list_noun3.append(k[ind+1])\r\n #print self.list_noun\r\n for j in range(i+1,len(self.list12)):\r\n self.temp_occ_val=[]\r\n #self.list_pro_noun=[]\r\n if self.list12[j][1] == 'NN' or self.list12[j][1] == 'NNS':\r\n ind1=self.words.index(self.list12[j][0])\r\n for l in self.list13[ind1][1:]:\r\n self.list_nounn.append(l)\r\n for m in self.list13:\r\n self.list_nounn.append(m[ind1+1])\r\n\t\t\t\t\t\tocc_value=1-spatial.distance.cosine(self.list_noun3,self.list_nounn)\r\n self.temp_occ_val.append(self.list12[i][0])\r\n self.temp_occ_val.append(self.list12[j][0])\r\n self.temp_occ_val.append(occ_value)\r\n #print self.temp_occ_val\r\n self.list_nounn=[]\r\n self.occ_values_n_n.append(self.temp_occ_val)\r\n\t\t\r\n\t\tself.occ_values_n_n.sort()\r\n #Remove empty lists\r\n del_list=[]\r\n for i in range(len(self.occ_values_n_n)):\r\n if len(self.occ_values_n_n[i]) == 0:\r\n del_list.append(i)\r\n del_list.reverse()\r\n for j in 
del_list:\r\n \tdel(self.occ_values_n_n[j])\r\n\r\n\t\t#self.occ_values_n_n.sort(reverse=True)\r\n\t\t#Sorting the list\r\n sort_t=[]\r\n sort_tt=self.occ_values_n_n\r\n self.occ_values_n_n=[]\r\n for i in sort_tt:\r\n sort_t.append(i[2])\r\n sort_t.sort(reverse=True)\r\n for i in sort_t:\r\n for j in sort_tt:\r\n if i == j[2]:\r\n self.occ_values_n_n.append(j)\r\n\r\n print self.occ_values_n_n\r\n\r\n\t\t#Find cosine values of wh-noun\r\n\t\tself.list15=self.featuress\r\n self.list16=self.occurr_val\r\n\t\tself.list_Noun=[]\r\n #self.temp_occ_val=[]\r\n self.occ_values_w_n=[]\r\n\t\tfor i in range(len(self.list15)):\r\n self.list_wh=[]\r\n if self.list15[i][1] == 'WDT' or self.list15[i][1] == 'WP' or self.list15[i][1] == 'WP$' or self.list15[i][1] == 'WRB':\r\n ind=self.words.index(self.list15[i][0])\r\n for j in self.list16[ind][1:]:\r\n self.list_wh.append(j)\r\n for k in self.list16:\r\n self.list_wh.append(k[ind+1])\r\n #print self.list_noun\r\n\t\t\t\tfor j in range(i+1,len(self.list15)):\r\n self.temp_occ_val=[]\r\n #self.list_pro_noun=[]\r\n if self.list15[j][1] == 'NN' or self.list15[j][1] == 'NNS' or self.list15[j][1] == 'NNP':\r\n ind1=self.words.index(self.list15[j][0])\r\n for l in self.list16[ind1][1:]:\r\n self.list_Noun.append(l)\r\n for m in self.list16:\r\n self.list_Noun.append(m[ind1+1])\r\n\t\t\t\t\t\tocc_value=1-spatial.distance.cosine(self.list_wh,self.list_Noun)\r\n self.temp_occ_val.append(self.list15[i][0])\r\n self.temp_occ_val.append(self.list15[j][0])\r\n self.temp_occ_val.append(occ_value)\r\n #print self.temp_occ_val\r\n self.list_Noun=[]\r\n self.occ_values_w_n.append(self.temp_occ_val)\r\n\t\tself.occ_values_w_n.sort()\r\n #Remove empty lists\r\n del_list=[]\r\n for i in range(len(self.occ_values_w_n)):\r\n if len(self.occ_values_w_n[i]) == 0:\r\n del_list.append(i)\r\n del_list.reverse()\r\n for j in del_list:\r\n del(self.occ_values_w_n[j])\r\n\t\t#self.occ_values_w_n.sort(reverse=True)\r\n #print 
self.occ_values_w_n\r\n\t\t\r\n\t\t#Sorting the list\r\n\t\tsort_t=[]\r\n\t\tsort_tt=self.occ_values_w_n\r\n\t\tself.occ_values_w_n=[]\r\n\t\tfor i in sort_tt:\r\n\t\t\tsort_t.append(i[2])\r\n\t\tsort_t.sort(reverse=True)\r\n\t\tfor i in sort_t:\r\n\t\t\tfor j in sort_tt:\r\n\t\t\t\tif i == j[2]:\r\n\t\t\t\t\tself.occ_values_w_n.append(j)\r\n\t\tprint self.occ_values_w_n\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\r\n\t\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\r\n\tdef mapping(self):\r\n\t\tglobal dbc\r\n\t\tglobal database_name\r\n\t\tglobal query\r\n\t\tself.dbc=dbc\r\n\t\tself.table_names=[]\r\n\t\tname_synonyms=[]\r\n\t\tsyn_set=[]\r\n\t\tsyn_set_noun_t=[]\r\n\t\tself.extract_table_name=[]\r\n\t\tself.table_names_t=[]\r\n\t\tsyn_set_table_t=[]\r\n\t\tself.lower_noun=[]\r\n\t\tsyn_set_table=[]\r\n\t\tself.maped_table_names=[]\r\n\t\tself.query=[]\r\n\t\tself.select_clause='SELECT'\r\n\t\tself.from_clause='FROM'\r\n\t\tself.where_clause=''\r\n\t\tself.nouns=self.noun_clause_list\r\n\t\tcursor=self.dbc.cursor()\r\n cursor.execute(\"SHOW TABLES\")\r\n table_name=cursor.fetchall()\r\n\t\t#print table_name\r\n\r\n\t\t#Finding table names\r\n\t\tfor i in range(len(table_name)):\r\n\t\t\tself.table_names.append(table_name[i][0])\r\n\t\tprint self.table_names\r\n\t\t\r\n\t\ttable_det=[]\r\n\t\tcursor=self.dbc.cursor()\r\n\t\tfor i in range(len(self.table_names)):\r\n\t\t\tcursor.execute(\"DESC \"+self.table_names[i])\r\n\t\t\tdet=cursor.fetchall()\r\n\t\t\tt=(self.table_names[i],det)\r\n\t\t\ttable_det.append(t)\r\n\t\tprint table_det\r\n\t\t'''\r\n\t\t#Finding synonyms and tables\r\n\t\tfor i in range(len(self.nouns)):\r\n\t\t\tif self.nouns[i] not in self.table_names:\r\n\t\t\t\tsyns=w.synsets(self.nouns[i])\r\n\t\t\t\t#print syns\r\n\t\t\t\t#print syns[0].name()\r\n\t\t\t\tfor j in syns:\r\n\t\t\t\t\tsyn_set=list(chain.from_iterable([j.lemma_names()]))\r\n\t\t\t\t#print syn_set\r\n\t\t\t\tfor k in range(len(syn_set)):\r\n\t\t\t\t\tif syn_set[k] in 
self.table_names:\r\n\t\t\t\t\t\tself.extract_table_name.append(syn_set[k])\r\n\t\t\t\t\t\t#print \"found\"\r\n\t\t'''\r\n\t\t#Converting to lower case\r\n\t\tfor i in range(len(self.table_names)):\r\n\t\t\tl_name=self.table_names[i].lower()\r\n\t\t\tself.table_names_t.append(l_name)\r\n\t\t\r\n\t\tfor j in range(len(self.nouns)):\r\n l_noun=self.nouns[j].lower()\r\n self.lower_noun.append(l_noun)\r\n\r\n\t\tfor i in range(len(self.table_names_t)):\r\n\t\t\tsyns_table=w.synsets(self.table_names_t[i],NOUN)\r\n\t\t\tsyn_set_table_t=[]\r\n\t\t\tfor j in syns_table:\r\n \tsyn_set_table_t.append(list(chain.from_iterable([j.lemma_names()])))\r\n\t\t\tsyn_set_table.append(syn_set_table_t)\r\n\t\t#print syn_set_table\r\n\t\t\t\r\n\r\n\t\t#print self.table_names_t\r\n\t\t#Finding synonyms and tables\r\n for i in range(len(self.lower_noun)):\r\n\t\t\t#lower_case_name=self.noun[i].lower()\r\n if self.lower_noun[i] not in self.table_names_t:\r\n syns_noun=w.synsets(self.nouns[i],NOUN)\r\n #print syns\r\n #print syns[0].name()\r\n for j in syns_noun:\r\n syn_set_noun=list(chain.from_iterable([j.lemma_names()]))\r\n \tprint syn_set_noun\r\n \tfor k in range(len(syn_set_noun)):\r\n\t\t\t\t\t\tfor l in range(len(syn_set_table)):\r\n\t\t\t\t\t\t\tfor m in range(len(syn_set_table[l])):\r\n \t\t\tif syn_set_noun[k] in syn_set_table[l][m]:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tself.noun_table=self.lower_noun[i]\r\n\t\t\t\t\t\t\t\t\t\tself.extract_table_name.append(self.table_names[l])\r\n\t\t\t\t\t\t\t\t\t\t#print self.table_names[l]\r\n\t\t\t\t\t\t\t#print self.extract_table_name\r\n \t \t\t\t#print \"found\"\r\n\t\t\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\tself.noun_table=self.lower_noun[i]\r\n\t\t\t\tind=self.table_names_t.index(self.lower_noun[i])\r\n\t\t\t\tself.extract_table_name.append(self.table_names[ind])\r\n\t\t#print self.extract_table_name\r\n\t\tfor i in self.extract_table_name:\r\n\t\t\tif i not in 
self.maped_table_names:\r\n\t\t\t\tself.maped_table_names.append(i)\r\n\t\t#print self.maped_table_names\r\n\t\t#print self.noun_table\r\n\t\t\r\n\t\t#Attribute mapping\r\n\t\tsyn_set_attribute=[]\r\n\t\ttable_attr=[]\r\n\t\tself.extract_table_attr=[]\r\n\t\tself.mapped_attr=[]\r\n\t\tself.list14=[]\r\n\t\tself.from_clause+=' '\r\n self.from_clause+=self.maped_table_names[0]\r\n\t\tif len(self.maped_table_names) == 1:\r\n\t\t\t'''\r\n\t\t\tself.list14=self.featuress\r\n\t\t\tfor i in range(len(self.list14)):\r\n\t\t\t\tif self.list14[i][1] == 'WDT' or self.list14[i][1] == 'WP' or self.list14[i][1] == 'WP$' or self.list14[i][1] == 'WRB':\r\n\t\t\t\t\tattribute_name=self.occ_values_w_n[0][1]\r\n\t\t\t\t\tfor i in table_det:\r\n \tif i[0] == self.maped_table_names[0]:\r\n \t \tfor j in i[1]:\r\n \t\ttable_attr.append(j[0])\r\n \t#print table_attr\r\n syns_attribute=w.synsets(j[0],NOUN)\r\n \t syn_set_attribute_t=[]\r\n for k in syns_attribute:\r\n \t\t\tsyn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))\r\n \tsyn_set_attribute.append(syn_set_attribute_t)\r\n #print syn_set_attribute\r\n \tattr_l=attribute_name.lower()\r\n if attr_l not in table_attr:\r\n \tsyns_attr=w.synsets(attr_l,NOUN)\r\n for k in syns_attr:\r\n syn_set_attr=list(chain.from_iterable([k.lemma_names()]))\r\n #print syn_set_attr\r\n\t\t\t\t\t\t\t\tfor l in range(len(syn_set_attr)):\r\n \tfor m in range(len(syn_set_attribute)):\r\n \tfor n in range(len(syn_set_attribute[m])):\r\n \t#print syn_set_attr[l]\r\n #print syn_set_attribute[m][n]\r\n if syn_set_attr[l] in syn_set_attribute[m][n]:\r\n \t#print syn_set_attribute[m][n]\r\n #print m\r\n try:\r\n \tself.extract_table_attr.append(table_attr[m])\r\n \texcept:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\tfor i in self.extract_table_attr:\r\n \tif i not in self.mapped_attr:\r\n \tself.mapped_attr.append(i)\r\n print self.mapped_attr\r\n\t\t\t\t\tself.where_clause+=' '\r\n 
self.where_clause+=self.mapped_attr[0]\r\n\t\t\t\t\tself.where_clause+='='\r\n\t\t\t\t\tself.where_clause=self.where_clause+\"'\"+self.list14[i+1][1]+\"'\"\r\n\t\t\t'''\t\r\n\r\n\t\r\n\t\t\t#attribute_name=self.occ_values_v_n[0][1]\r\n #self.select_clause+=self.self.occ_values_v_n[0]\r\n #print attribute_name\r\n\t\t\t\r\n\t\t\t#self.from_clause+=' '\r\n\t\t\t#self.from_clause+=self.maped_table_names[0]\r\n\t\t\t#Converting to lower case\r\n\t\t\ttry:\r\n\t\t\t\tself.list14=self.featuress\r\n \tfor wh in range(len(self.list14)):\r\n \tif self.list14[wh][1] == 'WDT' or self.list14[wh][1] == 'WP' or self.list14[wh][1] == 'WP$' or self.list14[wh][1] == 'WRB':\r\n\t\t\t\t\t\tself.where_clause+='WHERE'\r\n \tattribute_name=self.occ_values_w_n[0][1]\r\n\t\t\t\t\t\tprint \"attribute_name\"\r\n\t\t\t\t\t\tprint attribute_name\r\n \tfor i in table_det:\r\n \tif i[0] == self.maped_table_names[0]:\r\n \tfor j in i[1]:\r\n \ttable_attr.append(j[0])\r\n \t#print table_attr\r\n \tsyns_attribute=w.synsets(j[0],NOUN)\r\n \tsyn_set_attribute_t=[]\r\n \t\tfor k in syns_attribute:\r\n \t\tsyn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))\r\n \tsyn_set_attribute.append(syn_set_attribute_t)\r\n \tprint syn_set_attribute\r\n \tattr_l=attribute_name.lower()\r\n \tif attr_l not in table_attr:\r\n \tsyns_attr=w.synsets(attr_l,NOUN)\r\n \tfor k in syns_attr:\r\n \t\tsyn_set_attr=list(chain.from_iterable([k.lemma_names()]))\r\n \tprint syn_set_attr\r\n \t\tfor l in range(len(syn_set_attr)):\r\n \tfor m in range(len(syn_set_attribute)):\r\n \tfor n in range(len(syn_set_attribute[m])):\r\n \t#print syn_set_attr[l]\r\n \t#print syn_set_attribute[m][n]\r\n \tif syn_set_attr[l] in syn_set_attribute[m][n]:\r\n \t#print syn_set_attribute[m][n]\r\n \t#print m\r\n \ttry:\r\n \tself.extract_table_attr.append(table_attr[m])\r\n \texcept:\r\n \tpass\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\tfor i in self.extract_table_attr:\r\n\t\t\t\t\t\t\t#print i\r\n\t\t\t\t\t\t\t#print 
self.mapped_attr\r\n \tif i not in self.mapped_attr:\r\n \tself.mapped_attr.append(i)\r\n\t\t\t\t\t\t\t\t#print \"i\"\r\n\t\t\t\t\t\t\t\t#print i\r\n\t\t\t\t\t\t\t#print \"self.mapped_attr\"\r\n \t\t#print self.mapped_attr\r\n\t\t\t\t\t\tprint \"list\"\r\n\t\t\t\t\t\tprint self.list14\r\n\t\t\t\t\t\tocc_val_temp=0\r\n\t\t\t\t\t\tfor val in self.occ_values_n_n:\r\n\t\t\t\t\t\t\t#print \"333333\"\r\n\t\t\t\t\t\t\tif val[0] == self.occ_values_w_n[0][1]:\r\n\t\t\t\t\t\t\t\tif val[2] >= occ_val_temp:\r\n\t\t\t\t\t\t\t\t\tocc_val_temp=val[2]\r\n\t\t\t\t\t\t\t\t\tval_temp=val[1]\r\n\t\t\t\t\t\t#print val_temp\r\n #print \"val_temp\"\r\n\t\t\t\t\t\tfor val in self.occ_values_n_num:\r\n\t\t\t\t\t\t\t#print \"333333\"\r\n\t\t\t\t\t\t\tif val[0] == self.occ_values_w_n[0][1]:\r\n\t\t\t\t\t\t\t\tif val[2] >= occ_val_temp:\r\n\t\t\t\t\t\t\t\t\tocc_val_temp=val[2]\r\n\t\t\t\t\t\t\t\t\tval_temp=val[1]\r\n\t\t\t\t\t\t#print val_temp\r\n #print \"val_temp\"\r\n\r\n\r\n\r\n\t\t\t\t\t\tfor val in self.occ_values_n_p:\r\n\t\t\t\t\t\t\t#print \"333333\"\r\n\t\t\t\t\t\t\tif val[0] == self.occ_values_w_n[0][1]:\r\n\t\t\t\t\t\t\t\tif val[2] >= occ_val_temp:\r\n occ_val_temp=val[2]\r\n val_temp=val[1]\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\tprint val_temp\r\n\t\t\t\t\t\tprint \"val_temp\"\r\n\t\t\t\t\t\tprint self.mapped_attr[0]\r\n\t\t\t\t\t\tif not self.mapped_attr:\r\n\t\t\t\t\t\t\tbox=wx.MessageDialog(None,\"Invalid Attribute name\",'Alert',wx.OK)\r\n \t\t\t\tans=box.ShowModal()\r\n \t\t\t\tbox.Destroy()\r\n\t\t\t\t\t\telse:\r\n \t\tself.where_clause+=' '\r\n \t\tself.where_clause+=self.mapped_attr[0]\r\n\t\t\t\t\t\t#print \"mapped_attr\"\t\r\n\t\t\t\t\t\t#print self.mapped_attr\r\n\t\t\t\t\t\t#print self.where_clause\r\n \t#self.where_clause+='='\r\n\t\t\t\t\t\tprint self.list14[wh+3][0]\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t#Finding where clause 
condition\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tsyn_set_con_t=[]\r\n\t\t\t\t\t\t\tsyn_set_con_q=[]\r\n\t\t\t\t\t\t\tsyn_set_con_q_g=[]\r\n\t\t\t\t\t\t\tsyns_con=w.synsets(self.list14[wh+3][0])\r\n\r\n\r\n for c in syns_con:\r\n \tsyn_set_con_t=list(chain.from_iterable([c.lemma_names()]))\r\n\t\t\t\t\t\t\tprint syn_set_con_t\r\n\r\n\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('lesser')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n\t\t\t\t\t\t\t\tsyn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('below')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n \t syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('lower')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n \t syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('fewer')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n \t syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('smaller')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n \t syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\t#print \"error\"\r\n\t\t\t\t\t\t\tprint syn_set_con_q\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('greater')\r\n \tfor c in syns_q_con:\r\n \t syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\t#print syn_set_con_q_g\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('larger')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n \t syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('above')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n \t syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\t#print syn_set_con_q_g\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('higher')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n \t 
syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))\r\n\r\n\r\n\t\t\t\t\t\t\t#print syn_set_con_q_g\r\n\t\t\t\t\t\t\tsyns_q_con=w.synsets('more')\r\n\t\t\t\t\t\t\tfor c in syns_q_con:\r\n \t syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))\r\n\t\t\t\t\t\t\t#print syn_set_con_q_g\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t#print \"condition entered\"\r\n\t\t\t\t\t\t\t#print self.list14\r\n\t\t\t\t\t\t\t#print syn_set_con_q\r\n\t\t\t\t\t\t\tfor c in range(len(syn_set_con_t)):\r\n\t\t\t\t\t\t\t\t#print syn_set_con_t[c]\r\n for x in range(len(syn_set_con_q)):\r\n\t\t\t\t\t\t\t\t\t#print syn_set_con_q[x]\r\n \tfor y in range(len(syn_set_con_q[x])):\r\n\t\t\t\t\t\t\t\t\t\t#print \"dgfjhdjfhjdhfjhfjdfhjhqqqqqqqq\"\r\n\t\t\t\t\t\t\t\t\t\t#print syn_set_con_t[c]\r\n\t\t\t\t\t\t\t\t\t\t#print syn_set_con_q[x][y]\r\n if syn_set_con_t[c] in syn_set_con_q[x][y]:\r\n\t\t\t\t\t\t\t\t\t\t\t#print syn_set_con_t[c]\r\n\t\t\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint \"try\"\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint self.list14[wh+6][0]\r\n\r\n \tif self.list14[wh+6][0] == 'equal':\r\n \t #self.where_clause+='<='\r\n \t condition='<='\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint condition\r\n print \"condition\"\r\n \t#else:\r\n \t #self.where_clause+='<'\r\n \t# condition='<'\r\n\t\t\t\t\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\t\t\t\t\tcondition='<'\r\n\t\t\t\t\t\t\t\t\t\t\t\t#print condition\r\n #print \"condition\"\r\n\t\t\t\t\t\t\t\t\t\t#else:\r\n\t\t\t\t\t\t\t\t\t\t#\tcondition='='\r\n\r\n\t\t\t\t\t\t\tfor c in range(len(syn_set_con_t)):\r\n for x in range(len(syn_set_con_q_g)):\r\n for y in range(len(syn_set_con_q_g[x])):\r\n if syn_set_con_t[c] in syn_set_con_q_g[x][y]:\r\n\t\t\t\t\t\t\t\t\t\t\tprint syn_set_con_q_g[x][y]\r\n\t\t\t\t\t\t\t\t\t\t\tprint syn_set_con_t[c]\r\n\t\t\t\t\t\t\t\t\t\t\t#print self.list14[wh+6][0]\r\n\t\t\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tif self.list14[wh+6][0] == 'equal':\r\n \t #self.where_clause+='<='\r\n \t 
condition='>='\r\n \t#else:\r\n \t #self.where_clause+='<'\r\n \t# condition='>'\r\n\t\t\t\t\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\t\t\t\t\tcondition='>'\r\n\t\t\t\t\t\t\t\t\t\t\t\t#print condition\r\n\t\t\t\t\t\t\t\t\t\t\t\t#print \"condition\"\r\n\t\t\t\t\t\t\t\t\t\t#else:\r\n\t\t\t\t\t\t\t\t\t\t#\tcondition='='\r\n\r\n\r\n\t\t\t\t\t\t\tif len(condition) < 1:\t\r\n\t\t\t\t\t\t\t\tcondition='='\r\n\t\t\t\t\t\t\r\n except:\r\n\t\t\t\t\t\t\tcondition='=' \r\n\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t \r\n\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\r\n\t\t\t\t\t\t#print \"condition\"\t\r\n\t\t\t\t\t\t#print condition\t\t\t\t\t\r\n\t\t\t\t\t\tself.where_clause+=condition\t\r\n \t\tself.where_clause=self.where_clause+\"'\"+str(val_temp)+\"'\"\r\n\t\t\t\t\t\t#print self.list14\r\n\t\t\t\t\t\t#print \"where clause\"\r\n\t\t\t\t\t\tprint self.where_clause\r\n\t\t\t\tsyn_set_attribute=[]\r\n \t\ttable_attr=[]\r\n \t\tself.extract_table_attr=[]\r\n \t\tself.mapped_attr=[]\r\n \t\tself.list14=[]\r\n\t\t\t\tattribute_name_t=[]\r\n\t\t\t\tattribute_name=[]\r\n\t\t\t\tattr_l=[]\r\n \t\t#self.from_clause+=' '\r\n\r\n\t\t\t\t\r\n\t\t\t\t#attribute_name=self.occ_values_v_n[0][1]\r\n\t\t\t\tfor i in self.occ_values_v_n[:]:\r\n\t\t\t\t\tattribute_name_t.append(i[1])\r\n\t\t\t\tprint attribute_name_t\r\n\t\t\t\t#print \"attribute_name_t\"\r\n\t\t\t\tprint self.noun_table[0]\r\n\t\t\t\t#print len(attribute_name_t)\r\n\t\t\t\tif len(attribute_name_t) > 1:\r\n\t\t\t\t\t#print \"entered\"\r\n\t\t\t\t\tfor i in attribute_name_t:\r\n\t\t\t\t\t\t#print i\r\n\t\t\t\t\t\tif i != self.noun_table:\r\n\t\t\t\t\t\t\t#print i\r\n\t\t\t\t\t\t\tattribute_name.append(i)\r\n print attribute_name\r\n\t\t\t\t#print \"ghfggfhgefhgehfehfghefgehfg\"\r\n\t\t\t\t#Removing nouns after wh from attributes list\r\n\t\t\t\ttry:\r\n\t\t\t\t\tdel_ind=[]\r\n\t\t\t\t\tfor d in range(len(attribute_name)):\r\n\t\t\t\t\t\tif attribute_name[d] == 
self.occ_values_w_n[0][1]:\r\n\t\t\t\t\t\t\tdel_ind.append(d)\r\n \t\t#del(attribute_name[del_ind])\r\n\t\t\t\t\t\t\tprint attribute_name[d]\r\n\t\t\t\t\tdel_ind.reverse()\r\n\t\t\t\t\tprint del_ind\r\n\t\t\t\t\tfor d in del_ind:\r\n\t\t\t\t\t\tdel(attribute_name[d])\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint \"pass\"\r\n\t\t\t\t\tpass\r\n\t\t\t\t\r\n\t\t\t\t#Removing table names if other attributes present\r\n\r\n\t\t\t\t\t\r\n\t\t\t\t#self.select_clause+=self.self.occ_values_v_n[0]\r\n\t\t\t\t#print \"attribute_name 111\"\r\n\t\t\t\t#print attribute_name\r\n\t\t\t\tfor i in table_det:\r\n\t\t\t\t\tif i[0] == self.maped_table_names[0]:\r\n\t\t\t\t\t\tfor j in i[1]:\r\n\t\t\t\t\t\t\ttable_attr.append(j[0])\r\n\t\t\t\t\t\t\t#print table_attr\r\n \t\t\t\tsyns_attribute=w.synsets(j[0],NOUN)\r\n \t\t\t\tsyn_set_attribute_t=[]\r\n \t\t\t\tfor k in syns_attribute:\r\n \t\t\t\tsyn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))\r\n \t\t\t\tsyn_set_attribute.append(syn_set_attribute_t)\r\n\t\t\t\t\t\t\t#print syn_set_attribute\r\n\t\t\t\t\t\tfor atn in attribute_name:\r\n\t\t\t\t\t\t\tattr_l.append(atn.lower())\r\n\t\t\t\t\t\tfor atn in attr_l:\r\n\t\t\t\t\t\t\tif atn not in table_attr:\r\n\t\t\t\t\t\t\t\tsyns_attr=w.synsets(atn,NOUN)\r\n\t\t\t\t\t\t\t\tfor k in syns_attr:\r\n \t\t\t\t\tsyn_set_attr=list(chain.from_iterable([k.lemma_names()]))\r\n\t\t\t\t\t\t\t\t#print syn_set_attr\r\n\t\t\t\t\t\t\t\tfor l in range(len(syn_set_attr)):\r\n\t\t\t\t\t\t\t\t\tfor m in range(len(syn_set_attribute)):\r\n \t\t \t for n in range(len(syn_set_attribute[m])):\r\n\t\t\t\t\t\t\t\t\t\t\t#print syn_set_attr[l]\r\n\t\t\t\t\t\t\t\t\t\t\t#print syn_set_attribute[m][n]\r\n\t\t\t\t\t\t\t\t\t\t\tif syn_set_attr[l] in syn_set_attribute[m][n]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t#print syn_set_attribute[m][n]\r\n\t\t\t\t\t\t\t\t\t\t\t\t#print 
m\r\n\t\t\t\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.extract_table_attr.append(table_attr[m])\r\n\t\t\t\t\t\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t#print \"self.extract_table_attr\"\r\n\t\t\t\t#print self.extract_table_attr\r\n\t\t\t\tif len(self.extract_table_attr) < 1:\r\n\t\t\t\t\t#print \"fgvfhhfghfjghfjghfu\"\r\n\t\t\t\t\tselect_attr=self.occ_values_v_n[0][1]\r\n\t\t\t\t\t#print select_attr\r\n\t\t\t\t\tif select_attr == 'details' or select_attr == 'contents' or select_attr == 'detail' or select_attr == 'content':\r\n\t\t\t\t\t\tself.select_clause+=' '\r\n\t\t\t\t\t\tself.select_clause+='*'\r\n\t\t\t\t\t\tself.query=self.select_clause+' '+self.from_clause+' '+self.where_clause\r\n\t\t\t\t\t\tprint self.query\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsyns_tb=w.synsets(select_attr,NOUN)\r\n\t\t\t\t\t\tfor i in syns_tb:\r\n \t\t\tsyns_tbb=list(chain.from_iterable([i.lemma_names()]))\r\n\t\t\t\t\t\tsyns_tb_q=w.synsets(self.maped_table_names[0],NOUN)\r\n\t\t\t\t\t\t#print self.maped_table_names[0]\r\n\t\t\t\t\t\t#print syns_tb_q\r\n \t\tfor i in syns_tb_q:\r\n \t\tsyns_tbb_q=list(chain.from_iterable([i.lemma_names()]))\r\n\t\t\t\t\t\t#print syns_tbb\r\n\t\t\t\t\t\t#print syns_tbb_q\r\n\t\t\t\t\t\tfor i in range(len(syns_tbb)):\r\n\t\t\t\t\t\t\t#for j in range(len(sysns_tbb_q)):\r\n\t\t\t\t\t\t\tif syns_tbb[i] in syns_tbb_q:\r\n\t\t\t\t\t\t\t\t#print \"hgfhg\"\r\n\t\t\t\t\t\t\t\tself.select_clause+=' '\r\n \t\t self.select_clause+='*'\r\n \t\t self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause\r\n \t\t print self.query\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\r\n\t\t\t\t\t\t\r\n\r\n\r\n\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor i in self.extract_table_attr:\r\n\t\t\t\t\t\tif i not in self.mapped_attr:\r\n\t\t\t\t\t\t\tself.mapped_attr.append(i)\r\n\t\t\t\t\tprint self.mapped_attr\r\n\t\t\t\t\tself.select_clause+=' '\r\n\t\t\t\t\tfor i in 
range(len(self.mapped_attr)):\r\n\t\t\t\t\t\tself.select_clause+=self.mapped_attr[i]\r\n\t\t\t\t\t\tif i < (len(self.mapped_attr)-1):\r\n\t\t\t\t\t\t\tself.select_clause+=','\r\n\t\t\t\t\t\t#print self.select_clause\r\n\t\t\t\t\tself.query=self.select_clause+' '+self.from_clause+' '+self.where_clause\r\n\t\t\t\t\tprint self.query\r\n\t\t\t\t\t\r\n\t\t\t\t\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\texcept:\r\n\t\t\t\tsyn_set_attribute=[]\r\n table_attr=[]\r\n self.extract_table_attr=[]\r\n self.mapped_attr=[]\r\n self.list14=[]\r\n\t\t\t\tattribute_name=[]\r\n\t\t\t\tattr_l=[]\r\n\t\t\t\t#print self.where_clause\r\n\t\t\t\t#pass\r\n\t\t\t\t#print \"bgjgjshfcjhj\"\r\n\t\t\t\t#for i in self.occ_values_n_n:\r\n\t\t\t\t#if self.maped_table_names[0] in i:\r\n\t\t\t\t\t\t#print i\r\n\t\t\t\t\t\t#attribute_name=i[1]\r\n\t\t\t\t\t\t#attribute_name=attribute_name.lower()\r\n\t\t\t\ttry:\r\n\t\t\t\t\tprint \"self.noun_table\"\r\n\t\t\t\t\tprint self.noun_table\r\n\t\t\t\t\t#attribute_name=self.occ_values_n_n[0][1]\r\n\t\t\t\t\tfor i in self.occ_values_n_n:\r\n\t\t\t\t\t\tif self.noun_table in i:\r\n\t\t\t\t\t\t\tfor j in i:\r\n\t\t\t\t\t\t\t\tif j != self.noun_table and isinstance(j,float) == False:\r\n\t\t\t\t\t\t\t\t\tattribute_name.append(j)\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tdel_ind=attribute_name.index(self.occ_values_w_n[0][1])\r\n\t\t\t\t\t\tdel(attribute_name[del_ind])\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\tif attribute_name[0] == 'details' or attribute_name[0] == 'detail' or attribute_name[0] == 'contents' or attribute_name[0] == 'content':\r\n\t\t\t\t\t\tself.select_clause+=' '\r\n \t\tself.select_clause+='*'\r\n \t\t\tself.query=self.select_clause+' '+self.from_clause+' '+self.where_clause\r\n \t\tprint self.query\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tfor i in table_det:\r\n \t\t\tif i[0] == self.maped_table_names[0]:\r\n \t \t\t\tfor j in i[1]:\r\n \t\t \t \t\ttable_attr.append(j[0])\r\n \t\t\t \t#print table_attr\r\n \t \t\t 
\tsyns_attribute=w.synsets(j[0],NOUN)\r\n \t \t\t \tsyn_set_attribute_t=[]\r\n \t \t\t \tfor k in syns_attribute:\r\n \t \t\t\t\tsyn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))\r\n \t \t\tsyn_set_attribute.append(syn_set_attribute_t)\r\n \t \t\t#print syn_set_attribute\r\n\t\t\t\t\t\t\t\tfor atn in attribute_name:\r\n \t\tattr_l.append(atn.lower())\r\n\t\t\t\t\t\t\t\tfor atn in attr_l:\r\n\t\t\t\t\t\t\t\t\tif atn not in table_attr:\r\n \t\t\t \t \t\tsyns_attr=w.synsets(atn,NOUN)\r\n \t\t\t \t \tfor k in syns_attr:\r\n \t\t\t\t \tsyn_set_attr=list(chain.from_iterable([k.lemma_names()]))\r\n \t \t\t\t print syn_set_attr\r\n\t\t\t\t\t\t\t\t\t\tfor l in range(len(syn_set_attr)):\r\n \t \t\t\t\tfor m in range(len(syn_set_attribute)):\r\n \t \t\t\t \tfor n in range(len(syn_set_attribute[m])):\r\n \t \t \t\t\t#print syn_set_attr[l]\r\n \t \t\t\t \t#print syn_set_attribute[m][n]\r\n \t \t \t\tif syn_set_attr[l] in syn_set_attribute[m][n]:\r\n \t \t \t\t\t\t#print syn_set_attribute[m][n]\r\n \t \t \t\t\t#print m\r\n \t \t \ttry:\r\n \t\t\t\t \t\tself.extract_table_attr.append(table_attr[m])\r\n \t\t\t \texcept:\r\n \t\t\t pass\r\n\t\t\t\t\t\tfor i in self.extract_table_attr:\t\r\n \t if i not in self.mapped_attr:\r\n \t \tself.mapped_attr.append(i)\r\n \tprint self.mapped_attr\r\n\t\t\t\t\t\tself.select_clause+=' '\r\n\t\t\t\t\t\tif not self.mapped_attr:\r\n box=wx.MessageDialog(None,\"Invalid Attribute name\",'Alert',wx.OK)\r\n ans=box.ShowModal()\r\n box.Destroy()\r\n\t\t\t\t\t\telse :\r\n \t\t#self.select_clause+=' '\r\n \t\t#self.select_clause+=self.mapped_attr[0]\r\n\t\t\t\t\t\t\tfor i in range(len(self.mapped_attr)):\r\n \t\tself.select_clause+=self.mapped_attr[i]\r\n \t\tif i < (len(self.mapped_attr)-1):\r\n \t \tself.select_clause+=','\r\n \t\tself.query=self.select_clause+' '+self.from_clause+' '+self.where_clause\r\n \t\tprint self.query\r\n\t\t\t\texcept:\r\n\t\t\t\t\tself.select_clause+=' '\r\n self.select_clause+='*'\r\n 
self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause\r\n print self.query\r\n\t\tquery=self.query\r\n\r\n\t\t\t\t\t\r\n\t\t\t\r\n\t\r\n\t\r\n\t\t\t\t\t\t\t\t\r\n\t\r\n\t\t\t\t\t\t\r\n\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\r\n\t\t\t\t\t\t\r\n\r\n\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\r\n\t\t\r\n\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\r\n\t\t\r\n\t\t\r\n\t\t\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\r\n\r\n\r\n\r\n\tdef csv_file(self):\r\n\t\t#global path\r\n\t\tif not os.path.exists(os.path.dirname(\"matrix/matrix.csv\")):\r\n os.makedirs(os.path.dirname(\"matrix/matrix.csv\"))\r\n\r\n\t\ttry:\r\n\t\t\tos.remove(\"./matrix/matrix.csv\")\r\n\t\t\tfile1 = open(\"./matrix/matrix.csv\",\"a+\")\r\n\t\texcept:\r\n\t\t\tfile1 = open(\"./matrix/matrix.csv\",\"a+\")\r\n\t\tt = \",\"\r\n\t\tfor i in self.words:\r\n\t\t\tt += i\r\n\t\t\tt +=\",\"\r\n\t\tt+=\"\\n\"\r\n\t\tfile1.write(t)\r\n\t\tfor l in range(len(self.occurr_val)):\r\n\t\t\ttt=''\r\n\t\t\tfor m in range(len(self.occurr_val[l])):\r\n\t\t\t\ttt+=str(self.occurr_val[l][m])\r\n\t\t\t\ttt+=','\r\n\t\t\ttt+='\\n'\r\n\t\t\tfile1.write(tt)\r\n\t\tfile1.close()\r\n\t\t\t\r\n\t\t\r\n\r\n\t\t\r\n\r\n\t\t\t\r\n\t\t\t \r\n\r\n\t\t\t\r\n\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\r\n\t\r\n\r\n\r\n\r\n\r\n\r\nif __name__=='__main__' :\r\n\tapp=wx.PySimpleApp()\r\n\tmain_window=MainWindow(parent=None,id=-1)\r\n\tmain_window.Show()\r\n\tapp.MainLoop()\r\n1.txt\r\nDisplaying 1.txt.e the manual work and time. As for children and\r\nadults, people are most challenged by word problem solving\r\nnot because of their mathematical skills but because of text\r\ncomprehension. Regularly, incorrect answers to word\r\nproblems are because of correct calculations to incorrect\r\nproblem representation.\r\n\r\n\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Model:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def form(self):
"""Contains the data send from the client."""
return security.get_field_storage()
@property
def cookie(self):
"""The client cookie"""
return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))
@property
def url(self):
"""The url of request"""
url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')
return url if url else ''
@property
def serverProtocol(self):
"""The server protocol"""
serverProtocol = os.environ.get('SERVER_PROTOCOL')
return serverProtocol if serverProtocol else 'HTTP/1.1'
@property
def protocol(self):
"""Te protocol (HTTP or HTTPS)"""
return helpers.get_protocol()
@property
def ip(self):
"""The ip of the client"""
return os.environ.get('REMOTE_ADDR')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model:
<|reserved_special_token_0|>
def __init__(self):
"""Puth the os.environ dict into the namespace"""
self.__dict__.update(itertools.starmap(lambda key, value: (key[0].
lower() + key.title().replace('_', '')[1:], value), os.environ.
items()))
@property
def form(self):
"""Contains the data send from the client."""
return security.get_field_storage()
@property
def cookie(self):
"""The client cookie"""
return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))
@property
def url(self):
"""The url of request"""
url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')
return url if url else ''
@property
def serverProtocol(self):
"""The server protocol"""
serverProtocol = os.environ.get('SERVER_PROTOCOL')
return serverProtocol if serverProtocol else 'HTTP/1.1'
@property
def protocol(self):
"""Te protocol (HTTP or HTTPS)"""
return helpers.get_protocol()
@property
def ip(self):
"""The ip of the client"""
return os.environ.get('REMOTE_ADDR')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model:
    """Manages the information received by the client (the CGI/WSGI environ)."""
    def __init__(self):
        """Put the os.environ dict into the namespace as camelCase attributes.

        E.g. 'REMOTE_ADDR' becomes the attribute ``remoteAddr``.
        """
        self.__dict__.update(itertools.starmap(lambda key, value: (key[0].
            lower() + key.title().replace('_', '')[1:], value), os.environ.
            items()))
    @property
    def form(self):
        """Contains the data sent from the client."""
        return security.get_field_storage()
    @property
    def cookie(self):
        """The client cookie (parsed from the HTTP_COOKIE variable)."""
        return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))
    @property
    def url(self):
        """The url of the request ('' when neither variable is set)."""
        url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')
        return url if url else ''
    @property
    def serverProtocol(self):
        """The server protocol (defaults to 'HTTP/1.1')."""
        serverProtocol = os.environ.get('SERVER_PROTOCOL')
        return serverProtocol if serverProtocol else 'HTTP/1.1'
    @property
    def protocol(self):
        """The protocol (HTTP or HTTPS)."""
        return helpers.get_protocol()
    @property
    def ip(self):
        """The ip of the client."""
        return os.environ.get('REMOTE_ADDR')
<|reserved_special_token_1|>
import http.cookies
import json
import os
import itertools
import types
from framework import helpers
from framework import security
class Model:
    """Exposes the request information received from the client.

    Every CGI/WSGI environ variable is mirrored as a camelCase instance
    attribute, and a handful of convenience properties derive common
    request fields from ``os.environ``.
    """

    def __init__(self):
        """Copy os.environ into the namespace as camelCase attributes."""
        # 'HTTP_COOKIE' -> 'httpCookie': title-case the words, strip the
        # underscores, then lower-case the very first character.
        self.__dict__.update({
            key[0].lower() + key.title().replace('_', '')[1:]: value
            for key, value in os.environ.items()
        })

    @property
    def form(self):
        """Contains the data sent from the client."""
        return security.get_field_storage()

    @property
    def cookie(self):
        """The client cookie (parsed from the HTTP_COOKIE variable)."""
        return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))

    @property
    def url(self):
        """The url of the request ('' when neither variable is set)."""
        path = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')
        if path:
            return path
        return ''

    @property
    def serverProtocol(self):
        """The server protocol (defaults to 'HTTP/1.1')."""
        proto = os.environ.get('SERVER_PROTOCOL')
        if proto:
            return proto
        return 'HTTP/1.1'

    @property
    def protocol(self):
        """The protocol (HTTP or HTTPS)."""
        return helpers.get_protocol()

    @property
    def ip(self):
        """The ip of the client."""
        return os.environ.get('REMOTE_ADDR')
<|reserved_special_token_1|>
import http.cookies
import json
import os
import itertools
import types
from framework import helpers
from framework import security
class Model:
    """Manages the information received by the client (the CGI/WSGI environ)."""
    def __init__(self):
        """Put the os.environ dict into the namespace as camelCase attributes."""
        self.__dict__.update(
            itertools.starmap(
                lambda key, value: (
                    key[0].lower() + # lower case the first letter and add
                    key.title() # title case all the words
                    .replace('_', '') # remove the underscores
                    [1:] # all text without the first char
                    , value
                ) #lambda
                ,os.environ.items()
            ) #itertools.starmap
        ) #update
    @property
    def form(self):
        """Contains the data sent from the client."""
        return security.get_field_storage()
    @property
    def cookie(self):
        """The client cookie (parsed from the HTTP_COOKIE variable)."""
        return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))
    @property
    def url(self):
        """The url of the request ('' when neither variable is set)."""
        url = os.environ.get('PATH_INFO')\
            or os.environ.get('REQUEST_URI')
        return url if url else ''
    @property
    def serverProtocol(self):
        """The server protocol (defaults to 'HTTP/1.1')."""
        serverProtocol = os.environ.get('SERVER_PROTOCOL')
        return serverProtocol if serverProtocol else 'HTTP/1.1'
    @property
    def protocol(self):
        """The protocol (HTTP or HTTPS)."""
        return helpers.get_protocol()
    @property
    def ip(self):
        """The ip of the client."""
        return os.environ.get('REMOTE_ADDR')
|
flexible
|
{
"blob_id": "7f21ab8d332d169226ef17276abbdd373e3a62c2",
"index": 8544,
"step-1": "<mask token>\n\n\nclass Model:\n <mask token>\n <mask token>\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n",
"step-2": "<mask token>\n\n\nclass Model:\n <mask token>\n\n def __init__(self):\n \"\"\"Puth the os.environ dict into the namespace\"\"\"\n self.__dict__.update(itertools.starmap(lambda key, value: (key[0].\n lower() + key.title().replace('_', '')[1:], value), os.environ.\n items()))\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n",
"step-3": "<mask token>\n\n\nclass Model:\n \"\"\"Manages the information received by the client\"\"\"\n\n def __init__(self):\n \"\"\"Puth the os.environ dict into the namespace\"\"\"\n self.__dict__.update(itertools.starmap(lambda key, value: (key[0].\n lower() + key.title().replace('_', '')[1:], value), os.environ.\n items()))\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n",
"step-4": "import http.cookies\nimport json\nimport os\nimport itertools\nimport types\nfrom framework import helpers\nfrom framework import security\n\n\nclass Model:\n \"\"\"Manages the information received by the client\"\"\"\n\n def __init__(self):\n \"\"\"Puth the os.environ dict into the namespace\"\"\"\n self.__dict__.update(itertools.starmap(lambda key, value: (key[0].\n lower() + key.title().replace('_', '')[1:], value), os.environ.\n items()))\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n",
"step-5": "import http.cookies\nimport json\nimport os\nimport itertools\nimport types\n\nfrom framework import helpers\nfrom framework import security\n\n\nclass Model:\n \"\"\"Manages the information received by the client\"\"\"\n\n def __init__(self):\n \"\"\"Puth the os.environ dict into the namespace\"\"\"\n self.__dict__.update(\n itertools.starmap(\n lambda key, value: (\n key[0].lower() + # upper case the first letter and add\n key.title() # title case all text\n .replace('_', '') # remove undersore\n [1:] # all text without the first char\n , value\n ) #lambda\n ,os.environ.items()\n ) #itertools.starmap\n ) #update\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO')\\\n or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n ",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class Datafunction(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_first_time(self):
return self.first_time
def get_last_time(self):
return self.last_time
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Datafunction(object):
def __init__(self):
self.series = []
self.ref_point = RefObject()
self.clAss = None
self.first_time = None
self.last_time = None
self.id = None
def set_series(self, series):
self.series = series
def set_time(self, first, last):
self.first_time = first
self.last_time = last
def set_id(self, Id):
self.id = Id
def set_class(self, clAss):
self.clAss = clAss
def set_ref_object(self, pose, name, time, Id):
self.ref_point.set_data(pose, name, time, Id)
def get_series(self):
return self.series
def get_class(self):
return self.clAss
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_first_time(self):
return self.first_time
def get_last_time(self):
return self.last_time
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Datafunction(object):
def __init__(self):
self.series = []
self.ref_point = RefObject()
self.clAss = None
self.first_time = None
self.last_time = None
self.id = None
def set_series(self, series):
self.series = series
def set_time(self, first, last):
self.first_time = first
self.last_time = last
def set_id(self, Id):
self.id = Id
def set_class(self, clAss):
self.clAss = clAss
def set_ref_object(self, pose, name, time, Id):
self.ref_point.set_data(pose, name, time, Id)
def get_series(self):
return self.series
def get_class(self):
return self.clAss
def get_id(self):
return self.id
<|reserved_special_token_0|>
def get_first_time(self):
return self.first_time
def get_last_time(self):
return self.last_time
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class RefObject(object):
    """A reference point: pose, name, timestamp and identifier."""
    def __init__(self):
        # Fields stay empty/None until set_data() fills them in.
        self.pose = []
        self.name = []
        self.time = None
        self.id = None
    def set_data(self, pose, name, time, Id):
        """Populate all four fields in one call."""
        self.pose = pose
        self.name = name
        self.time = time
        self.id = Id
class Datafunction(object):
    """A labelled data series anchored to a RefObject reference point."""
    def __init__(self):
        # All fields are empty/None until set via the setters below.
        self.series = []
        self.ref_point = RefObject()
        self.clAss = None
        self.first_time = None
        self.last_time = None
        self.id = None
    def set_series(self, series):
        """Store the data series."""
        self.series = series
    def set_time(self, first, last):
        """Store the first and last timestamps of the series."""
        self.first_time = first
        self.last_time = last
    def set_id(self, Id):
        """Store the identifier."""
        self.id = Id
    def set_class(self, clAss):
        """Store the class label."""
        self.clAss = clAss
    def set_ref_object(self, pose, name, time, Id):
        """Forward the reference-point data to the embedded RefObject."""
        self.ref_point.set_data(pose, name, time, Id)
    def get_series(self):
        """Return the stored data series."""
        return self.series
    def get_class(self):
        """Return the class label."""
        return self.clAss
    def get_id(self):
        """Return the identifier."""
        return self.id
    def get_ref_point(self):
        """Return the embedded RefObject."""
        return self.ref_point
    def get_first_time(self):
        """Return the first timestamp."""
        return self.first_time
    def get_last_time(self):
        """Return the last timestamp."""
        return self.last_time
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python
# encoding: utf8
#from __future__ import unicode_literals
class RefObject(object):
    """A reference point: pose, name, timestamp and identifier."""

    def __init__(self):
        # Fields stay empty/None until set_data() fills them in.
        self.pose, self.name = [], []
        self.time = self.id = None

    def set_data(self, pose, name, time, Id):
        """Populate all four fields in one call."""
        self.pose, self.name = pose, name
        self.time, self.id = time, Id
class Datafunction(object):
    """A labelled data series anchored to a RefObject reference point."""

    def __init__(self):
        # All fields are empty/None until set via the setters below.
        self.series = []
        self.ref_point = RefObject()
        self.clAss = None
        self.first_time = None
        self.last_time = None
        self.id = None

    # ---- setters -------------------------------------------------

    def set_series(self, series):
        """Store the data series."""
        self.series = series

    def set_time(self, first, last):
        """Store the first and last timestamps of the series."""
        self.first_time, self.last_time = first, last

    def set_id(self, Id):
        """Store the identifier."""
        self.id = Id

    def set_class(self, clAss):
        """Store the class label."""
        self.clAss = clAss

    def set_ref_object(self, pose, name, time, Id):
        """Forward the reference-point data to the embedded RefObject."""
        self.ref_point.set_data(pose, name, time, Id)

    # ---- getters -------------------------------------------------

    def get_series(self):
        """Return the stored data series."""
        return self.series

    def get_class(self):
        """Return the class label."""
        return self.clAss

    def get_id(self):
        """Return the identifier."""
        return self.id

    def get_ref_point(self):
        """Return the embedded RefObject."""
        return self.ref_point

    def get_first_time(self):
        """Return the first timestamp."""
        return self.first_time

    def get_last_time(self):
        """Return the last timestamp."""
        return self.last_time
# Smoke-test entry point: only runs when the module is executed directly.
if __name__ == '__main__':
    print("Hello")
|
flexible
|
{
"blob_id": "7611a57705939ce456e34d5ae379d6ca748b13c3",
"index": 1884,
"step-1": "<mask token>\n\n\nclass Datafunction(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_first_time(self):\n return self.first_time\n\n def get_last_time(self):\n return self.last_time\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Datafunction(object):\n\n def __init__(self):\n self.series = []\n self.ref_point = RefObject()\n self.clAss = None\n self.first_time = None\n self.last_time = None\n self.id = None\n\n def set_series(self, series):\n self.series = series\n\n def set_time(self, first, last):\n self.first_time = first\n self.last_time = last\n\n def set_id(self, Id):\n self.id = Id\n\n def set_class(self, clAss):\n self.clAss = clAss\n\n def set_ref_object(self, pose, name, time, Id):\n self.ref_point.set_data(pose, name, time, Id)\n\n def get_series(self):\n return self.series\n\n def get_class(self):\n return self.clAss\n <mask token>\n <mask token>\n\n def get_first_time(self):\n return self.first_time\n\n def get_last_time(self):\n return self.last_time\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Datafunction(object):\n\n def __init__(self):\n self.series = []\n self.ref_point = RefObject()\n self.clAss = None\n self.first_time = None\n self.last_time = None\n self.id = None\n\n def set_series(self, series):\n self.series = series\n\n def set_time(self, first, last):\n self.first_time = first\n self.last_time = last\n\n def set_id(self, Id):\n self.id = Id\n\n def set_class(self, clAss):\n self.clAss = clAss\n\n def set_ref_object(self, pose, name, time, Id):\n self.ref_point.set_data(pose, name, time, Id)\n\n def get_series(self):\n return self.series\n\n def get_class(self):\n return self.clAss\n\n def get_id(self):\n return self.id\n <mask token>\n\n def get_first_time(self):\n return self.first_time\n\n def get_last_time(self):\n return self.last_time\n\n\n<mask token>\n",
"step-4": "class RefObject(object):\n\n def __init__(self):\n self.pose = []\n self.name = []\n self.time = None\n self.id = None\n\n def set_data(self, pose, name, time, Id):\n self.pose = pose\n self.name = name\n self.time = time\n self.id = Id\n\n\nclass Datafunction(object):\n\n def __init__(self):\n self.series = []\n self.ref_point = RefObject()\n self.clAss = None\n self.first_time = None\n self.last_time = None\n self.id = None\n\n def set_series(self, series):\n self.series = series\n\n def set_time(self, first, last):\n self.first_time = first\n self.last_time = last\n\n def set_id(self, Id):\n self.id = Id\n\n def set_class(self, clAss):\n self.clAss = clAss\n\n def set_ref_object(self, pose, name, time, Id):\n self.ref_point.set_data(pose, name, time, Id)\n\n def get_series(self):\n return self.series\n\n def get_class(self):\n return self.clAss\n\n def get_id(self):\n return self.id\n\n def get_ref_point(self):\n return self.ref_point\n\n def get_first_time(self):\n return self.first_time\n\n def get_last_time(self):\n return self.last_time\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\r\n# encoding: utf8\r\n#from __future__ import unicode_literals\r\n\r\nclass RefObject(object):\r\n def __init__(self,):\r\n self.pose = []\r\n self.name = []\r\n self.time = None\r\n self.id = None\r\n def set_data(self,pose, name, time, Id):\r\n self.pose = pose\r\n self.name = name\r\n self.time = time \r\n self.id = Id\r\n \r\nclass Datafunction(object):\r\n def __init__(self,):\r\n self.series = []\r\n self.ref_point = RefObject()\r\n self.clAss = None\r\n self.first_time = None\r\n self.last_time = None\r\n self.id = None\r\n \r\n def set_series(self,series):\r\n self.series = series\r\n \r\n def set_time(self, first, last):\r\n self.first_time = first\r\n self.last_time = last\r\n \r\n def set_id(self,Id):\r\n self.id = Id\r\n \r\n def set_class(self, clAss):\r\n self.clAss = clAss\r\n \r\n def set_ref_object(self,pose, name, time, Id):\r\n self.ref_point.set_data(pose, name, time, Id)\r\n \r\n def get_series(self,):\r\n return self.series\r\n \r\n def get_class(self,):\r\n return self.clAss\r\n \r\n def get_id(self,):\r\n return self.id\r\n \r\n def get_ref_point(self,):\r\n return self.ref_point\r\n\r\n def get_first_time(self,):\r\n return self.first_time\r\n\r\n def get_last_time(self):\r\n return self.last_time\r\n \r\nif __name__ == '__main__':\r\n print(\"Hello\")\r\n",
"step-ids": [
3,
11,
12,
16,
18
]
}
|
[
3,
11,
12,
16,
18
] |
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager
from flask_migrate import Migrate
from flask_restful import Api
from flask_apispec.extension import FlaskApiSpec
from server.admin import add_admin
from server.config import Config
from server.db import db
from server.cli import add_commands
from server.login_manager import login_manager
from server.resources import add_routes, register_docs
from server.services import user_service, token_blacklist
# Module-level application singletons: the Flask app, its configuration,
# the ORM/migrations, the admin UI, the REST API, JWT auth and bcrypt hashing.
app = Flask(__name__)
app.config.from_object(Config)
db.init_app(app)
migrate = Migrate(app, db)
admin = add_admin(app)
# catch_all_404s routes unknown URLs through the API's own error handling.
api = Api(app, catch_all_404s=True)
jwt = JWTManager(app)
bcrypt = Bcrypt(app)
@jwt.user_lookup_loader
def user_loader_callback(_jwt_header, jwt_data):
    """Load the user object for a verified JWT ('sub' carries the user id)."""
    user_id = jwt_data['sub']
    return user_service.first(id=user_id)
@jwt.user_identity_loader
def user_identity_lookup(email):
    """Map an email address to the identity stored in the token (the user id)."""
    user = user_service.first(email=email)
    return user.id
@jwt.token_in_blocklist_loader
def check_if_token_in_blocklist(jwt_headers, jwt_payload):
    """A token is considered revoked when its jti appears in the blacklist."""
    jti = jwt_payload['jti']
    return bool(token_blacklist.get(jti))
def create_app():
    """Finish wiring the module-level app (routes, CLI commands, login, docs).

    Returns the module-level Flask ``app`` after registration.
    """
    add_routes(api)
    add_commands(app)
    login_manager.init_app(app)
    # FlaskApiSpec generates the Swagger/OpenAPI documentation for the API.
    docs = FlaskApiSpec(app)
    register_docs(docs)
    return app
|
normal
|
{
"blob_id": "f1d813ccaf49c8941bf594e22d8683c0ab422a22",
"index": 7632,
"step-1": "<mask token>\n\n\n@jwt.user_lookup_loader\ndef user_loader_callback(_jwt_header, jwt_data):\n return user_service.first(id=jwt_data['sub'])\n\n\n@jwt.user_identity_loader\ndef user_identity_lookup(email):\n return user_service.first(email=email).id\n\n\n@jwt.token_in_blocklist_loader\ndef check_if_token_in_blocklist(jwt_headers, jwt_payload):\n return bool(token_blacklist.get(jwt_payload['jti']))\n\n\ndef create_app():\n add_routes(api)\n add_commands(app)\n login_manager.init_app(app)\n docs = FlaskApiSpec(app)\n register_docs(docs)\n return app\n",
"step-2": "<mask token>\napp.config.from_object(Config)\ndb.init_app(app)\n<mask token>\n\n\n@jwt.user_lookup_loader\ndef user_loader_callback(_jwt_header, jwt_data):\n return user_service.first(id=jwt_data['sub'])\n\n\n@jwt.user_identity_loader\ndef user_identity_lookup(email):\n return user_service.first(email=email).id\n\n\n@jwt.token_in_blocklist_loader\ndef check_if_token_in_blocklist(jwt_headers, jwt_payload):\n return bool(token_blacklist.get(jwt_payload['jti']))\n\n\ndef create_app():\n add_routes(api)\n add_commands(app)\n login_manager.init_app(app)\n docs = FlaskApiSpec(app)\n register_docs(docs)\n return app\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config.from_object(Config)\ndb.init_app(app)\nmigrate = Migrate(app, db)\nadmin = add_admin(app)\napi = Api(app, catch_all_404s=True)\njwt = JWTManager(app)\nbcrypt = Bcrypt(app)\n\n\n@jwt.user_lookup_loader\ndef user_loader_callback(_jwt_header, jwt_data):\n return user_service.first(id=jwt_data['sub'])\n\n\n@jwt.user_identity_loader\ndef user_identity_lookup(email):\n return user_service.first(email=email).id\n\n\n@jwt.token_in_blocklist_loader\ndef check_if_token_in_blocklist(jwt_headers, jwt_payload):\n return bool(token_blacklist.get(jwt_payload['jti']))\n\n\ndef create_app():\n add_routes(api)\n add_commands(app)\n login_manager.init_app(app)\n docs = FlaskApiSpec(app)\n register_docs(docs)\n return app\n",
"step-4": "from flask import Flask\nfrom flask_bcrypt import Bcrypt\nfrom flask_jwt_extended import JWTManager\nfrom flask_migrate import Migrate\nfrom flask_restful import Api\nfrom flask_apispec.extension import FlaskApiSpec\nfrom server.admin import add_admin\nfrom server.config import Config\nfrom server.db import db\nfrom server.cli import add_commands\nfrom server.login_manager import login_manager\nfrom server.resources import add_routes, register_docs\nfrom server.services import user_service, token_blacklist\napp = Flask(__name__)\napp.config.from_object(Config)\ndb.init_app(app)\nmigrate = Migrate(app, db)\nadmin = add_admin(app)\napi = Api(app, catch_all_404s=True)\njwt = JWTManager(app)\nbcrypt = Bcrypt(app)\n\n\n@jwt.user_lookup_loader\ndef user_loader_callback(_jwt_header, jwt_data):\n return user_service.first(id=jwt_data['sub'])\n\n\n@jwt.user_identity_loader\ndef user_identity_lookup(email):\n return user_service.first(email=email).id\n\n\n@jwt.token_in_blocklist_loader\ndef check_if_token_in_blocklist(jwt_headers, jwt_payload):\n return bool(token_blacklist.get(jwt_payload['jti']))\n\n\ndef create_app():\n add_routes(api)\n add_commands(app)\n login_manager.init_app(app)\n docs = FlaskApiSpec(app)\n register_docs(docs)\n return app\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
# Print "Hello" with the odd values 5, 7, ..., 23 (ten iterations).
i = 0
while i < 10:
    print("Hello", 2 * i + 5)
    i += 1
|
normal
|
{
"blob_id": "e22574b5c458c23c48915274656f95a375cdc0e6",
"index": 6181,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i < 10:\n print('Hello', 2 * i + 5)\n<mask token>\n",
"step-3": "i = 0\nwhile i < 10:\n print('Hello', 2 * i + 5)\ni = i + 1\n",
"step-4": "\r\ni = 0\r\nwhile i < 10:\r\n print(\"Hello\", 2 * i + 5)\r\ni = i + 1",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def coroutine(func):
    """Decorator that primes a generator by advancing it to its first yield."""
    def start_coroutine(*args, **kwargs):
        cr = func(*args, **kwargs)
        # Advance to the first yield so the caller can send() immediately.
        next(cr)
        return cr
    return start_coroutine
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def coroutine(func):
def start_coroutine(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start_coroutine
@coroutine
def grep(pattern):
print('start grep')
try:
while True:
line = yield
if pattern in line:
print(line)
except GeneratorExit:
print('stop grep')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def coroutine(func):
def start_coroutine(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start_coroutine
@coroutine
def grep(pattern):
print('start grep')
try:
while True:
line = yield
if pattern in line:
print(line)
except GeneratorExit:
print('stop grep')
@coroutine
def grep_python_coroutine():
g = grep('python')
yield from g
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def coroutine(func):
    """Decorator that primes a generator by advancing it to its first yield."""
    def start_coroutine(*args, **kwargs):
        cr = func(*args, **kwargs)
        # Advance to the first yield so the caller can send() immediately.
        next(cr)
        return cr
    return start_coroutine
@coroutine
def grep(pattern):
    """Coroutine: print every line sent in that contains *pattern*."""
    print('start grep')
    try:
        while True:
            line = yield
            if pattern in line:
                print(line)
    except GeneratorExit:
        # Raised by close(); announce shutdown instead of propagating.
        print('stop grep')
@coroutine
def grep_python_coroutine():
    """Delegate every value sent here to a grep('python') subcoroutine."""
    g = grep('python')
    yield from g
<|reserved_special_token_0|>
g.send('php is better')
g.send('python is simplier')
g.close()
<|reserved_special_token_1|>
def coroutine(func):
    """Decorator: build the generator and advance it to its first yield."""
    def start_coroutine(*args, **kwargs):
        gen = func(*args, **kwargs)
        gen.send(None)  # same effect as next(gen): prime the coroutine
        return gen
    return start_coroutine
@coroutine
def grep(pattern):
    """Coroutine: print every line sent in that contains *pattern*."""
    print('start grep')
    try:
        while True:
            received = yield
            if pattern not in received:
                continue
            print(received)
    except GeneratorExit:
        # Raised by close(); announce shutdown instead of propagating.
        print('stop grep')
@coroutine
def grep_python_coroutine():
    """Delegate every value sent here to a grep('python') subcoroutine."""
    g = grep('python')
    yield from g
# Demo: the @coroutine decorator already primed the generator, so no
# explicit next(g) / g.send(None) is needed before sending real values.
g = grep('python')
#next(g) #g.send(None)
g.send("php is better")
g.send("python is simplier")
g.close()
|
flexible
|
{
"blob_id": "bebe098c5abb579eb155a1dc325347d100ddfa8f",
"index": 1805,
"step-1": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n<mask token>\n",
"step-2": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n<mask token>\n",
"step-3": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n@coroutine\ndef grep_python_coroutine():\n g = grep('python')\n yield from g\n\n\n<mask token>\n",
"step-4": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n@coroutine\ndef grep_python_coroutine():\n g = grep('python')\n yield from g\n\n\n<mask token>\ng.send('php is better')\ng.send('python is simplier')\ng.close()\n",
"step-5": "def coroutine(func):\n\tdef start_coroutine(*args, **kwargs):\n\t\tcr = func(*args, **kwargs)\n\t\tnext(cr) #cr.send(None)\n\t\treturn cr\n\treturn start_coroutine\n\n@coroutine\ndef grep(pattern):\n\tprint('start grep')\n\ttry:\n\t\twhile True:\n\t\t\tline = yield\n\t\t\tif pattern in line:\n\t\t\t\tprint(line)\n\texcept GeneratorExit:\n\t\tprint('stop grep')\n\n@coroutine\ndef grep_python_coroutine():\n\tg = grep('python') \n\tyield from g\n\ng = grep('python')\n#next(g) #g.send(None)\ng.send(\"php is better\")\ng.send(\"python is simplier\")\ng.close()",
"step-ids": [
1,
2,
3,
4,
6
]
}
|
[
1,
2,
3,
4,
6
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.