| code (string, lengths 13–6.09M) | order_type (string, 2 distinct values) | original_example (dict) | step_ids (list, lengths 1–5) |
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['GET', 'POST'])
@renderer_classes([JSONRenderer, BrowsableAPIRenderer])
def feedback_list(request, format=None):
"""
List all feedback or create a new feedback
"""
if request.method == 'GET':
feedback = Feedback.objects.all()
serializer = FeedbackSerializer(feedback, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = FeedbackSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['GET', 'POST'])
@renderer_classes([JSONRenderer, BrowsableAPIRenderer])
def feedback_list(request, format=None):
"""
List all feedback or create a new feedback
"""
if request.method == 'GET':
feedback = Feedback.objects.all()
serializer = FeedbackSerializer(feedback, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = FeedbackSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def feedback_index(request):
feedback = Feedback.objects.all()
context = {'feedback': feedback}
return render(request, 'feedback_index.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from feedback.models import Feedback
from feedback.serializers import FeedbackSerializer
@api_view(['GET', 'POST'])
@renderer_classes([JSONRenderer, BrowsableAPIRenderer])
def feedback_list(request, format=None):
"""
List all feedback or create a new feedback
"""
if request.method == 'GET':
feedback = Feedback.objects.all()
serializer = FeedbackSerializer(feedback, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = FeedbackSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def feedback_index(request):
feedback = Feedback.objects.all()
context = {'feedback': feedback}
return render(request, 'feedback_index.html', context)
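To exercise the two views above, they would be routed in a URLconf. Below is a minimal sketch, assuming the views live in `feedback/views.py`; the module path and route strings are illustrative, not part of this row.

```python
# urls.py -- hypothetical wiring for feedback_list and feedback_index
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns

from feedback import views

urlpatterns = [
    path('feedback/', views.feedback_list),  # GET lists all feedback, POST creates one
    path('', views.feedback_index),          # plain HTML listing
]
# adds optional suffix routes (e.g. .json), matching the format=None kwarg in the views
urlpatterns = format_suffix_patterns(urlpatterns)
```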
|
flexible
|
{
"blob_id": "bd6c72c3215265a349c5f47573063a9288f64198",
"index": 5227,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@api_view(['GET', 'POST'])\n@renderer_classes([JSONRenderer, BrowsableAPIRenderer])\ndef feedback_list(request, format=None):\n \"\"\"\n List all feedback or create a new feedback\n \"\"\"\n if request.method == 'GET':\n feedback = Feedback.objects.all()\n serializer = FeedbackSerializer(feedback, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = FeedbackSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@api_view(['GET', 'POST'])\n@renderer_classes([JSONRenderer, BrowsableAPIRenderer])\ndef feedback_list(request, format=None):\n \"\"\"\n List all feedback or create a new feedback\n \"\"\"\n if request.method == 'GET':\n feedback = Feedback.objects.all()\n serializer = FeedbackSerializer(feedback, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = FeedbackSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\ndef feedback_index(request):\n feedback = Feedback.objects.all()\n context = {'feedback': feedback}\n return render(request, 'feedback_index.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, renderer_classes\nfrom rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer\nfrom rest_framework.response import Response\nfrom feedback.models import Feedback\nfrom feedback.serializers import FeedbackSerializer\n\n\n@api_view(['GET', 'POST'])\n@renderer_classes([JSONRenderer, BrowsableAPIRenderer])\ndef feedback_list(request, format=None):\n \"\"\"\n List all feedback or create a new feedback\n \"\"\"\n if request.method == 'GET':\n feedback = Feedback.objects.all()\n serializer = FeedbackSerializer(feedback, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = FeedbackSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\ndef feedback_index(request):\n feedback = Feedback.objects.all()\n context = {'feedback': feedback}\n return render(request, 'feedback_index.html', context)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestDummy(unittest.TestCase):
pass
<|reserved_special_token_1|>
import unittest
import achemkit.properties_wnx
class TestDummy(unittest.TestCase):
pass
|
flexible
|
{
"blob_id": "5f0e6f6dc645996b486f1292fe05229a7fae9b17",
"index": 2342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDummy(unittest.TestCase):\n pass\n",
"step-3": "import unittest\nimport achemkit.properties_wnx\n\n\nclass TestDummy(unittest.TestCase):\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.shortcuts import render
# from emaillist.models import Emaillist
from emaillist.models import Emaillist
from django.http import HttpResponseRedirect
# Create your views here.
# def index(request):
#     emaillist_list = Emaillist.objects.all().order_by('-id')  # fetch all objects from the DB and store them in a variable
#     data = {'emaillist_list': emaillist_list}  # store the data in dictionary form
#     return render(request, 'emaillist/index.html', data)  # render emaillist/index.html with the values loaded from the request, passing in data
def test_index(request):
    print("running test_index")
    emaillist_list = Emaillist.objects.all().order_by('-id')  # fetch all objects from the DB and store them in a variable
    data = {'emaillist_list': emaillist_list}  # store the data in dictionary form
    return render(request, 'emaillist/test_index.html', data)
# def form(request):
#     return render(request, 'emaillist/form.html')
def test_form(request):
    print("running test_form")
    return render(request, 'emaillist/test_form.html')
def add(request):
    emaillist = Emaillist()
    emaillist.first_name = request.POST['fn']  # take the value typed into the first_name field (the input received from index.html) and store it in the first_name column of the emaillist table
    emaillist.last_name = request.POST['ln']  # take the value typed into the last_name field (the input received from index.html) and store it in the last_name column of the emaillist table
    emaillist.email = request.POST['email']  # take the value typed into the email field (the input received from index.html) and store it in the email column of the emaillist table
    emaillist.save()  # persist the record to the DB
    return HttpResponseRedirect('/emaillist')  # once saved, redirect back to the list page
#
# def add2(request):
#     emaillist2 = Emaillist2()
#     emaillist2.first_name = request.POST['fn']
#     emaillist2.last_name = request.POST['ln']
#     emaillist2.email = request.POST['email']
#
#     emaillist2.save()
#
#     return HttpResponseRedirect('/emaillist')
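A hedged sketch of driving the `add` view with Django's test client; the URL paths are assumptions, since the row does not include the project's URLconf.

```python
from django.test import Client

client = Client()
# POST the three fields that add() reads from request.POST
client.post('/emaillist/add/', {'fn': 'Jane', 'ln': 'Doe', 'email': 'jane@example.com'})
# add() redirects to /emaillist, where test_index renders the stored rows
response = client.get('/emaillist/')
```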
|
normal
|
{
"blob_id": "5220ad793788927e94caf7d6a42df11292851c67",
"index": 2734,
"step-1": "<mask token>\n\n\ndef test_form(request):\n print('test 함수 실행하자 ')\n return render(request, 'emaillist/test_form.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_form(request):\n print('test 함수 실행하자 ')\n return render(request, 'emaillist/test_form.html')\n\n\ndef add(request):\n emaillist = Emaillist()\n emaillist.first_name = request.POST['fn']\n emaillist.last_name = request.POST['ln']\n emaillist.email = request.POST['email']\n emaillist.save()\n return HttpResponseRedirect('/emaillist')\n",
"step-3": "<mask token>\n\n\ndef test_index(request):\n print('test_index 함수 실행하자 ')\n emaillist_list = Emaillist.objects.all().order_by('-id')\n data = {'emaillist_list': emaillist_list}\n return render(request, 'emaillist/test_index.html', data)\n\n\ndef test_form(request):\n print('test 함수 실행하자 ')\n return render(request, 'emaillist/test_form.html')\n\n\ndef add(request):\n emaillist = Emaillist()\n emaillist.first_name = request.POST['fn']\n emaillist.last_name = request.POST['ln']\n emaillist.email = request.POST['email']\n emaillist.save()\n return HttpResponseRedirect('/emaillist')\n",
"step-4": "from django.shortcuts import render\nfrom emaillist.models import Emaillist\nfrom django.http import HttpResponseRedirect\n\n\ndef test_index(request):\n print('test_index 함수 실행하자 ')\n emaillist_list = Emaillist.objects.all().order_by('-id')\n data = {'emaillist_list': emaillist_list}\n return render(request, 'emaillist/test_index.html', data)\n\n\ndef test_form(request):\n print('test 함수 실행하자 ')\n return render(request, 'emaillist/test_form.html')\n\n\ndef add(request):\n emaillist = Emaillist()\n emaillist.first_name = request.POST['fn']\n emaillist.last_name = request.POST['ln']\n emaillist.email = request.POST['email']\n emaillist.save()\n return HttpResponseRedirect('/emaillist')\n",
"step-5": "from django.shortcuts import render\n# from emaillist.models import Emaillist\nfrom emaillist.models import Emaillist\nfrom django.http import HttpResponseRedirect\n\n# Create your views here.\n\n# def index(request):\n# emaillist_list = Emaillist.objects.all().order_by('-id') # db에서 objects 전체를 불러와서 변수에 저장\n# data = {'emaillist_list':emaillist_list} # 딕션너리 형식으로 데이터에 저장\n# return render(request, 'emaillist/index.html', data) # render 라는 임시변수에 url(request)에서 불러온 값으로 emillist/index.html 형식으로 data값을 출력한다.\n\n\ndef test_index(request):\n print(\"test_index 함수 실행하자 \")\n emaillist_list = Emaillist.objects.all().order_by('-id') # db에서 objects 전체를 불러와서 변수에 저장\n data = {'emaillist_list':emaillist_list} # 딕션너리 형식으로 데이터에 저장\n return render(request, 'emaillist/test_index.html', data)\n\n# def form(request):\n# return render(request, 'emaillist/form.html')\n\ndef test_form(request):\n print(\"test 함수 실행하자 \")\n return render(request, 'emaillist/test_form.html')\n\n\ndef add(request):\n emaillist = Emaillist()\n emaillist.first_name = request.POST['fn'] # 웹에 first_name부분에 작성한 값 (index.html에서 input으로 받은 password) 을 가져와서 데이터베이스(emailist)의 first_name column에 저장\n emaillist.last_name = request.POST['ln'] # 웹에 last_name부분에 작성한 값 (index.html에서 input으로 받은 password) 을 가져와서 데이터베이스(emailist)의 last_name column에 저장\n emaillist.email = request.POST['email'] # 웹에 email부분에 작성한 값 (index.html에서 input으로 받은 password) 을 가져와서 데이터베이스(emailist)의 email column에 저장\n\n emaillist.save() # 저장된 내역을 DB에 저장\n\n return HttpResponseRedirect('/emaillist') # 저장완료되면 기존 리스트 페이지로 이동\n#\n# def add2(request):\n# emaillist2 = Emaillist2()\n# emaillist2.first_name = request.POST['fn']\n# emaillist2.last_name = request.POST['ln']\n# emaillist2.email = request.POST['email']\n#\n# emaillist2.save()\n#\n# return HttpResponseRedirect('/emaillist')",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import re
from collections import defaultdict
def count_words(sentence):
    # extract all the words (the character class also allows apostrophes and hyphens)
sentence = re.findall(r"\b[\w'-]+\b", sentence.lower().replace('_', ' '))
counts = defaultdict(lambda: 0)
    # count the frequency of each word
for word in sentence:
counts[word] += 1
return counts
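A quick usage sketch (the input string is my own) showing how the pattern treats apostrophes, hyphens, and underscores:

```python
print(dict(count_words("Don't under_score well-known words, don't!")))
# {"don't": 2, 'under': 1, 'score': 1, 'well-known': 1, 'words': 1}
```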
|
normal
|
{
"blob_id": "7f5f16ea10980e0ade7357cdae38f47f8d7cdf01",
"index": 2446,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef count_words(sentence):\n sentence = re.findall(\"\\\\b[\\\\w'-]+\\\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda : 0)\n for word in sentence:\n counts[word] += 1\n return counts\n",
"step-3": "import re\nfrom collections import defaultdict\n\n\ndef count_words(sentence):\n sentence = re.findall(\"\\\\b[\\\\w'-]+\\\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda : 0)\n for word in sentence:\n counts[word] += 1\n return counts\n",
"step-4": "import re\nfrom collections import defaultdict\n\ndef count_words(sentence):\n # extract all the words as per definition\n sentence = re.findall(r\"\\b[\\w'-]+\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda: 0)\n\n # Counting the frequency of each words\n for word in sentence:\n counts[word] += 1\n \n return counts\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#----------- writing our own for loop
""" number = [1,2,3,4,5]
friends = ['ahmet', 'mehmet','ayşe']
# for n in number:
# print(n)
# for n in friends:
# print(n)
def my_for_loop(my_iterable):
my_iterator = iter(my_iterable)
while True:
try:
print(next(my_iterator))
except StopIteration:
break
my_for_loop(number)
my_for_loop(friends) """
#-------------- to show the third power of numbers in a given range with an iterator class
""" class CubeNumbers:
def __init__(self, start, end):
self.start = start
self.end = end
def __iter__(self):
return self
def __next__(self):
if self.start <= self.end:
result = self.start ** 3
self.start += 1
return result
else:
raise StopIteration
cubed = CubeNumbers(0, 5)
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed)) """
#-------- to show the third power of numbers in a given range with a generator
""" cubed = (x**3 for x in range(0, 5))
print(type(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed)) """
#--------------- Fibonacci numbers with a generator function
""" def fibo(limit):
x = 0
y = 1
while x < limit:
yield x
x, y = y, x + y
my_fib = fibo(1000)
for fib in my_fib:
print(fib) """
#------------- to show index and value together with enumerate
""" friends = ['john', 'walter', 'henry']
# i = 0
# while i < len(friends):
# v = friends[i]
# print(i, v)
# i += 1
# for n in range(len(friends)):
# v = friends[n]
# print(n, v)
for i, v in enumerate(friends):
print(i, v) """
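Every example above is disabled inside string literals; here is the generator material consolidated into a small runnable sketch (my consolidation, same logic as the snippets above):

```python
# generator expression: cubes are produced lazily, one per next() call
cubed = (x ** 3 for x in range(0, 5))
print(list(cubed))  # [0, 1, 8, 27, 64]


def fibo(limit):
    # generator function: yields Fibonacci numbers below limit
    x, y = 0, 1
    while x < limit:
        yield x
        x, y = y, x + y


print(list(fibo(50)))  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
```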
|
flexible
|
{
"blob_id": "70325d0e5eb9dcd7a065f83eaf14647bc30bd7f3",
"index": 9053,
"step-1": "<mask token>\n",
"step-2": "\n#----------- writing our for loop\n\"\"\" number = [1,2,3,4,5]\nfriends = ['ahmet', 'mehmet','ayşe']\n\n# for n in number:\n# print(n)\n# for n in friends:\n# print(n)\n\ndef my_for_loop(my_iterable):\n my_iterator = iter(my_iterable)\n while True:\n try:\n print(next(my_iterator))\n except StopIteration:\n break\n\nmy_for_loop(number)\nmy_for_loop(friends) \"\"\"\n\n\n#--------------to show thirth power of given range numbers with iterator class\n\n\n\"\"\" class CubeNumbers:\n def __init__(self, start, end):\n self.start = start\n self.end = end\n \n def __iter__(self):\n return self\n \n def __next__(self):\n if self.start <= self.end:\n result = self.start ** 3\n self.start += 1\n return result\n else:\n raise StopIteration\n \ncubed = CubeNumbers(0, 5)\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed)) \"\"\"\n\n\n#--------to show thirth power of given range numbers with generator \n\n\"\"\" cubed = (x**3 for x in range(0, 5))\nprint(type(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed)) \"\"\"\n\n\n#---------------fibonacci numbers with generator function\n\n\"\"\" def fibo(limit):\n x = 0\n y = 1\n while x < limit:\n yield x\n x, y = y, x + y\n \nmy_fib = fibo(1000)\nfor fib in my_fib:\n print(fib) \"\"\"\n \n\n#-------------to show index and value together\n\n\"\"\" friends = ['john', 'walter', 'henry']\n\n# i = 0\n# while i < len(friends):\n# v = friends[i]\n# print(i, v)\n# i += 1\n\n# for n in range(len(friends)):\n# v = friends[n]\n# print(n, v)\n\nfor i, v in enumerate(friends):\n print(i, v) \"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class VirtualChassis(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, parent):
super(VirtualChassis, self).__init__(parent)
<|reserved_special_token_0|>
@property
def Hypervisor(self):
"""An instance of the Hypervisor class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor
return Hypervisor(self)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@EnableLicenseCheck.setter
def EnableLicenseCheck(self, value):
self._set_attribute('enableLicenseCheck', value)
@property
def Hostname(self):
"""Virtual Chassis hostname or IP
Returns:
str
"""
return self._get_attribute('hostname')
@property
def LicenseServer(self):
"""The address of the license server
Returns:
str
"""
return self._get_attribute('licenseServer')
@LicenseServer.setter
def LicenseServer(self, value):
self._set_attribute('licenseServer', value)
@property
def NtpServer(self):
"""The address of the NTP server
Returns:
str
"""
return self._get_attribute('ntpServer')
@NtpServer.setter
def NtpServer(self, value):
self._set_attribute('ntpServer', value)
@property
def StartTxDelay(self):
"""The delay amount for transmit
Returns:
str
"""
return self._get_attribute('startTxDelay')
@StartTxDelay.setter
def StartTxDelay(self, value):
self._set_attribute('startTxDelay', value)
def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer
=None, StartTxDelay=None):
"""Updates a child instance of virtualChassis on the server.
Args:
EnableLicenseCheck (bool): Enables license check on port connect
LicenseServer (str): The address of the license server
NtpServer (str): The address of the NTP server
StartTxDelay (str): The delay amount for transmit
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VirtualChassis(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, parent):
super(VirtualChassis, self).__init__(parent)
@property
def DiscoveredAppliance(self):
"""An instance of the DiscoveredAppliance class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance
return DiscoveredAppliance(self)
@property
def Hypervisor(self):
"""An instance of the Hypervisor class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor
return Hypervisor(self)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@EnableLicenseCheck.setter
def EnableLicenseCheck(self, value):
self._set_attribute('enableLicenseCheck', value)
@property
def Hostname(self):
"""Virtual Chassis hostname or IP
Returns:
str
"""
return self._get_attribute('hostname')
@property
def LicenseServer(self):
"""The address of the license server
Returns:
str
"""
return self._get_attribute('licenseServer')
@LicenseServer.setter
def LicenseServer(self, value):
self._set_attribute('licenseServer', value)
@property
def NtpServer(self):
"""The address of the NTP server
Returns:
str
"""
return self._get_attribute('ntpServer')
@NtpServer.setter
def NtpServer(self, value):
self._set_attribute('ntpServer', value)
@property
def StartTxDelay(self):
"""The delay amount for transmit
Returns:
str
"""
return self._get_attribute('startTxDelay')
@StartTxDelay.setter
def StartTxDelay(self, value):
self._set_attribute('startTxDelay', value)
def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer
=None, StartTxDelay=None):
"""Updates a child instance of virtualChassis on the server.
Args:
EnableLicenseCheck (bool): Enables license check on port connect
LicenseServer (str): The address of the license server
NtpServer (str): The address of the NTP server
StartTxDelay (str): The delay amount for transmit
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VirtualChassis(Base):
<|reserved_special_token_0|>
__slots__ = ()
_SDM_NAME = 'virtualChassis'
def __init__(self, parent):
super(VirtualChassis, self).__init__(parent)
@property
def DiscoveredAppliance(self):
"""An instance of the DiscoveredAppliance class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance
return DiscoveredAppliance(self)
@property
def Hypervisor(self):
"""An instance of the Hypervisor class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor
return Hypervisor(self)
@property
def IxVmCard(self):
"""An instance of the IxVmCard class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard
return IxVmCard(self)
@property
def EnableLicenseCheck(self):
"""Enables license check on port connect
Returns:
bool
"""
return self._get_attribute('enableLicenseCheck')
@EnableLicenseCheck.setter
def EnableLicenseCheck(self, value):
self._set_attribute('enableLicenseCheck', value)
@property
def Hostname(self):
"""Virtual Chassis hostname or IP
Returns:
str
"""
return self._get_attribute('hostname')
@property
def LicenseServer(self):
"""The address of the license server
Returns:
str
"""
return self._get_attribute('licenseServer')
@LicenseServer.setter
def LicenseServer(self, value):
self._set_attribute('licenseServer', value)
@property
def NtpServer(self):
"""The address of the NTP server
Returns:
str
"""
return self._get_attribute('ntpServer')
@NtpServer.setter
def NtpServer(self, value):
self._set_attribute('ntpServer', value)
@property
def StartTxDelay(self):
"""The delay amount for transmit
Returns:
str
"""
return self._get_attribute('startTxDelay')
@StartTxDelay.setter
def StartTxDelay(self, value):
self._set_attribute('startTxDelay', value)
def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer
=None, StartTxDelay=None):
"""Updates a child instance of virtualChassis on the server.
Args:
EnableLicenseCheck (bool): Enables license check on port connect
LicenseServer (str): The address of the license server
NtpServer (str): The address of the NTP server
StartTxDelay (str): The delay amount for transmit
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VirtualChassis(Base):
"""Virtual Chassis is used to get and to manage a Virtual Chassis topology and get the list of discovered appliances
The VirtualChassis class encapsulates a required virtualChassis resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'virtualChassis'
def __init__(self, parent):
super(VirtualChassis, self).__init__(parent)
@property
def DiscoveredAppliance(self):
"""An instance of the DiscoveredAppliance class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance
return DiscoveredAppliance(self)
@property
def Hypervisor(self):
"""An instance of the Hypervisor class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor
return Hypervisor(self)
@property
def IxVmCard(self):
"""An instance of the IxVmCard class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard
return IxVmCard(self)
@property
def EnableLicenseCheck(self):
"""Enables license check on port connect
Returns:
bool
"""
return self._get_attribute('enableLicenseCheck')
@EnableLicenseCheck.setter
def EnableLicenseCheck(self, value):
self._set_attribute('enableLicenseCheck', value)
@property
def Hostname(self):
"""Virtual Chassis hostname or IP
Returns:
str
"""
return self._get_attribute('hostname')
@property
def LicenseServer(self):
"""The address of the license server
Returns:
str
"""
return self._get_attribute('licenseServer')
@LicenseServer.setter
def LicenseServer(self, value):
self._set_attribute('licenseServer', value)
@property
def NtpServer(self):
"""The address of the NTP server
Returns:
str
"""
return self._get_attribute('ntpServer')
@NtpServer.setter
def NtpServer(self, value):
self._set_attribute('ntpServer', value)
@property
def StartTxDelay(self):
"""The delay amount for transmit
Returns:
str
"""
return self._get_attribute('startTxDelay')
@StartTxDelay.setter
def StartTxDelay(self, value):
self._set_attribute('startTxDelay', value)
def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer
=None, StartTxDelay=None):
"""Updates a child instance of virtualChassis on the server.
Args:
EnableLicenseCheck (bool): Enables license check on port connect
LicenseServer (str): The address of the license server
NtpServer (str): The address of the NTP server
StartTxDelay (str): The delay amount for transmit
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
<|reserved_special_token_1|>
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class VirtualChassis(Base):
"""Virtual Chassis is used to get and to manage a Virtual Chassis topology and get the list of discovered appliances
The VirtualChassis class encapsulates a required virtualChassis resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'virtualChassis'
def __init__(self, parent):
super(VirtualChassis, self).__init__(parent)
@property
def DiscoveredAppliance(self):
"""An instance of the DiscoveredAppliance class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance
return DiscoveredAppliance(self)
@property
def Hypervisor(self):
"""An instance of the Hypervisor class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor
return Hypervisor(self)
@property
def IxVmCard(self):
"""An instance of the IxVmCard class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard
return IxVmCard(self)
@property
def EnableLicenseCheck(self):
"""Enables license check on port connect
Returns:
bool
"""
return self._get_attribute('enableLicenseCheck')
@EnableLicenseCheck.setter
def EnableLicenseCheck(self, value):
self._set_attribute('enableLicenseCheck', value)
@property
def Hostname(self):
"""Virtual Chassis hostname or IP
Returns:
str
"""
return self._get_attribute('hostname')
@property
def LicenseServer(self):
"""The address of the license server
Returns:
str
"""
return self._get_attribute('licenseServer')
@LicenseServer.setter
def LicenseServer(self, value):
self._set_attribute('licenseServer', value)
@property
def NtpServer(self):
"""The address of the NTP server
Returns:
str
"""
return self._get_attribute('ntpServer')
@NtpServer.setter
def NtpServer(self, value):
self._set_attribute('ntpServer', value)
@property
def StartTxDelay(self):
"""The delay amount for transmit
Returns:
str
"""
return self._get_attribute('startTxDelay')
@StartTxDelay.setter
def StartTxDelay(self, value):
self._set_attribute('startTxDelay', value)
def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer=None, StartTxDelay=None):
"""Updates a child instance of virtualChassis on the server.
Args:
EnableLicenseCheck (bool): Enables license check on port connect
LicenseServer (str): The address of the license server
NtpServer (str): The address of the NTP server
StartTxDelay (str): The delay amount for transmit
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
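A short usage sketch, assuming `vc` is a VirtualChassis instance already retrieved through an ixnetwork_restpy session (obtaining the session is outside this file, and the addresses are placeholders):

```python
# read-only attribute
print(vc.Hostname)

# push several writable settings to the server in one call via update()
vc.update(EnableLicenseCheck=True,
          LicenseServer='10.0.0.5',  # placeholder address
          NtpServer='10.0.0.6',      # placeholder address
          StartTxDelay='5')

# child resources are exposed as properties returning helper objects
hypervisor = vc.Hypervisor
```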
|
flexible
|
{
"blob_id": "4b78c99dd6156afe960effcacb25804446310f7c",
"index": 9708,
"step-1": "<mask token>\n\n\nclass VirtualChassis(Base):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n <mask token>\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n <mask token>\n <mask token>\n\n @EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer\n =None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._update(locals())\n",
"step-2": "<mask token>\n\n\nclass VirtualChassis(Base):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n\n @property\n def DiscoveredAppliance(self):\n \"\"\"An instance of the DiscoveredAppliance class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance\n return DiscoveredAppliance(self)\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n <mask token>\n <mask token>\n\n @EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer\n =None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._update(locals())\n",
"step-3": "<mask token>\n\n\nclass VirtualChassis(Base):\n <mask token>\n __slots__ = ()\n _SDM_NAME = 'virtualChassis'\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n\n @property\n def DiscoveredAppliance(self):\n \"\"\"An instance of the DiscoveredAppliance class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance\n return DiscoveredAppliance(self)\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n\n @property\n def IxVmCard(self):\n \"\"\"An instance of the IxVmCard class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard\n return IxVmCard(self)\n\n @property\n def EnableLicenseCheck(self):\n \"\"\"Enables license check on port connect\n\n Returns:\n bool\n \"\"\"\n return self._get_attribute('enableLicenseCheck')\n\n @EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer\n =None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error 
condition\n \"\"\"\n self._update(locals())\n",
"step-4": "<mask token>\n\n\nclass VirtualChassis(Base):\n \"\"\"Virtual Chassis is used to get and to manage a Virtual Chassis topology and get the list of discovered appliances\n The VirtualChassis class encapsulates a required virtualChassis resource which will be retrieved from the server every time the property is accessed.\n \"\"\"\n __slots__ = ()\n _SDM_NAME = 'virtualChassis'\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n\n @property\n def DiscoveredAppliance(self):\n \"\"\"An instance of the DiscoveredAppliance class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance\n return DiscoveredAppliance(self)\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n\n @property\n def IxVmCard(self):\n \"\"\"An instance of the IxVmCard class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard\n return IxVmCard(self)\n\n @property\n def EnableLicenseCheck(self):\n \"\"\"Enables license check on port connect\n\n Returns:\n bool\n \"\"\"\n return self._get_attribute('enableLicenseCheck')\n\n @EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer\n =None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables 
license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._update(locals())\n",
"step-5": "# MIT LICENSE\n#\n# Copyright 1997 - 2019 by IXIA Keysight\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE. \nfrom ixnetwork_restpy.base import Base\nfrom ixnetwork_restpy.files import Files\n\n\nclass VirtualChassis(Base):\n \"\"\"Virtual Chassis is used to get and to manage a Virtual Chassis topology and get the list of discovered appliances\n The VirtualChassis class encapsulates a required virtualChassis resource which will be retrieved from the server every time the property is accessed.\n \"\"\"\n\n __slots__ = ()\n _SDM_NAME = 'virtualChassis'\n\n def __init__(self, parent):\n super(VirtualChassis, self).__init__(parent)\n\n @property\n def DiscoveredAppliance(self):\n \"\"\"An instance of the DiscoveredAppliance class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance\n return DiscoveredAppliance(self)\n\n @property\n def Hypervisor(self):\n \"\"\"An instance of the Hypervisor class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)\n\n @property\n def IxVmCard(self):\n \"\"\"An instance of the IxVmCard class.\n\n Returns:\n obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)\n\n Raises:\n NotFoundError: The requested resource does not exist on the server\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard\n return IxVmCard(self)\n\n @property\n def EnableLicenseCheck(self):\n \"\"\"Enables license check on port connect\n\n Returns:\n bool\n \"\"\"\n return self._get_attribute('enableLicenseCheck')\n 
@EnableLicenseCheck.setter\n def EnableLicenseCheck(self, value):\n self._set_attribute('enableLicenseCheck', value)\n\n @property\n def Hostname(self):\n \"\"\"Virtual Chassis hostname or IP\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('hostname')\n\n @property\n def LicenseServer(self):\n \"\"\"The address of the license server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('licenseServer')\n @LicenseServer.setter\n def LicenseServer(self, value):\n self._set_attribute('licenseServer', value)\n\n @property\n def NtpServer(self):\n \"\"\"The address of the NTP server\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('ntpServer')\n @NtpServer.setter\n def NtpServer(self, value):\n self._set_attribute('ntpServer', value)\n\n @property\n def StartTxDelay(self):\n \"\"\"The delay amount for transmit\n\n Returns:\n str\n \"\"\"\n return self._get_attribute('startTxDelay')\n @StartTxDelay.setter\n def StartTxDelay(self, value):\n self._set_attribute('startTxDelay', value)\n\n def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer=None, StartTxDelay=None):\n \"\"\"Updates a child instance of virtualChassis on the server.\n\n Args:\n EnableLicenseCheck (bool): Enables license check on port connect\n LicenseServer (str): The address of the license server\n NtpServer (str): The address of the NTP server\n StartTxDelay (str): The delay amount for transmit\n\n Raises:\n ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._update(locals())\n",
"step-ids": [
12,
13,
16,
17,
19
]
}
|
[
12,
13,
16,
17,
19
] |
def _make_key(*args, **kwargs):
all_args = [str(arg) for arg in args]
all_args += [str(arg) + '=' + str(value) for arg, value in kwargs.items()]
return '|'.join(all_args)
class DoubleLinked:
def __init__(self, prv, nxt, key):
self.prv = prv
self.nxt = nxt
self.key = key
class CacheEntry:
def __init__(self, value, position):
self.value = value
self.position = position
class LRUCache:
def __init__(self, get_from_origin, max_size=1024):
if max_size == 0:
raise NotImplementedError()
if max_size < 0:
raise ValueError()
        # keep a separate size counter to avoid walking the list
self.size = 0
self.max_size = max_size
        # the function to call on a cache miss
self._get_from_origin = get_from_origin
# the values to cache
self._cache = {}
self._most_recent = None
self._least_recent = None
@property
def full(self):
return self.size == self.max_size
def get(self, *args, **kwargs):
if not args and not kwargs:
raise ValueError()
key = _make_key(*args, **kwargs)
if key in self._cache:
return self._hit(key)
return self._miss(key, *args, **kwargs)
def _hit(self, key):
self._bump_cached(key)
return self._cache[key].value
def _miss(self, key, *args, **kwargs):
value = self._get_from_origin(*args, **kwargs)
if not self._most_recent:
self._bump_init(key)
else:
self._bump_new(key)
self._set(key, value)
return value
def _bump_init(self, key):
self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)
self._least_recent = self._most_recent
self.size = 1
def _bump_new(self, key):
self._bump(key)
# remove oldest entry
# this is the entire reason for the linked list business
if self.full:
old_last = self._least_recent
new_last = old_last.prv
new_last.nxt = None
self._least_recent = new_last
self._remove(old_last.key)
else:
self.size += 1
def _bump_cached(self, key):
self._bump(key)
self._remove_old_position(key)
def _remove_old_position(self, key):
old_position = self._cache[key].position
if not old_position.prv:
return # we are already the most recent
old_position.prv.nxt = old_position.nxt
if old_position.nxt: # if we're not the last
old_position.nxt.prv = old_position.prv
else:
self._least_recent = old_position.prv
self._cache[key].position = self._most_recent
def _bump(self, key):
old_first = self._most_recent
new_first = DoubleLinked(nxt=old_first, prv=None, key=key)
old_first.prv = new_first
self._most_recent = new_first
def _set(self, key, value):
self._cache[key] = CacheEntry(value, self._most_recent)
def _remove(self, key):
del self._cache[key]
def __repr__(self):
if not self._most_recent:
return '[ | ]'
current = self._most_recent
keys = [current.key]
while current.nxt:
current = current.nxt
keys.append(current.key)
return '[ ' + (' | '.join(keys)) + ' ]'
def __len__(self):
return self.size
class cache: # pylint: disable=invalid-name
def __init__(self, max_size):
assert isinstance(max_size, int)
self.max_size = max_size
def __call__(self, func):
lru = LRUCache(func, max_size=self.max_size)
def cached_f(*args, **kwargs):
return lru.get(*args, **kwargs)
return cached_f
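A small demonstration of the decorator form with `max_size=2`; the function and trace list are mine.

```python
calls = []  # records every actual origin computation


@cache(max_size=2)
def square(x):
    calls.append(x)
    return x * x


square(2)   # miss: computed
square(3)   # miss: computed
square(2)   # hit: served from the cache, no origin call
square(4)   # miss: computed, evicts the least recently used key (3)
square(3)   # miss again: 3 was evicted, so it is recomputed
print(calls)  # [2, 3, 4, 3]
```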
|
normal
|
{
"blob_id": "9c251e0224979877b9ce244e4871fd4c403abb8e",
"index": 1583,
"step-1": "<mask token>\n\n\nclass LRUCache:\n\n def __init__(self, get_from_origin, max_size=1024):\n if max_size == 0:\n raise NotImplementedError()\n if max_size < 0:\n raise ValueError()\n self.size = 0\n self.max_size = max_size\n self._get_from_origin = get_from_origin\n self._cache = {}\n self._most_recent = None\n self._least_recent = None\n <mask token>\n <mask token>\n\n def _hit(self, key):\n self._bump_cached(key)\n return self._cache[key].value\n\n def _miss(self, key, *args, **kwargs):\n value = self._get_from_origin(*args, **kwargs)\n if not self._most_recent:\n self._bump_init(key)\n else:\n self._bump_new(key)\n self._set(key, value)\n return value\n <mask token>\n\n def _bump_new(self, key):\n self._bump(key)\n if self.full:\n old_last = self._least_recent\n new_last = old_last.prv\n new_last.nxt = None\n self._least_recent = new_last\n self._remove(old_last.key)\n else:\n self.size += 1\n\n def _bump_cached(self, key):\n self._bump(key)\n self._remove_old_position(key)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n if not self._most_recent:\n return '[ | ]'\n current = self._most_recent\n keys = [current.key]\n while current.nxt:\n current = current.nxt\n keys.append(current.key)\n return '[ ' + ' | '.join(keys) + ' ]'\n <mask token>\n\n\nclass cache:\n\n def __init__(self, max_size):\n assert isinstance(max_size, int)\n self.max_size = max_size\n\n def __call__(self, func):\n lru = LRUCache(func, max_size=self.max_size)\n\n def cached_f(*args, **kwargs):\n return lru.get(*args, **kwargs)\n return cached_f\n",
"step-2": "<mask token>\n\n\nclass LRUCache:\n\n def __init__(self, get_from_origin, max_size=1024):\n if max_size == 0:\n raise NotImplementedError()\n if max_size < 0:\n raise ValueError()\n self.size = 0\n self.max_size = max_size\n self._get_from_origin = get_from_origin\n self._cache = {}\n self._most_recent = None\n self._least_recent = None\n <mask token>\n <mask token>\n\n def _hit(self, key):\n self._bump_cached(key)\n return self._cache[key].value\n\n def _miss(self, key, *args, **kwargs):\n value = self._get_from_origin(*args, **kwargs)\n if not self._most_recent:\n self._bump_init(key)\n else:\n self._bump_new(key)\n self._set(key, value)\n return value\n\n def _bump_init(self, key):\n self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)\n self._least_recent = self._most_recent\n self.size = 1\n\n def _bump_new(self, key):\n self._bump(key)\n if self.full:\n old_last = self._least_recent\n new_last = old_last.prv\n new_last.nxt = None\n self._least_recent = new_last\n self._remove(old_last.key)\n else:\n self.size += 1\n\n def _bump_cached(self, key):\n self._bump(key)\n self._remove_old_position(key)\n <mask token>\n\n def _bump(self, key):\n old_first = self._most_recent\n new_first = DoubleLinked(nxt=old_first, prv=None, key=key)\n old_first.prv = new_first\n self._most_recent = new_first\n\n def _set(self, key, value):\n self._cache[key] = CacheEntry(value, self._most_recent)\n\n def _remove(self, key):\n del self._cache[key]\n\n def __repr__(self):\n if not self._most_recent:\n return '[ | ]'\n current = self._most_recent\n keys = [current.key]\n while current.nxt:\n current = current.nxt\n keys.append(current.key)\n return '[ ' + ' | '.join(keys) + ' ]'\n <mask token>\n\n\nclass cache:\n\n def __init__(self, max_size):\n assert isinstance(max_size, int)\n self.max_size = max_size\n\n def __call__(self, func):\n lru = LRUCache(func, max_size=self.max_size)\n\n def cached_f(*args, **kwargs):\n return lru.get(*args, **kwargs)\n return cached_f\n",
"step-3": "<mask token>\n\n\nclass LRUCache:\n\n def __init__(self, get_from_origin, max_size=1024):\n if max_size == 0:\n raise NotImplementedError()\n if max_size < 0:\n raise ValueError()\n self.size = 0\n self.max_size = max_size\n self._get_from_origin = get_from_origin\n self._cache = {}\n self._most_recent = None\n self._least_recent = None\n\n @property\n def full(self):\n return self.size == self.max_size\n <mask token>\n\n def _hit(self, key):\n self._bump_cached(key)\n return self._cache[key].value\n\n def _miss(self, key, *args, **kwargs):\n value = self._get_from_origin(*args, **kwargs)\n if not self._most_recent:\n self._bump_init(key)\n else:\n self._bump_new(key)\n self._set(key, value)\n return value\n\n def _bump_init(self, key):\n self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)\n self._least_recent = self._most_recent\n self.size = 1\n\n def _bump_new(self, key):\n self._bump(key)\n if self.full:\n old_last = self._least_recent\n new_last = old_last.prv\n new_last.nxt = None\n self._least_recent = new_last\n self._remove(old_last.key)\n else:\n self.size += 1\n\n def _bump_cached(self, key):\n self._bump(key)\n self._remove_old_position(key)\n <mask token>\n\n def _bump(self, key):\n old_first = self._most_recent\n new_first = DoubleLinked(nxt=old_first, prv=None, key=key)\n old_first.prv = new_first\n self._most_recent = new_first\n\n def _set(self, key, value):\n self._cache[key] = CacheEntry(value, self._most_recent)\n\n def _remove(self, key):\n del self._cache[key]\n\n def __repr__(self):\n if not self._most_recent:\n return '[ | ]'\n current = self._most_recent\n keys = [current.key]\n while current.nxt:\n current = current.nxt\n keys.append(current.key)\n return '[ ' + ' | '.join(keys) + ' ]'\n\n def __len__(self):\n return self.size\n\n\nclass cache:\n\n def __init__(self, max_size):\n assert isinstance(max_size, int)\n self.max_size = max_size\n\n def __call__(self, func):\n lru = LRUCache(func, max_size=self.max_size)\n\n def cached_f(*args, **kwargs):\n return lru.get(*args, **kwargs)\n return cached_f\n",
"step-4": "def _make_key(*args, **kwargs):\n all_args = [str(arg) for arg in args]\n all_args += [(str(arg) + '=' + str(value)) for arg, value in kwargs.items()\n ]\n return '|'.join(all_args)\n\n\nclass DoubleLinked:\n\n def __init__(self, prv, nxt, key):\n self.prv = prv\n self.nxt = nxt\n self.key = key\n\n\nclass CacheEntry:\n\n def __init__(self, value, position):\n self.value = value\n self.position = position\n\n\nclass LRUCache:\n\n def __init__(self, get_from_origin, max_size=1024):\n if max_size == 0:\n raise NotImplementedError()\n if max_size < 0:\n raise ValueError()\n self.size = 0\n self.max_size = max_size\n self._get_from_origin = get_from_origin\n self._cache = {}\n self._most_recent = None\n self._least_recent = None\n\n @property\n def full(self):\n return self.size == self.max_size\n\n def get(self, *args, **kwargs):\n if not args and not kwargs:\n raise ValueError()\n key = _make_key(*args, **kwargs)\n if key in self._cache:\n return self._hit(key)\n return self._miss(key, *args, **kwargs)\n\n def _hit(self, key):\n self._bump_cached(key)\n return self._cache[key].value\n\n def _miss(self, key, *args, **kwargs):\n value = self._get_from_origin(*args, **kwargs)\n if not self._most_recent:\n self._bump_init(key)\n else:\n self._bump_new(key)\n self._set(key, value)\n return value\n\n def _bump_init(self, key):\n self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)\n self._least_recent = self._most_recent\n self.size = 1\n\n def _bump_new(self, key):\n self._bump(key)\n if self.full:\n old_last = self._least_recent\n new_last = old_last.prv\n new_last.nxt = None\n self._least_recent = new_last\n self._remove(old_last.key)\n else:\n self.size += 1\n\n def _bump_cached(self, key):\n self._bump(key)\n self._remove_old_position(key)\n\n def _remove_old_position(self, key):\n old_position = self._cache[key].position\n if not old_position.prv:\n return\n old_position.prv.nxt = old_position.nxt\n if old_position.nxt:\n old_position.nxt.prv = old_position.prv\n else:\n self._least_recent = old_position.prv\n self._cache[key].position = self._most_recent\n\n def _bump(self, key):\n old_first = self._most_recent\n new_first = DoubleLinked(nxt=old_first, prv=None, key=key)\n old_first.prv = new_first\n self._most_recent = new_first\n\n def _set(self, key, value):\n self._cache[key] = CacheEntry(value, self._most_recent)\n\n def _remove(self, key):\n del self._cache[key]\n\n def __repr__(self):\n if not self._most_recent:\n return '[ | ]'\n current = self._most_recent\n keys = [current.key]\n while current.nxt:\n current = current.nxt\n keys.append(current.key)\n return '[ ' + ' | '.join(keys) + ' ]'\n\n def __len__(self):\n return self.size\n\n\nclass cache:\n\n def __init__(self, max_size):\n assert isinstance(max_size, int)\n self.max_size = max_size\n\n def __call__(self, func):\n lru = LRUCache(func, max_size=self.max_size)\n\n def cached_f(*args, **kwargs):\n return lru.get(*args, **kwargs)\n return cached_f\n",
"step-5": "def _make_key(*args, **kwargs):\n all_args = [str(arg) for arg in args]\n all_args += [str(arg) + '=' + str(value) for arg, value in kwargs.items()]\n return '|'.join(all_args)\n\n\nclass DoubleLinked:\n def __init__(self, prv, nxt, key):\n self.prv = prv\n self.nxt = nxt\n self.key = key\n\n\nclass CacheEntry:\n def __init__(self, value, position):\n self.value = value\n self.position = position\n\n\nclass LRUCache:\n def __init__(self, get_from_origin, max_size=1024):\n if max_size == 0:\n raise NotImplementedError()\n if max_size < 0:\n raise ValueError()\n\n # keep separate size counter, to save going over the list\n self.size = 0\n self.max_size = max_size\n # the function to call\n self._get_from_origin = get_from_origin\n\n # the values to cache\n self._cache = {}\n self._most_recent = None\n self._least_recent = None\n\n @property\n def full(self):\n return self.size == self.max_size\n\n def get(self, *args, **kwargs):\n if not args and not kwargs:\n raise ValueError()\n\n key = _make_key(*args, **kwargs)\n if key in self._cache:\n return self._hit(key)\n return self._miss(key, *args, **kwargs)\n\n def _hit(self, key):\n self._bump_cached(key)\n return self._cache[key].value\n\n def _miss(self, key, *args, **kwargs):\n value = self._get_from_origin(*args, **kwargs)\n\n if not self._most_recent:\n self._bump_init(key)\n else:\n self._bump_new(key)\n\n self._set(key, value)\n\n return value\n\n def _bump_init(self, key):\n self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)\n self._least_recent = self._most_recent\n self.size = 1\n\n def _bump_new(self, key):\n self._bump(key)\n\n # remove oldest entry\n # this is the entire reason for the linked list business\n if self.full:\n old_last = self._least_recent\n new_last = old_last.prv\n new_last.nxt = None\n self._least_recent = new_last\n self._remove(old_last.key)\n else:\n self.size += 1\n\n def _bump_cached(self, key):\n self._bump(key)\n self._remove_old_position(key)\n\n def _remove_old_position(self, key):\n old_position = self._cache[key].position\n\n if not old_position.prv:\n return # we are already the most recent\n\n old_position.prv.nxt = old_position.nxt\n\n if old_position.nxt: # if we're not the last\n old_position.nxt.prv = old_position.prv\n else:\n self._least_recent = old_position.prv\n\n self._cache[key].position = self._most_recent\n\n def _bump(self, key):\n old_first = self._most_recent\n new_first = DoubleLinked(nxt=old_first, prv=None, key=key)\n old_first.prv = new_first\n self._most_recent = new_first\n\n def _set(self, key, value):\n self._cache[key] = CacheEntry(value, self._most_recent)\n\n def _remove(self, key):\n del self._cache[key]\n\n def __repr__(self):\n if not self._most_recent:\n return '[ | ]'\n current = self._most_recent\n keys = [current.key]\n while current.nxt:\n current = current.nxt\n keys.append(current.key)\n return '[ ' + (' | '.join(keys)) + ' ]'\n\n def __len__(self):\n return self.size\n\n\nclass cache: # pylint: disable=invalid-name\n def __init__(self, max_size):\n assert isinstance(max_size, int)\n self.max_size = max_size\n\n def __call__(self, func):\n lru = LRUCache(func, max_size=self.max_size)\n\n def cached_f(*args, **kwargs):\n return lru.get(*args, **kwargs)\n return cached_f\n",
"step-ids": [
10,
14,
16,
23,
24
]
}
|
[
10,
14,
16,
23,
24
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='Invoice', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('created_on', models.DateTimeField(
verbose_name='Created on', unique=True, editable=False)), (
'payment_no', models.PositiveIntegerField(verbose_name='Payment on',
unique=True, editable=False)), ('payment_info', models.CharField(
verbose_name='Payment Info', max_length=128, editable=False)), (
'user', models.ForeignKey(editable=False, to=settings.
AUTH_USER_MODEL, verbose_name='User'))], options={'verbose_name':
'invoice', 'verbose_name_plural': 'invoices'}), migrations.
CreateModel(name='Payment', fields=[('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=
True)), ('created_on', models.DateTimeField(auto_now_add=True,
verbose_name='Created on')), ('amount', models.DecimalField(
verbose_name='Amount', max_digits=9, decimal_places=2)), (
'payment_no', models.PositiveIntegerField(unique=True, verbose_name
='Payment no')), ('mode', models.PositiveSmallIntegerField(
verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])), (
'sys_invs_no', models.PositiveIntegerField(verbose_name=
b'LMI_SYS_INVS_NO')), ('sys_trans_no', models.PositiveIntegerField(
verbose_name=b'LMI_SYS_TRANS_NO')), ('sys_trans_date', models.
DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')), ('payer_purse',
models.CharField(max_length=13, verbose_name='Payer purse')), (
'payer_wm', models.CharField(max_length=12, verbose_name='Payer WM'
)), ('paymer_number', models.CharField(max_length=30, verbose_name=
'Paymer number', blank=True)), ('paymer_email', models.EmailField(
max_length=254, verbose_name='Paymer email', blank=True)), (
'telepat_phonenumber', models.CharField(max_length=30, verbose_name
='Phone number', blank=True)), ('telepat_orderid', models.CharField
(max_length=30, verbose_name='Order id', blank=True)), (
'payment_creditdays', models.PositiveIntegerField(null=True,
verbose_name='Credit days', blank=True)), ('invoice', models.
OneToOneField(related_name='payment', null=True, blank=True, to=
'webmoney_merchant.Invoice', verbose_name='Invoice'))], options={
'verbose_name': 'payment', 'verbose_name_plural': 'payments'}),
migrations.CreateModel(name='Purse', fields=[('id', models.
AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)), ('purse', models.CharField(unique=True,
max_length=13, verbose_name='Purse')), ('purse_type', models.
CharField(default=b'B', unique=True, max_length=1, verbose_name=
'Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D',
b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R',
b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z',
b'WMZ')])), ('secret_key', models.CharField(max_length=50,
verbose_name='Secret key'))], options={'verbose_name': 'purse',
'verbose_name_plural': 'purses'}), migrations.AddField(model_name=
'payment', name='payee_purse', field=models.ForeignKey(related_name
='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'))
]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='Invoice', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('created_on', models.DateTimeField(
verbose_name='Created on', unique=True, editable=False)), (
'payment_no', models.PositiveIntegerField(verbose_name='Payment on',
unique=True, editable=False)), ('payment_info', models.CharField(
verbose_name='Payment Info', max_length=128, editable=False)), (
'user', models.ForeignKey(editable=False, to=settings.
AUTH_USER_MODEL, verbose_name='User'))], options={'verbose_name':
'invoice', 'verbose_name_plural': 'invoices'}), migrations.
CreateModel(name='Payment', fields=[('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=
True)), ('created_on', models.DateTimeField(auto_now_add=True,
verbose_name='Created on')), ('amount', models.DecimalField(
verbose_name='Amount', max_digits=9, decimal_places=2)), (
'payment_no', models.PositiveIntegerField(unique=True, verbose_name
='Payment no')), ('mode', models.PositiveSmallIntegerField(
verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])), (
'sys_invs_no', models.PositiveIntegerField(verbose_name=
b'LMI_SYS_INVS_NO')), ('sys_trans_no', models.PositiveIntegerField(
verbose_name=b'LMI_SYS_TRANS_NO')), ('sys_trans_date', models.
DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')), ('payer_purse',
models.CharField(max_length=13, verbose_name='Payer purse')), (
'payer_wm', models.CharField(max_length=12, verbose_name='Payer WM'
)), ('paymer_number', models.CharField(max_length=30, verbose_name=
'Paymer number', blank=True)), ('paymer_email', models.EmailField(
max_length=254, verbose_name='Paymer email', blank=True)), (
'telepat_phonenumber', models.CharField(max_length=30, verbose_name
='Phone number', blank=True)), ('telepat_orderid', models.CharField
(max_length=30, verbose_name='Order id', blank=True)), (
'payment_creditdays', models.PositiveIntegerField(null=True,
verbose_name='Credit days', blank=True)), ('invoice', models.
OneToOneField(related_name='payment', null=True, blank=True, to=
'webmoney_merchant.Invoice', verbose_name='Invoice'))], options={
'verbose_name': 'payment', 'verbose_name_plural': 'payments'}),
migrations.CreateModel(name='Purse', fields=[('id', models.
AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)), ('purse', models.CharField(unique=True,
max_length=13, verbose_name='Purse')), ('purse_type', models.
CharField(default=b'B', unique=True, max_length=1, verbose_name=
'Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D',
b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R',
b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z',
b'WMZ')])), ('secret_key', models.CharField(max_length=50,
verbose_name='Secret key'))], options={'verbose_name': 'purse',
'verbose_name_plural': 'purses'}), migrations.AddField(model_name=
'payment', name='payee_purse', field=models.ForeignKey(related_name
='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'))
]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(verbose_name='Created on', unique=True, editable=False)),
('payment_no', models.PositiveIntegerField(verbose_name='Payment on', unique=True, editable=False)),
('payment_info', models.CharField(verbose_name='Payment Info', max_length=128, editable=False)),
('user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'invoice',
'verbose_name_plural': 'invoices',
},
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
('amount', models.DecimalField(verbose_name='Amount', max_digits=9, decimal_places=2)),
('payment_no', models.PositiveIntegerField(unique=True, verbose_name='Payment no')),
('mode', models.PositiveSmallIntegerField(verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])),
('sys_invs_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_INVS_NO')),
('sys_trans_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_TRANS_NO')),
('sys_trans_date', models.DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')),
('payer_purse', models.CharField(max_length=13, verbose_name='Payer purse')),
('payer_wm', models.CharField(max_length=12, verbose_name='Payer WM')),
('paymer_number', models.CharField(max_length=30, verbose_name='Paymer number', blank=True)),
('paymer_email', models.EmailField(max_length=254, verbose_name='Paymer email', blank=True)),
('telepat_phonenumber', models.CharField(max_length=30, verbose_name='Phone number', blank=True)),
('telepat_orderid', models.CharField(max_length=30, verbose_name='Order id', blank=True)),
('payment_creditdays', models.PositiveIntegerField(null=True, verbose_name='Credit days', blank=True)),
('invoice', models.OneToOneField(related_name='payment', null=True, blank=True, to='webmoney_merchant.Invoice', verbose_name='Invoice')),
],
options={
'verbose_name': 'payment',
'verbose_name_plural': 'payments',
},
),
migrations.CreateModel(
name='Purse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('purse', models.CharField(unique=True, max_length=13, verbose_name='Purse')),
('purse_type', models.CharField(default=b'B', unique=True, max_length=1, verbose_name='Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D', b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R', b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z', b'WMZ')])),
('secret_key', models.CharField(max_length=50, verbose_name='Secret key')),
],
options={
'verbose_name': 'purse',
'verbose_name_plural': 'purses',
},
),
migrations.AddField(
model_name='payment',
name='payee_purse',
field=models.ForeignKey(related_name='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'),
),
]
|
flexible
|
{
"blob_id": "deb8ee1d6327a6406244147a819821e8d2b2890e",
"index": 1385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Invoice', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('created_on', models.DateTimeField(\n verbose_name='Created on', unique=True, editable=False)), (\n 'payment_no', models.PositiveIntegerField(verbose_name='Payment on',\n unique=True, editable=False)), ('payment_info', models.CharField(\n verbose_name='Payment Info', max_length=128, editable=False)), (\n 'user', models.ForeignKey(editable=False, to=settings.\n AUTH_USER_MODEL, verbose_name='User'))], options={'verbose_name':\n 'invoice', 'verbose_name_plural': 'invoices'}), migrations.\n CreateModel(name='Payment', fields=[('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True, primary_key=\n True)), ('created_on', models.DateTimeField(auto_now_add=True,\n verbose_name='Created on')), ('amount', models.DecimalField(\n verbose_name='Amount', max_digits=9, decimal_places=2)), (\n 'payment_no', models.PositiveIntegerField(unique=True, verbose_name\n ='Payment no')), ('mode', models.PositiveSmallIntegerField(\n verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])), (\n 'sys_invs_no', models.PositiveIntegerField(verbose_name=\n b'LMI_SYS_INVS_NO')), ('sys_trans_no', models.PositiveIntegerField(\n verbose_name=b'LMI_SYS_TRANS_NO')), ('sys_trans_date', models.\n DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')), ('payer_purse',\n models.CharField(max_length=13, verbose_name='Payer purse')), (\n 'payer_wm', models.CharField(max_length=12, verbose_name='Payer WM'\n )), ('paymer_number', models.CharField(max_length=30, verbose_name=\n 'Paymer number', blank=True)), ('paymer_email', models.EmailField(\n max_length=254, verbose_name='Paymer email', blank=True)), (\n 'telepat_phonenumber', models.CharField(max_length=30, verbose_name\n ='Phone number', blank=True)), ('telepat_orderid', models.CharField\n (max_length=30, verbose_name='Order id', blank=True)), (\n 'payment_creditdays', models.PositiveIntegerField(null=True,\n verbose_name='Credit days', blank=True)), ('invoice', models.\n OneToOneField(related_name='payment', null=True, blank=True, to=\n 'webmoney_merchant.Invoice', verbose_name='Invoice'))], options={\n 'verbose_name': 'payment', 'verbose_name_plural': 'payments'}),\n migrations.CreateModel(name='Purse', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('purse', models.CharField(unique=True,\n max_length=13, verbose_name='Purse')), ('purse_type', models.\n CharField(default=b'B', unique=True, max_length=1, verbose_name=\n 'Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D',\n b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R',\n b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z',\n b'WMZ')])), ('secret_key', models.CharField(max_length=50,\n verbose_name='Secret key'))], options={'verbose_name': 'purse',\n 'verbose_name_plural': 'purses'}), migrations.AddField(model_name=\n 'payment', name='payee_purse', field=models.ForeignKey(related_name\n ='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'))\n ]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Invoice', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('created_on', models.DateTimeField(\n verbose_name='Created on', unique=True, editable=False)), (\n 'payment_no', models.PositiveIntegerField(verbose_name='Payment on',\n unique=True, editable=False)), ('payment_info', models.CharField(\n verbose_name='Payment Info', max_length=128, editable=False)), (\n 'user', models.ForeignKey(editable=False, to=settings.\n AUTH_USER_MODEL, verbose_name='User'))], options={'verbose_name':\n 'invoice', 'verbose_name_plural': 'invoices'}), migrations.\n CreateModel(name='Payment', fields=[('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True, primary_key=\n True)), ('created_on', models.DateTimeField(auto_now_add=True,\n verbose_name='Created on')), ('amount', models.DecimalField(\n verbose_name='Amount', max_digits=9, decimal_places=2)), (\n 'payment_no', models.PositiveIntegerField(unique=True, verbose_name\n ='Payment no')), ('mode', models.PositiveSmallIntegerField(\n verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])), (\n 'sys_invs_no', models.PositiveIntegerField(verbose_name=\n b'LMI_SYS_INVS_NO')), ('sys_trans_no', models.PositiveIntegerField(\n verbose_name=b'LMI_SYS_TRANS_NO')), ('sys_trans_date', models.\n DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')), ('payer_purse',\n models.CharField(max_length=13, verbose_name='Payer purse')), (\n 'payer_wm', models.CharField(max_length=12, verbose_name='Payer WM'\n )), ('paymer_number', models.CharField(max_length=30, verbose_name=\n 'Paymer number', blank=True)), ('paymer_email', models.EmailField(\n max_length=254, verbose_name='Paymer email', blank=True)), (\n 'telepat_phonenumber', models.CharField(max_length=30, verbose_name\n ='Phone number', blank=True)), ('telepat_orderid', models.CharField\n (max_length=30, verbose_name='Order id', blank=True)), (\n 'payment_creditdays', models.PositiveIntegerField(null=True,\n verbose_name='Credit days', blank=True)), ('invoice', models.\n OneToOneField(related_name='payment', null=True, blank=True, to=\n 'webmoney_merchant.Invoice', verbose_name='Invoice'))], options={\n 'verbose_name': 'payment', 'verbose_name_plural': 'payments'}),\n migrations.CreateModel(name='Purse', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('purse', models.CharField(unique=True,\n max_length=13, verbose_name='Purse')), ('purse_type', models.\n CharField(default=b'B', unique=True, max_length=1, verbose_name=\n 'Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D',\n b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R',\n b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z',\n b'WMZ')])), ('secret_key', models.CharField(max_length=50,\n verbose_name='Secret key'))], options={'verbose_name': 'purse',\n 'verbose_name_plural': 'purses'}), migrations.AddField(model_name=\n 'payment', name='payee_purse', field=models.ForeignKey(related_name\n ='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'))\n ]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Invoice',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(verbose_name='Created on', unique=True, editable=False)),\n ('payment_no', models.PositiveIntegerField(verbose_name='Payment on', unique=True, editable=False)),\n ('payment_info', models.CharField(verbose_name='Payment Info', max_length=128, editable=False)),\n ('user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL, verbose_name='User')),\n ],\n options={\n 'verbose_name': 'invoice',\n 'verbose_name_plural': 'invoices',\n },\n ),\n migrations.CreateModel(\n name='Payment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),\n ('amount', models.DecimalField(verbose_name='Amount', max_digits=9, decimal_places=2)),\n ('payment_no', models.PositiveIntegerField(unique=True, verbose_name='Payment no')),\n ('mode', models.PositiveSmallIntegerField(verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])),\n ('sys_invs_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_INVS_NO')),\n ('sys_trans_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_TRANS_NO')),\n ('sys_trans_date', models.DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')),\n ('payer_purse', models.CharField(max_length=13, verbose_name='Payer purse')),\n ('payer_wm', models.CharField(max_length=12, verbose_name='Payer WM')),\n ('paymer_number', models.CharField(max_length=30, verbose_name='Paymer number', blank=True)),\n ('paymer_email', models.EmailField(max_length=254, verbose_name='Paymer email', blank=True)),\n ('telepat_phonenumber', models.CharField(max_length=30, verbose_name='Phone number', blank=True)),\n ('telepat_orderid', models.CharField(max_length=30, verbose_name='Order id', blank=True)),\n ('payment_creditdays', models.PositiveIntegerField(null=True, verbose_name='Credit days', blank=True)),\n ('invoice', models.OneToOneField(related_name='payment', null=True, blank=True, to='webmoney_merchant.Invoice', verbose_name='Invoice')),\n ],\n options={\n 'verbose_name': 'payment',\n 'verbose_name_plural': 'payments',\n },\n ),\n migrations.CreateModel(\n name='Purse',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('purse', models.CharField(unique=True, max_length=13, verbose_name='Purse')),\n ('purse_type', models.CharField(default=b'B', unique=True, max_length=1, verbose_name='Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D', b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R', b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z', b'WMZ')])),\n ('secret_key', models.CharField(max_length=50, verbose_name='Secret key')),\n ],\n options={\n 'verbose_name': 'purse',\n 'verbose_name_plural': 'purses',\n },\n ),\n migrations.AddField(\n model_name='payment',\n name='payee_purse',\n field=models.ForeignKey(related_name='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import scrapy
from yijing64.items import Yijing64Item
# import pymysql
class ZhouyiSpider(scrapy.Spider):
name = 'zhouyi'
allowed_domains = ['m.zhouyi.cc']
start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']
def parse(self, response):
li_list = response.xpath("//div[@class='gualist1 tip_text']/ul/li")
for li in li_list:
item = Yijing64Item()
item['name'] = li.xpath("./a/text()").extract_first()
# item['urls'] = li.xpath("./a/@href").extract_first()
detail_urls = 'https://m.zhouyi.cc' + \
li.xpath("./a/@href").extract_first()
if detail_urls is not None:
yield scrapy.Request(detail_urls, callback=self.parse_detail, meta={'item': item})
def parse_detail(self, response):
item = response.meta["item"]
item['hexagram1'] = response.xpath("//div/table/tbody/tr[3]/td[1]/text()").extract_first().strip()
item['hexagram2'] = response.xpath("//div/table/tbody/tr[3]/td[2]/text()").extract_first().strip()
item['hexagram3'] = response.xpath("//div/table/tbody/tr[3]/td[3]/text()").extract_first().strip()
item['hexagram4'] = response.xpath("//div/table/tbody/tr[3]/td[4]/text()").extract_first().strip()
# item['hexagram'] = response.xpath("//div[@class='tip_text'][1]").extract_first().strip()
# item['one_yao'] = response.xpath("//div[@class='tip_text'][2]").extract_first().strip()
# item['two_yao'] = response.xpath("//div[@class='tip_text'][3]").extract_first().strip()
# item['san_yao'] = response.xpath("//div[@class='tip_text'][4]").extract_first().strip()
# item['si_yao'] = response.xpath("//div[@class='tip_text'][5]").extract_first().strip()
# item['wu_yao'] = response.xpath("//div[@class='tip_text'][6]").extract_first().strip()
# item['liu_yao'] = response.xpath("//div[@class='tip_text'][7]").extract_first().strip()
yield item
# hexagram_list = response.xpath(
# "//div/table/tbody/tr[3]/td/text()").extract()
# for i, v in enumerate(hexagram_list):
# # print("=="*10)
# # print(i,index)
# if i == 0:
# item['hexagram1'] = v.strip()
# elif i == 1:
# item['hexagram2'] = v.strip()
# elif i == 2:
# item['hexagram3'] = v.strip()
# else:
# item['hexagram4'] = v.strip()
# yield item
# print(item)
# def __init__(self):
# con = pymysql.connect(host=settings['MYSQL_HOST'], user=settings['MYSQL_USER'], passwd=settings['MYSQL_PASS'], db=settings['MYSQL_DB'],charset='utf8')
    # 	cur = con.cursor() # create a database connection and define a cursor
# con.close()
|
normal
|
{
"blob_id": "cd9f25a2810b02f5588e4e9e8445e7aaec056bf8",
"index": 7704,
"step-1": "<mask token>\n\n\nclass ZhouyiSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse_detail(self, response):\n item = response.meta['item']\n item['hexagram1'] = response.xpath(\n '//div/table/tbody/tr[3]/td[1]/text()').extract_first().strip()\n item['hexagram2'] = response.xpath(\n '//div/table/tbody/tr[3]/td[2]/text()').extract_first().strip()\n item['hexagram3'] = response.xpath(\n '//div/table/tbody/tr[3]/td[3]/text()').extract_first().strip()\n item['hexagram4'] = response.xpath(\n '//div/table/tbody/tr[3]/td[4]/text()').extract_first().strip()\n yield item\n",
"step-2": "<mask token>\n\n\nclass ZhouyiSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n li_list = response.xpath(\"//div[@class='gualist1 tip_text']/ul/li\")\n for li in li_list:\n item = Yijing64Item()\n item['name'] = li.xpath('./a/text()').extract_first()\n detail_urls = 'https://m.zhouyi.cc' + li.xpath('./a/@href'\n ).extract_first()\n if detail_urls is not None:\n yield scrapy.Request(detail_urls, callback=self.\n parse_detail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta['item']\n item['hexagram1'] = response.xpath(\n '//div/table/tbody/tr[3]/td[1]/text()').extract_first().strip()\n item['hexagram2'] = response.xpath(\n '//div/table/tbody/tr[3]/td[2]/text()').extract_first().strip()\n item['hexagram3'] = response.xpath(\n '//div/table/tbody/tr[3]/td[3]/text()').extract_first().strip()\n item['hexagram4'] = response.xpath(\n '//div/table/tbody/tr[3]/td[4]/text()').extract_first().strip()\n yield item\n",
"step-3": "<mask token>\n\n\nclass ZhouyiSpider(scrapy.Spider):\n name = 'zhouyi'\n allowed_domains = ['m.zhouyi.cc']\n start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']\n\n def parse(self, response):\n li_list = response.xpath(\"//div[@class='gualist1 tip_text']/ul/li\")\n for li in li_list:\n item = Yijing64Item()\n item['name'] = li.xpath('./a/text()').extract_first()\n detail_urls = 'https://m.zhouyi.cc' + li.xpath('./a/@href'\n ).extract_first()\n if detail_urls is not None:\n yield scrapy.Request(detail_urls, callback=self.\n parse_detail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta['item']\n item['hexagram1'] = response.xpath(\n '//div/table/tbody/tr[3]/td[1]/text()').extract_first().strip()\n item['hexagram2'] = response.xpath(\n '//div/table/tbody/tr[3]/td[2]/text()').extract_first().strip()\n item['hexagram3'] = response.xpath(\n '//div/table/tbody/tr[3]/td[3]/text()').extract_first().strip()\n item['hexagram4'] = response.xpath(\n '//div/table/tbody/tr[3]/td[4]/text()').extract_first().strip()\n yield item\n",
"step-4": "import scrapy\nfrom yijing64.items import Yijing64Item\n\n\nclass ZhouyiSpider(scrapy.Spider):\n name = 'zhouyi'\n allowed_domains = ['m.zhouyi.cc']\n start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']\n\n def parse(self, response):\n li_list = response.xpath(\"//div[@class='gualist1 tip_text']/ul/li\")\n for li in li_list:\n item = Yijing64Item()\n item['name'] = li.xpath('./a/text()').extract_first()\n detail_urls = 'https://m.zhouyi.cc' + li.xpath('./a/@href'\n ).extract_first()\n if detail_urls is not None:\n yield scrapy.Request(detail_urls, callback=self.\n parse_detail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta['item']\n item['hexagram1'] = response.xpath(\n '//div/table/tbody/tr[3]/td[1]/text()').extract_first().strip()\n item['hexagram2'] = response.xpath(\n '//div/table/tbody/tr[3]/td[2]/text()').extract_first().strip()\n item['hexagram3'] = response.xpath(\n '//div/table/tbody/tr[3]/td[3]/text()').extract_first().strip()\n item['hexagram4'] = response.xpath(\n '//div/table/tbody/tr[3]/td[4]/text()').extract_first().strip()\n yield item\n",
"step-5": "import scrapy\nfrom yijing64.items import Yijing64Item\n# import pymysql\n\n\nclass ZhouyiSpider(scrapy.Spider):\n name = 'zhouyi'\n allowed_domains = ['m.zhouyi.cc']\n start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']\n\n def parse(self, response):\n li_list = response.xpath(\"//div[@class='gualist1 tip_text']/ul/li\")\n\n for li in li_list:\n item = Yijing64Item()\n item['name'] = li.xpath(\"./a/text()\").extract_first()\n # item['urls'] = li.xpath(\"./a/@href\").extract_first()\n detail_urls = 'https://m.zhouyi.cc' + \\\n li.xpath(\"./a/@href\").extract_first()\n\n if detail_urls is not None:\n yield scrapy.Request(detail_urls, callback=self.parse_detail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta[\"item\"]\n item['hexagram1'] = response.xpath(\"//div/table/tbody/tr[3]/td[1]/text()\").extract_first().strip()\n item['hexagram2'] = response.xpath(\"//div/table/tbody/tr[3]/td[2]/text()\").extract_first().strip()\n item['hexagram3'] = response.xpath(\"//div/table/tbody/tr[3]/td[3]/text()\").extract_first().strip()\n item['hexagram4'] = response.xpath(\"//div/table/tbody/tr[3]/td[4]/text()\").extract_first().strip()\n # item['hexagram'] = response.xpath(\"//div[@class='tip_text'][1]\").extract_first().strip()\n # item['one_yao'] = response.xpath(\"//div[@class='tip_text'][2]\").extract_first().strip()\n # item['two_yao'] = response.xpath(\"//div[@class='tip_text'][3]\").extract_first().strip()\n # item['san_yao'] = response.xpath(\"//div[@class='tip_text'][4]\").extract_first().strip()\n # item['si_yao'] = response.xpath(\"//div[@class='tip_text'][5]\").extract_first().strip()\n # item['wu_yao'] = response.xpath(\"//div[@class='tip_text'][6]\").extract_first().strip()\n # item['liu_yao'] = response.xpath(\"//div[@class='tip_text'][7]\").extract_first().strip()\n yield item\n # hexagram_list = response.xpath(\n # \"//div/table/tbody/tr[3]/td/text()\").extract()\n # for i, v in enumerate(hexagram_list):\n # # print(\"==\"*10)\n # # print(i,index)\n # if i == 0:\n # item['hexagram1'] = v.strip()\n # elif i == 1:\n # item['hexagram2'] = v.strip()\n # elif i == 2:\n # item['hexagram3'] = v.strip()\n # else:\n # item['hexagram4'] = v.strip()\n # yield item\n # print(item)\n\n # def __init__(self):\n # \tcon = pymysql.connect(host=settings['MYSQL_HOST'], user=settings['MYSQL_USER'], passwd=settings['MYSQL_PASS'], db=settings['MYSQL_DB'],charset='utf8')\n # \tcur = con.cursor() # 创建数据库连接,定义连接指针\n # \tcon.close()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Notification(BaseAbstractModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Notification(BaseAbstractModel):
title = models.CharField(max_length=200)
body = models.TextField()
recipients = models.ManyToManyField(to=Profile, related_name=
'notifications', related_query_name='notification')
time_stamp = models.DateTimeField(auto_now_add=True)
read = models.BooleanField(default=False)
<|reserved_special_token_1|>
from django.db import models
from helpers.models import BaseAbstractModel
from Auth.models import Profile
from django.db.models.signals import post_save
from django.dispatch import receiver
class Notification(BaseAbstractModel):
title = models.CharField(max_length=200)
body = models.TextField()
recipients = models.ManyToManyField(to=Profile, related_name=
'notifications', related_query_name='notification')
time_stamp = models.DateTimeField(auto_now_add=True)
read = models.BooleanField(default=False)
<|reserved_special_token_1|>
from django.db import models
from helpers.models import BaseAbstractModel
from Auth.models import Profile
# from Jobs.models import UserJob
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Notification(BaseAbstractModel):
title = models.CharField(max_length=200)
body = models.TextField()
recipients = models.ManyToManyField(to=Profile,
related_name='notifications',
related_query_name='notification')
time_stamp = models.DateTimeField(auto_now_add=True)
read = models.BooleanField(default=False)
# @receiver(post_save, sender=UserJob)
# def job_handler(sender, instance, **kwargs):
# if instance.is_active:
# profile_list = instance.author.profile.all()
# subscribed_users = profile_list.filter(
# Q(user__notification_subscription__in_app_notifications=True) | Q(
# user__notification_subscription__email_notifications=True))
# email_subscribed_users = profile_list.filter(
# user__notification_subscription__email_notifications=True)
# if(subscribed_users.count() >= 1):
# notification = Notification.objects.create(
# title="New Job on Twous",
# body=re.sub(' +', ' ', "{} has published another job \
# titled {}".format(
# instance.author.first_name.capitalize(),
# instance.title)))
# notification.recipients.add(*subscribed_users)
# if(email_subscribed_users.count() >= 1):
# send_emails_to_recipients(notification, email_subscribed_users)
# notification.save()
|
flexible
|
{
"blob_id": "1066f86d3a35e892ca2a7054dfc89fe79f1d32c8",
"index": 7496,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Notification(BaseAbstractModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile, related_name=\n 'notifications', related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n",
"step-4": "from django.db import models\nfrom helpers.models import BaseAbstractModel\nfrom Auth.models import Profile\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile, related_name=\n 'notifications', related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n",
"step-5": "from django.db import models\nfrom helpers.models import BaseAbstractModel\nfrom Auth.models import Profile\n# from Jobs.models import UserJob\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n# Create your models here.\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile,\n related_name='notifications',\n related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n\n# @receiver(post_save, sender=UserJob)\n# def job_handler(sender, instance, **kwargs):\n# if instance.is_active:\n# profile_list = instance.author.profile.all()\n# subscribed_users = profile_list.filter(\n# Q(user__notification_subscription__in_app_notifications=True) | Q(\n# user__notification_subscription__email_notifications=True))\n\n# email_subscribed_users = profile_list.filter(\n# user__notification_subscription__email_notifications=True)\n# if(subscribed_users.count() >= 1):\n\n# notification = Notification.objects.create(\n# title=\"New Job on Twous\",\n# body=re.sub(' +', ' ', \"{} has published another job \\\n# titled {}\".format(\n# instance.author.first_name.capitalize(),\n# instance.title)))\n# notification.recipients.add(*subscribed_users)\n\n# if(email_subscribed_users.count() >= 1):\n# send_emails_to_recipients(notification, email_subscribed_users)\n\n# notification.save()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.test import TestCase, SimpleTestCase
from django.urls import reverse, resolve
from .views import profile, order_history
""" Url Testing """
class TestUrls(SimpleTestCase):
def test_profile_resolves(self):
url = reverse('profile')
self.assertEqual(resolve(url).func, profile)
def test_order_history_resolves(self):
url = reverse('order_history', args='1')
self.assertEqual(resolve(url).func, order_history)
|
normal
|
{
"blob_id": "5dc6b54357df87077d8159192cd52697b2616db8",
"index": 9186,
"step-1": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n",
"step-4": "from django.test import TestCase, SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom .views import profile, order_history\n<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n",
"step-5": "from django.test import TestCase, SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom .views import profile, order_history\n\n\"\"\" Url Testing \"\"\"\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
INSTALLED_APPS = ['django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.sessions',
'django.contrib.messages', 'django.contrib.staticfiles',
'admin_honeypot', 'bootstrap3', 'el_pagination', 'compressor',
'accounts', 'bot', 'home', 'pages', 'serve_media', 'events', 'gallery',
'groups', 'django_rq', 'surveys']
MIDDLEWARE_CLASSES = ['django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'csp.middleware.CSPMiddleware']
ROOT_URLCONF = 'config.urls'
TEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR,
'templates/error_pages')], 'APP_DIRS': True, 'OPTIONS': {
'context_processors': ['django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages']}}]
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [{'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'
}, {'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator'}, {
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator'}, {
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [os.path.join(BASE_DIR, 'static/locale/')]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
LOGIN_REDIRECT_URL = '/home'
TELEGRAM_TOKEN = os.environ.get('GROUPSOME_TELEGRAM_TOKEN')
TELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')
TELEGRAM_BOT_USERNAME = 'groupsomebot'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_SERVE_USING_NGINX = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder', 'pipeline.finders.PipelineFinder')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
PIPELINE = {'PIPELINE_ENABLED': True, 'COMPILERS': (
'pipeline.compilers.stylus.StylusCompiler',), 'STYLESHEETS': {'main': {
'source_filenames': ('style/main.styl',), 'output_filename':
'style/main.css'}}, 'STYLUS_ARGUMENTS': '-c'}
CSP_STYLE_SRC = "'self'", "'unsafe-inline'", 'fonts.googleapis.com'
CSP_FONT_SRC = "'self'", 'fonts.gstatic.com'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
INSTALLED_APPS = ['django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.sessions',
'django.contrib.messages', 'django.contrib.staticfiles',
'admin_honeypot', 'bootstrap3', 'el_pagination', 'compressor',
'accounts', 'bot', 'home', 'pages', 'serve_media', 'events', 'gallery',
'groups', 'django_rq', 'surveys']
MIDDLEWARE_CLASSES = ['django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'csp.middleware.CSPMiddleware']
ROOT_URLCONF = 'config.urls'
TEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR,
'templates/error_pages')], 'APP_DIRS': True, 'OPTIONS': {
'context_processors': ['django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages']}}]
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [{'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'
}, {'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator'}, {
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator'}, {
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [os.path.join(BASE_DIR, 'static/locale/')]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
LOGIN_REDIRECT_URL = '/home'
TELEGRAM_TOKEN = os.environ.get('GROUPSOME_TELEGRAM_TOKEN')
TELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')
TELEGRAM_BOT_USERNAME = 'groupsomebot'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_SERVE_USING_NGINX = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder', 'pipeline.finders.PipelineFinder')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
PIPELINE = {'PIPELINE_ENABLED': True, 'COMPILERS': (
'pipeline.compilers.stylus.StylusCompiler',), 'STYLESHEETS': {'main': {
'source_filenames': ('style/main.styl',), 'output_filename':
'style/main.css'}}, 'STYLUS_ARGUMENTS': '-c'}
CSP_STYLE_SRC = "'self'", "'unsafe-inline'", 'fonts.googleapis.com'
CSP_FONT_SRC = "'self'", 'fonts.gstatic.com'
<|reserved_special_token_1|>
"""
Django settings for gamelibrary project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'admin_honeypot',
'bootstrap3',
'el_pagination',
'compressor',
# 'pipeline',
'accounts',
'bot',
'home',
'pages',
'serve_media',
'events',
'gallery',
'groups',
'django_rq',
'surveys',
]
MIDDLEWARE_CLASSES = [
# 'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
'csp.middleware.CSPMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR, 'templates/error_pages')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'static/locale/'),
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
# Redirect to here after Login
LOGIN_REDIRECT_URL = '/home'
TELEGRAM_TOKEN = os.environ.get('GROUPSOME_TELEGRAM_TOKEN')
TELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')
TELEGRAM_BOT_USERNAME = "groupsomebot"
# Media root directory
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_SERVE_USING_NGINX = False
# Needed for Endless Scrolling
# TEMPLATE_CONTEXT_PROCESSORS += (
# 'django.core.context_processors.request',
# )
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
PIPELINE = {
'PIPELINE_ENABLED': True,
'COMPILERS': (
'pipeline.compilers.stylus.StylusCompiler',
),
'STYLESHEETS': {
'main': {
'source_filenames': (
'style/main.styl',
),
'output_filename': 'style/main.css',
}
},
'STYLUS_ARGUMENTS': '-c',
}
CSP_STYLE_SRC = ("'self'", "'unsafe-inline'", "fonts.googleapis.com")
CSP_FONT_SRC = ("'self'", "fonts.gstatic.com")
|
flexible
|
{
"blob_id": "b42414b7d8ed80d8794ab7c49dfde1e5df0721f1",
"index": 1318,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nALLOWED_HOSTS = []\nINSTALLED_APPS = ['django.contrib.admin', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.messages', 'django.contrib.staticfiles',\n 'admin_honeypot', 'bootstrap3', 'el_pagination', 'compressor',\n 'accounts', 'bot', 'home', 'pages', 'serve_media', 'events', 'gallery',\n 'groups', 'django_rq', 'surveys']\nMIDDLEWARE_CLASSES = ['django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'csp.middleware.CSPMiddleware']\nROOT_URLCONF = 'config.urls'\nTEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR,\n 'templates/error_pages')], 'APP_DIRS': True, 'OPTIONS': {\n 'context_processors': ['django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages']}}]\nWSGI_APPLICATION = 'config.wsgi.application'\nAUTH_PASSWORD_VALIDATORS = [{'NAME':\n 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'\n }, {'NAME':\n 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {\n 'NAME':\n 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n ]\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'static/locale/')]\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'assets/')\nLOGIN_REDIRECT_URL = '/home'\nTELEGRAM_TOKEN = os.environ.get('GROUPSOME_TELEGRAM_TOKEN')\nTELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')\nTELEGRAM_BOT_USERNAME = 'groupsomebot'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_SERVE_USING_NGINX = False\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder', 'pipeline.finders.PipelineFinder')\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'\nPIPELINE = {'PIPELINE_ENABLED': True, 'COMPILERS': (\n 'pipeline.compilers.stylus.StylusCompiler',), 'STYLESHEETS': {'main': {\n 'source_filenames': ('style/main.styl',), 'output_filename':\n 'style/main.css'}}, 'STYLUS_ARGUMENTS': '-c'}\nCSP_STYLE_SRC = \"'self'\", \"'unsafe-inline'\", 'fonts.googleapis.com'\nCSP_FONT_SRC = \"'self'\", 'fonts.gstatic.com'\n",
"step-3": "<mask token>\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nALLOWED_HOSTS = []\nINSTALLED_APPS = ['django.contrib.admin', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.messages', 'django.contrib.staticfiles',\n 'admin_honeypot', 'bootstrap3', 'el_pagination', 'compressor',\n 'accounts', 'bot', 'home', 'pages', 'serve_media', 'events', 'gallery',\n 'groups', 'django_rq', 'surveys']\nMIDDLEWARE_CLASSES = ['django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'csp.middleware.CSPMiddleware']\nROOT_URLCONF = 'config.urls'\nTEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR,\n 'templates/error_pages')], 'APP_DIRS': True, 'OPTIONS': {\n 'context_processors': ['django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages']}}]\nWSGI_APPLICATION = 'config.wsgi.application'\nAUTH_PASSWORD_VALIDATORS = [{'NAME':\n 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'\n }, {'NAME':\n 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {\n 'NAME':\n 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n ]\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'static/locale/')]\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'assets/')\nLOGIN_REDIRECT_URL = '/home'\nTELEGRAM_TOKEN = os.environ.get('GROUPSOME_TELEGRAM_TOKEN')\nTELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')\nTELEGRAM_BOT_USERNAME = 'groupsomebot'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_SERVE_USING_NGINX = False\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder', 'pipeline.finders.PipelineFinder')\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'\nPIPELINE = {'PIPELINE_ENABLED': True, 'COMPILERS': (\n 'pipeline.compilers.stylus.StylusCompiler',), 'STYLESHEETS': {'main': {\n 'source_filenames': ('style/main.styl',), 'output_filename':\n 'style/main.css'}}, 'STYLUS_ARGUMENTS': '-c'}\nCSP_STYLE_SRC = \"'self'\", \"'unsafe-inline'\", 'fonts.googleapis.com'\nCSP_FONT_SRC = \"'self'\", 'fonts.gstatic.com'\n",
"step-4": "\"\"\"\nDjango settings for gamelibrary project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\n# from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\n\n\n# SECURITY WARNING: don't run with debug turned on in production!\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'admin_honeypot',\n 'bootstrap3',\n 'el_pagination',\n 'compressor',\n # 'pipeline',\n 'accounts',\n 'bot',\n 'home',\n 'pages',\n 'serve_media',\n 'events',\n 'gallery',\n 'groups',\n 'django_rq',\n 'surveys',\n]\n\nMIDDLEWARE_CLASSES = [\n # 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n # 'django.middleware.cache.FetchFromCacheMiddleware',\n 'csp.middleware.CSPMiddleware',\n]\n\nROOT_URLCONF = 'config.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR, 'templates/error_pages')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'config.wsgi.application'\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, 'static/locale/'),\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'assets/')\n\n# Redirect to here after Login\nLOGIN_REDIRECT_URL = '/home'\n\nTELEGRAM_TOKEN = 
os.environ.get('GROUPSOME_TELEGRAM_TOKEN')\nTELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')\nTELEGRAM_BOT_USERNAME = \"groupsomebot\"\n\n# Media root directory\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_SERVE_USING_NGINX = False\n\n# Needed for Endless Scrolling\n# TEMPLATE_CONTEXT_PROCESSORS += (\n# 'django.core.context_processors.request',\n# )\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n 'pipeline.finders.PipelineFinder',\n)\n\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'\n\nPIPELINE = {\n 'PIPELINE_ENABLED': True,\n 'COMPILERS': (\n 'pipeline.compilers.stylus.StylusCompiler',\n ),\n 'STYLESHEETS': {\n 'main': {\n 'source_filenames': (\n 'style/main.styl',\n ),\n 'output_filename': 'style/main.css',\n }\n },\n 'STYLUS_ARGUMENTS': '-c',\n}\n\nCSP_STYLE_SRC = (\"'self'\", \"'unsafe-inline'\", \"fonts.googleapis.com\")\nCSP_FONT_SRC = (\"'self'\", \"fonts.gstatic.com\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Temp in ', celsius, 'celsius=', fah, ' Fahrenheit')
<|reserved_special_token_1|>
celsius = input('Enter temperature in Celsius')
celsius = int(celsius)
fah = celsius * 9 / 5 + 32
print('Temp in ', celsius, 'celsius=', fah, ' Fahrenheit')
<|reserved_special_token_1|>
# Program to convert temp in degree Celsius to temp in degree Fahrenheit
celsius = input("Enter temperature in Celsius")
celsius = int(celsius)
fah = (celsius * 9 / 5) + 32
print("Temp in ", celsius, "celsius=", fah, " Fahrenheit")
|
flexible
|
{
"blob_id": "e1172cadeb8b2ce036d8431cef78cfe19bda0cb8",
"index": 2161,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Temp in ', celsius, 'celsius=', fah, ' Fahrenheit')\n",
"step-3": "celsius = input('Enter temperature in Celsius')\ncelsius = int(celsius)\nfah = celsius * 9 / 5 + 32\nprint('Temp in ', celsius, 'celsius=', fah, ' Fahrenheit')\n",
"step-4": "#Program to convert temp in degree Celsius to temp in degree Fahrenheit\ncelsius=input(\"Enter temperature in Celsius\")\ncelsius=int(celsius)\nfah=(celsius*9/5)+32\nprint(\"Temp in \",celsius,\"celsius=\",fah,\" Fahrenheit\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from route4me import Route4Me
API_KEY = "11111111111111111111111111111111"
def main():
r4m = Route4Me(API_KEY)
route = r4m.route
response = route.get_routes(limit=1, offset=0)
if isinstance(response, dict) and 'errors' in response.keys():
print('. '.join(response['errors']))
else:
route_id = response[0]['route_id']
print('Route ID: {}'.format(route_id))
response = route.get_route(route_id=route_id)
if isinstance(response, dict) and 'errors' in response.keys():
print('. '.join(response['errors']))
else:
print('Original Route')
print('Route ID: {}'.format(response['route_id']))
for i, address in enumerate(response['addresses']):
print('Address #{}'.format(i + 1))
print('\tAddress: {0}'.format(address['address']))
print('\tRoute Destination ID: {0}'.format(
address['route_destination_id']))
route_destination_id = response['addresses'][1]['route_destination_id']
route_destination_id2 = response['addresses'][2]['route_destination_id']
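        # Build the re-sequence payload: move the third stop to sequence position 6.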
data = {
"route_destination_id": route_destination_id,
"route_id": route_id,
"addresses": [{
"route_destination_id": route_destination_id2,
"sequence_no": 6,
}]
}
print('After Re-sequence Route')
response = route.resequence_route(**data)
print('Route ID: {}'.format(response['route_id']))
for i, address in enumerate(response['addresses']):
print('Address #{}'.format(i + 1))
print('\tAddress: {0}'.format(address['address']))
print('\tRoute Destination ID: {0}'.format(
address['route_destination_id']))
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "bc4684d255a46427f708d8ce8bda2e12fb8c8ffe",
"index": 238,
"step-1": "<mask token>\n\n\ndef main():\n r4m = Route4Me(API_KEY)\n route = r4m.route\n response = route.get_routes(limit=1, offset=0)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n route_id = response[0]['route_id']\n print('Route ID: {}'.format(route_id))\n response = route.get_route(route_id=route_id)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n print('Original Route')\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(address[\n 'route_destination_id']))\n route_destination_id = response['addresses'][1][\n 'route_destination_id']\n route_destination_id2 = response['addresses'][2][\n 'route_destination_id']\n data = {'route_destination_id': route_destination_id,\n 'route_id': route_id, 'addresses': [{'route_destination_id':\n route_destination_id2, 'sequence_no': 6}]}\n print('After Re-sequence Route')\n response = route.resequence_route(**data)\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(address[\n 'route_destination_id']))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n r4m = Route4Me(API_KEY)\n route = r4m.route\n response = route.get_routes(limit=1, offset=0)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n route_id = response[0]['route_id']\n print('Route ID: {}'.format(route_id))\n response = route.get_route(route_id=route_id)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n print('Original Route')\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(address[\n 'route_destination_id']))\n route_destination_id = response['addresses'][1][\n 'route_destination_id']\n route_destination_id2 = response['addresses'][2][\n 'route_destination_id']\n data = {'route_destination_id': route_destination_id,\n 'route_id': route_id, 'addresses': [{'route_destination_id':\n route_destination_id2, 'sequence_no': 6}]}\n print('After Re-sequence Route')\n response = route.resequence_route(**data)\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(address[\n 'route_destination_id']))\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nAPI_KEY = '11111111111111111111111111111111'\n\n\ndef main():\n r4m = Route4Me(API_KEY)\n route = r4m.route\n response = route.get_routes(limit=1, offset=0)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n route_id = response[0]['route_id']\n print('Route ID: {}'.format(route_id))\n response = route.get_route(route_id=route_id)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n print('Original Route')\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(address[\n 'route_destination_id']))\n route_destination_id = response['addresses'][1][\n 'route_destination_id']\n route_destination_id2 = response['addresses'][2][\n 'route_destination_id']\n data = {'route_destination_id': route_destination_id,\n 'route_id': route_id, 'addresses': [{'route_destination_id':\n route_destination_id2, 'sequence_no': 6}]}\n print('After Re-sequence Route')\n response = route.resequence_route(**data)\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(address[\n 'route_destination_id']))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from route4me import Route4Me\nAPI_KEY = '11111111111111111111111111111111'\n\n\ndef main():\n r4m = Route4Me(API_KEY)\n route = r4m.route\n response = route.get_routes(limit=1, offset=0)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n route_id = response[0]['route_id']\n print('Route ID: {}'.format(route_id))\n response = route.get_route(route_id=route_id)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n print('Original Route')\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(address[\n 'route_destination_id']))\n route_destination_id = response['addresses'][1][\n 'route_destination_id']\n route_destination_id2 = response['addresses'][2][\n 'route_destination_id']\n data = {'route_destination_id': route_destination_id,\n 'route_id': route_id, 'addresses': [{'route_destination_id':\n route_destination_id2, 'sequence_no': 6}]}\n print('After Re-sequence Route')\n response = route.resequence_route(**data)\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(address[\n 'route_destination_id']))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom route4me import Route4Me\n\nAPI_KEY = \"11111111111111111111111111111111\"\n\n\ndef main():\n r4m = Route4Me(API_KEY)\n route = r4m.route\n response = route.get_routes(limit=1, offset=0)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n route_id = response[0]['route_id']\n print('Route ID: {}'.format(route_id))\n response = route.get_route(route_id=route_id)\n if isinstance(response, dict) and 'errors' in response.keys():\n print('. '.join(response['errors']))\n else:\n print('Original Route')\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(\n address['route_destination_id']))\n route_destination_id = response['addresses'][1]['route_destination_id']\n route_destination_id2 = response['addresses'][2]['route_destination_id']\n data = {\n \"route_destination_id\": route_destination_id,\n \"route_id\": route_id,\n \"addresses\": [{\n \"route_destination_id\": route_destination_id2,\n \"sequence_no\": 6,\n }]\n }\n print('After Re-sequence Route')\n response = route.resequence_route(**data)\n print('Route ID: {}'.format(response['route_id']))\n for i, address in enumerate(response['addresses']):\n print('Address #{}'.format(i + 1))\n print('\\tAddress: {0}'.format(address['address']))\n print('\\tRoute Destination ID: {0}'.format(\n address['route_destination_id']))\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Enter your code here. Read input from STDIN. Print output to STDOUT
n = input()
vals = list(map(int, input().split()))
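# Median-of-halves quartiles: Q2 is the overall median; Q1/Q3 are the medians
# of the lower/upper halves (the middle element is excluded when n is odd).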
def median(values):
    n = len(values)
    values = sorted(values)
    if n % 2 == 1:
        return values[(n + 1) // 2 - 1]
    else:
        return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)
def quartiles(values):
    n = len(values)
    values.sort()
    Q2 = median(values)
    Q1 = median(values[:int(n / 2)])
    # print("values=", values)
    if n % 2 == 0:
        Q3 = median(values[int(n / 2):])
    else:
        Q3 = median(values[int(n / 2 + 1):])
    return Q1, Q2, Q3
Q1, Q2, Q3 = quartiles(vals)
print(Q1)
print(Q2)
print(Q3)
|
normal
|
{
"blob_id": "9d6b5baa8462b2996e4518dd39b5bb1efde1fd9d",
"index": 894,
"step-1": "<mask token>\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\nprint(Q1)\nprint(Q2)\nprint(Q3)\n",
"step-4": "n = input()\nvals = list(map(int, input().split()))\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\nQ1, Q2, Q3 = quartiles(vals)\nprint(Q1)\nprint(Q2)\nprint(Q3)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# Enter your code here. Read input from STDIN. Print output to STDOUT\r\n\r\nn= input()\r\nvals= list(map(int,input().split()))\r\n\r\ndef median(values):\r\n n=len(values)\r\n values = sorted(values)\r\n if n%2==1:\r\n return values[(n+1)//2 - 1]\r\n else:\r\n return int(sum(values[int((n/2)-1):int((n/2)+1)])/2)\r\n \r\ndef quartiles(values):\r\n n=len(values)\r\n values.sort()\r\n Q2=median(values)\r\n Q1=median(values[:int(n/2)])\r\n #print (\"values=\",values)\r\n\r\n if n%2==0:\r\n Q3=median(values[int(n/2):]) \r\n\r\n else:\r\n Q3=median(values[int(n/2+1):])\r\n \r\n return Q1,Q2,Q3\r\n\r\nQ1,Q2,Q3=quartiles(vals)\r\n\r\nprint(Q1)\r\nprint(Q2)\r\nprint(Q3)\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#
# Wrappers for model evaluation
#
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from modules import Classifier
from typing import Generator, NamedTuple, Optional, Union
from utils import expand_generator
class Evaluator(object):
class Result(NamedTuple):
accuracy: float
log_loss: float
def evaluate(self, *args, **kwargs):
return NotImplemented
class ModelEvaluator(Evaluator):
def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, mixed_precision: bool = True):
self.dataset = dataset
self.mixed_precision = mixed_precision
self.loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=num_workers, drop_last=False)
@property
def num_batches(self):
return len(self.loader)
def evaluate(self, model: Classifier, device: Optional[Union[torch.device, str]] = None) -> Evaluator.Result:
return expand_generator(self.evaluate_iter(model, device), return_only=True)
def evaluate_iter(
self,
model: Classifier,
device: Optional[Union[torch.device, str]] = None) -> Generator[dict, None, Evaluator.Result]:
with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled=self.mixed_precision):
mean_accuracy = 0.
mean_log_loss = 0.
for i, (x, y) in enumerate(self.loader):
x = x.to(device)
y = y.to(device)
logits = model(x)
correct = torch.sum(logits.argmax(-1) == y).item()
log_loss = F.cross_entropy(logits, y, reduction='sum').item()
mean_accuracy += correct / len(self.dataset)
mean_log_loss += log_loss / len(self.dataset)
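                # emit per-batch progress; the final Result travels back as the generator's return value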
yield dict(batch=i)
return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
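# Minimal usage sketch (names like `model` and `test_set` are illustrative,
# not defined in this module):
#   evaluator = ModelEvaluator(test_set, batch_size=128, num_workers=2)
#   result = evaluator.evaluate(model, device='cuda')
#   print(result.accuracy, result.log_loss)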
|
normal
|
{
"blob_id": "493dbf85069f2115896a5f5f5d593c8d95b85cff",
"index": 4594,
"step-1": "<mask token>\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-2": "<mask token>\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n <mask token>\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-3": "<mask token>\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-4": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom modules import Classifier\nfrom typing import Generator, NamedTuple, Optional, Union\nfrom utils import expand_generator\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-5": "#\n# Wrappers for model evaluation\n#\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom modules import Classifier\nfrom typing import Generator, NamedTuple, Optional, Union\nfrom utils import expand_generator\n\n\nclass Evaluator(object):\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, mixed_precision: bool = True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.device, str]] = None) -> Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device), return_only=True)\n\n def evaluate_iter(\n self,\n model: Classifier,\n device: Optional[Union[torch.device, str]] = None) -> Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled=self.mixed_precision):\n mean_accuracy = 0.\n mean_log_loss = 0.\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import discord
from discord.ext import commands
from os import path
import os
import datetime as dt
import numpy as np
import math
# client = commands.Bot(command_prefix = '.', case_insensitive=True)
# UTOPIA = 679921845671035034
# DEV_BOT_TOKEN = 'NzUzMzg1MjE1MTAzMzM2NTg4.X1laqA.vKvoV8Gz9jBWDWvIaBGDC4xbLB4'
# BOT_TOKEN = 'NzU0MDAyMzEwNTM5MTE2NTQ0.X1uZXw.urRh3pgMuS8IAfD4jAMbJVdO8D4'
# CREDS = BOT_TOKEN
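# SECURITY NOTE: hard-coded bot tokens (even commented out) should not be
# committed; load them from environment variables instead.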
|
normal
|
{
"blob_id": "bc8d3a5e3ed845b4ab2d203bec47881be64ba3f8",
"index": 3723,
"step-1": "<mask token>\n",
"step-2": "import discord\nfrom discord.ext import commands\nfrom os import path\nimport os\nimport datetime as dt\nimport numpy as np\nimport math\n",
"step-3": "import discord\nfrom discord.ext import commands\nfrom os import path\nimport os\nimport datetime as dt\nimport numpy as np\nimport math\n\n# client = commands.Bot(command_prefix = '.', case_insensitive=True)\n\n\n\n# UTOPIA = 679921845671035034\n\n\n# DEV_BOT_TOKEN = 'NzUzMzg1MjE1MTAzMzM2NTg4.X1laqA.vKvoV8Gz9jBWDWvIaBGDC4xbLB4'\n# BOT_TOKEN = 'NzU0MDAyMzEwNTM5MTE2NTQ0.X1uZXw.urRh3pgMuS8IAfD4jAMbJVdO8D4'\n# CREDS = BOT_TOKEN\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from com.kakao.cafe.menu.tea.milkTea import MilkTea
class MatchaMilkTea(MilkTea):
def __init__(self):
super().__init__()
self.__matcha = 1
self.__condensedMilk = 1
self.name = "MatchaMilkTea"
self.__price = 4500
self.__milk = 400
self.__blackTea = 2
def getName(self) -> str:
return self.name
def setName(self, name: str) -> None:
self.name = name
def getPrice(self) -> int:
return self.__price
def setPrice(self, price: int) -> None:
self.__price = price
def isIced(self) -> bool:
return self.iced
def setIced(self, iced: bool) -> None:
        self.iced = iced  # align with the attribute read by isIced()
def getWater(self) -> int:
pass
def setWater(self, water: int) -> None:
pass
def getMilk(self) -> int:
return self.__milk
def setMilk(self, milk: int) -> None:
self.__milk = milk
def getBlackTea(self) -> int:
return self.__blackTea
def setBlackTea(self, blacktea: int) -> None:
self.__blackTea = blacktea
def getMatcha(self) -> int:
return self.__matcha
def setMatcha(self, matcha: int) -> None:
self.__matcha = matcha
def getCondensedMilk(self) -> int:
return self.__condensedMilk
def setCondensedMilk(self, condensedMilk: int) -> None:
self.__condensedMilk = condensedMilk
def addBlackTea(self, amount: int) -> None:
self.setBlackTea(self.getBlackTea() + amount)
self.setPrice(self.getPrice() + amount * 500)
def subBlackTea(self, amount: int) -> None:
if amount > self.__blackTea:
            raise ValueError("You can't subtract more blacktea.")
else:
self.setBlackTea(self.getBlackTea() - amount)
def addMatcha(self, amount: int) -> None:
self.setMatcha(self.getMatcha() + amount)
self.setPrice(self.getPrice() + amount * 400)
def addCondensedMilk(self, amount: int) -> None:
self.setCondensedMilk(self.getCondensedMilk() + amount)
self.setPrice(self.getPrice() + amount * 500)
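# Illustrative arithmetic: a fresh MatchaMilkTea() has price 4500;
# addMatcha(1) adds 400 (-> 4900) and addCondensedMilk(1) adds 500 (-> 5400).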
|
normal
|
{
"blob_id": "96b113678a3453520cd2e62eb11efd9582710409",
"index": 2087,
"step-1": "<mask token>\n\n\nclass MatchaMilkTea(MilkTea):\n <mask token>\n\n def getName(self) ->str:\n return self.name\n <mask token>\n <mask token>\n\n def setPrice(self, price: int) ->None:\n self.__price = price\n <mask token>\n\n def setIced(self, iced: bool) ->None:\n self._iced = iced\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getMatcha(self) ->int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) ->None:\n self.__matcha = matcha\n <mask token>\n\n def setCondensedMilk(self, condensedMilk: int) ->None:\n self.__condensedMilk = condensedMilk\n <mask token>\n\n def subBlackTea(self, amount: int) ->None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n <mask token>\n\n def addCondensedMilk(self, amount: int) ->None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-2": "<mask token>\n\n\nclass MatchaMilkTea(MilkTea):\n\n def __init__(self):\n super().__init__()\n self.__matcha = 1\n self.__condensedMilk = 1\n self.name = 'MatchaMilkTea'\n self.__price = 4500\n self.__milk = 400\n self.__blackTea = 2\n\n def getName(self) ->str:\n return self.name\n <mask token>\n\n def getPrice(self) ->int:\n return self.__price\n\n def setPrice(self, price: int) ->None:\n self.__price = price\n <mask token>\n\n def setIced(self, iced: bool) ->None:\n self._iced = iced\n\n def getWater(self) ->int:\n pass\n\n def setWater(self, water: int) ->None:\n pass\n\n def getMilk(self) ->int:\n return self.__milk\n <mask token>\n\n def getBlackTea(self) ->int:\n return self.__blackTea\n <mask token>\n\n def getMatcha(self) ->int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) ->None:\n self.__matcha = matcha\n <mask token>\n\n def setCondensedMilk(self, condensedMilk: int) ->None:\n self.__condensedMilk = condensedMilk\n <mask token>\n\n def subBlackTea(self, amount: int) ->None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n <mask token>\n\n def addCondensedMilk(self, amount: int) ->None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-3": "<mask token>\n\n\nclass MatchaMilkTea(MilkTea):\n\n def __init__(self):\n super().__init__()\n self.__matcha = 1\n self.__condensedMilk = 1\n self.name = 'MatchaMilkTea'\n self.__price = 4500\n self.__milk = 400\n self.__blackTea = 2\n\n def getName(self) ->str:\n return self.name\n\n def setName(self, name: str) ->None:\n self.name = name\n\n def getPrice(self) ->int:\n return self.__price\n\n def setPrice(self, price: int) ->None:\n self.__price = price\n <mask token>\n\n def setIced(self, iced: bool) ->None:\n self._iced = iced\n\n def getWater(self) ->int:\n pass\n\n def setWater(self, water: int) ->None:\n pass\n\n def getMilk(self) ->int:\n return self.__milk\n\n def setMilk(self, milk: int) ->None:\n self.__milk = milk\n\n def getBlackTea(self) ->int:\n return self.__blackTea\n\n def setBlackTea(self, blacktea: int) ->None:\n self.__blackTea = blacktea\n\n def getMatcha(self) ->int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) ->None:\n self.__matcha = matcha\n <mask token>\n\n def setCondensedMilk(self, condensedMilk: int) ->None:\n self.__condensedMilk = condensedMilk\n\n def addBlackTea(self, amount: int) ->None:\n self.setBlackTea(self.getBlackTea() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n\n def subBlackTea(self, amount: int) ->None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n\n def addMatcha(self, amount: int) ->None:\n self.setMatcha(self.getMatcha() + amount)\n self.setPrice(self.getPrice() + amount * 400)\n\n def addCondensedMilk(self, amount: int) ->None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-4": "from com.kakao.cafe.menu.tea.milkTea import MilkTea\n\n\nclass MatchaMilkTea(MilkTea):\n\n def __init__(self):\n super().__init__()\n self.__matcha = 1\n self.__condensedMilk = 1\n self.name = 'MatchaMilkTea'\n self.__price = 4500\n self.__milk = 400\n self.__blackTea = 2\n\n def getName(self) ->str:\n return self.name\n\n def setName(self, name: str) ->None:\n self.name = name\n\n def getPrice(self) ->int:\n return self.__price\n\n def setPrice(self, price: int) ->None:\n self.__price = price\n\n def isIced(self) ->bool:\n return self.iced\n\n def setIced(self, iced: bool) ->None:\n self._iced = iced\n\n def getWater(self) ->int:\n pass\n\n def setWater(self, water: int) ->None:\n pass\n\n def getMilk(self) ->int:\n return self.__milk\n\n def setMilk(self, milk: int) ->None:\n self.__milk = milk\n\n def getBlackTea(self) ->int:\n return self.__blackTea\n\n def setBlackTea(self, blacktea: int) ->None:\n self.__blackTea = blacktea\n\n def getMatcha(self) ->int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) ->None:\n self.__matcha = matcha\n\n def getCondensedMilk(self) ->int:\n return self.__condensedMilk\n\n def setCondensedMilk(self, condensedMilk: int) ->None:\n self.__condensedMilk = condensedMilk\n\n def addBlackTea(self, amount: int) ->None:\n self.setBlackTea(self.getBlackTea() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n\n def subBlackTea(self, amount: int) ->None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n\n def addMatcha(self, amount: int) ->None:\n self.setMatcha(self.getMatcha() + amount)\n self.setPrice(self.getPrice() + amount * 400)\n\n def addCondensedMilk(self, amount: int) ->None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-5": "from com.kakao.cafe.menu.tea.milkTea import MilkTea\n\n\nclass MatchaMilkTea(MilkTea):\n def __init__(self):\n super().__init__()\n\n self.__matcha = 1\n self.__condensedMilk = 1\n self.name = \"MatchaMilkTea\"\n self.__price = 4500\n self.__milk = 400\n self.__blackTea = 2\n\n def getName(self) -> str:\n return self.name\n\n def setName(self, name: str) -> None:\n self.name = name\n\n def getPrice(self) -> int:\n return self.__price\n\n def setPrice(self, price: int) -> None:\n self.__price = price\n\n def isIced(self) -> bool:\n return self.iced\n\n def setIced(self, iced: bool) -> None:\n self._iced = iced\n\n def getWater(self) -> int:\n pass\n\n def setWater(self, water: int) -> None:\n pass\n\n def getMilk(self) -> int:\n return self.__milk\n\n def setMilk(self, milk: int) -> None:\n self.__milk = milk\n\n def getBlackTea(self) -> int:\n return self.__blackTea\n\n def setBlackTea(self, blacktea: int) -> None:\n self.__blackTea = blacktea\n\n def getMatcha(self) -> int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) -> None:\n self.__matcha = matcha\n\n def getCondensedMilk(self) -> int:\n return self.__condensedMilk\n\n def setCondensedMilk(self, condensedMilk: int) -> None:\n self.__condensedMilk = condensedMilk\n\n def addBlackTea(self, amount: int) -> None:\n self.setBlackTea(self.getBlackTea() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n\n def subBlackTea(self, amount: int) -> None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n\n def addMatcha(self, amount: int) -> None:\n self.setMatcha(self.getMatcha() + amount)\n self.setPrice(self.getPrice() + amount * 400)\n\n def addCondensedMilk(self, amount: int) -> None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-ids": [
9,
15,
20,
23,
24
]
}
|
[
9,
15,
20,
23,
24
] |
class TimeEntry:
def __init__(self, date, duration, togglproject='default toggl',
tdproject='default td', togglID='NULL', tdID='Null'):
self.duration = duration
self.date = date
self.togglProject = togglproject
self.tdProject = tdproject
self.togglID = togglID
self.tdID = tdID
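# Illustrative construction (argument values are made up):
#   entry = TimeEntry('2021-01-01', 3600, togglproject='acme', tdproject='acme')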
|
normal
|
{
"blob_id": "bdf2c35c12820dd31bd242ce1b6dae7271ceb2b7",
"index": 8433,
"step-1": "<mask token>\n",
"step-2": "class TimeEntry:\n <mask token>\n",
"step-3": "class TimeEntry:\n\n def __init__(self, date, duration, togglproject='default toggl',\n tdproject='default td', togglID='NULL', tdID='Null'):\n self.duration = duration\n self.date = date\n self.togglProject = togglproject\n self.tdProject = tdproject\n self.togglID = togglID\n self.tdID = tdID\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from collections import deque
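# deque([], n) is a bounded queue: once it holds n items, each append drops the oldest item from the other end.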
def my_queue(n=5):
    return deque([], n)
if __name__ == '__main__':
mq = my_queue()
for i in range(10):
mq.append(i)
print((i, list(mq)))
"""Queue size does not go beyond n int, this outputs:
(0, [0])
(1, [0, 1])
(2, [0, 1, 2])
(3, [0, 1, 2, 3])
(4, [0, 1, 2, 3, 4])
(5, [1, 2, 3, 4, 5])
(6, [2, 3, 4, 5, 6])
(7, [3, 4, 5, 6, 7])
(8, [4, 5, 6, 7, 8])
(9, [5, 6, 7, 8, 9])
"""
|
normal
|
{
"blob_id": "499baaa8c739c1bd846edc944e510542d76bbed5",
"index": 9312,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef my_queue(n=5):\n return deque([], n)\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef my_queue(n=5):\n return deque([], n)\n pass\n\n\nif __name__ == '__main__':\n mq = my_queue()\n for i in range(10):\n mq.append(i)\n print((i, list(mq)))\n \"\"\"Queue size does not go beyond n int, this outputs:\n (0, [0])\n (1, [0, 1])\n (2, [0, 1, 2])\n (3, [0, 1, 2, 3])\n (4, [0, 1, 2, 3, 4])\n (5, [1, 2, 3, 4, 5])\n (6, [2, 3, 4, 5, 6])\n (7, [3, 4, 5, 6, 7])\n (8, [4, 5, 6, 7, 8])\n (9, [5, 6, 7, 8, 9])\n \"\"\"\n",
"step-4": "from collections import deque\n\n\ndef my_queue(n=5):\n return deque([], n)\n pass\n\n\nif __name__ == '__main__':\n mq = my_queue()\n for i in range(10):\n mq.append(i)\n print((i, list(mq)))\n \"\"\"Queue size does not go beyond n int, this outputs:\n (0, [0])\n (1, [0, 1])\n (2, [0, 1, 2])\n (3, [0, 1, 2, 3])\n (4, [0, 1, 2, 3, 4])\n (5, [1, 2, 3, 4, 5])\n (6, [2, 3, 4, 5, 6])\n (7, [3, 4, 5, 6, 7])\n (8, [4, 5, 6, 7, 8])\n (9, [5, 6, 7, 8, 9])\n \"\"\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/user/bin/env python3 -tt
"""
https://adventofcode.com/2017/day/7
"""
import sys
import re
# Global variables
task="d-7"
infile=task + ".input"
with open('input/' + infile) as file:
input = file.read()
file.close()
class Node:
parent = None
children = None
weight_sum = 0
def __init__(self, name, weight, linked):
self.name = name
self.weight = int(weight)
self.weight_sum += self.weight
self.linked = linked
self.children = []
def __str__(self):
children_str = " ".join(self.linked)
parent_name = self.parent.name if self.parent else "KING"
return "" + self.name + " : " + parent_name + ":" + str(self.weight) + " : " + children_str #+ ":||||:" + " ".join([n.name for n in self.children])
nodeTree = []
def traverseTree(node, deep):
if len(nodeTree) <= deep:
nodeTree.append([])
nodeTree[deep].append(node)
if not node.children:
return
for child in node.children:
traverseTree(child, deep+1)
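# Find the child whose subtree weight disagrees with its siblings; the fix is
# to shrink that node's own weight by the difference.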
def detectUnstable(node, deep):
weights = []
for n in node.children:
if n.weight_sum not in weights:
weights.append(n.weight_sum)
for n in node.children:
if len(weights) > 1 and max(weights) == n.weight_sum:
print("found unstable for: ", n.name, " in: ", weights)
diff = max(weights) - min(weights)
new_weight = n.weight - diff
print("[", deep, "]New weight:", new_weight, " for: ", n, " diff: ", diff)
for n in node.children:
if len(nodeTree) > deep + 1:
            detectUnstable(n, deep + 1)
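# Recursively wire parents to children and accumulate each subtree's total
# weight into parent.weight_sum.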
def buildTree(parent, nodes):
if not parent.linked:
return parent
for link_str in parent.linked:
for node in nodes:
#Find matching node
if link_str == node.name:
node.parent = parent
parent.children.append(node)
child = buildTree(node, nodes)
parent.weight_sum += child.weight_sum
return parent
def printNodes(nodes):
print("Print all nodes")
for node in nodes:
print(node)
def solve():
raw_node_list = [s.rstrip() for s in input.split("\n")]
regex = re.compile(r'(\w+) \((\d+)\)')
master_node = None
nodes = []
all_linked_node_names = []
for n in raw_node_list:
pmn = regex.match(n)
np = Node(pmn.group(1).rstrip(), pmn.group(2).rstrip(), [])
links = n.split("->")
if len(links) > 1:
np.linked = [s.strip() for s in links[1].split(",")]
if np:
nodes.append(np)
for link in np.linked:
all_linked_node_names.append(link)
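    # The bottom (root) program is the one that never appears in any node's linked list.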
for node in nodes:
        if len(node.linked) > 1 and node.name not in ":".join(all_linked_node_names):
master_node = node
buildTree(master_node, nodes)
traverseTree(master_node, 0)
# for row in nodeTree:
# nodes = ""
# for node in row:
# parent = node.parent.name if node.parent else "KING"
# nodes += "[" + parent + "]" + node.name + ":" + str(node.weight_sum)
# print(nodes)
# for weights in weightTree:
# if len(weights) > 1:
# print(":".join([str(s) for s in weights]))
print("Detect unstable:")
detectUnstable(master_node, 0)
if __name__ == '__main__':
solve()
print("Finished executing: " + task)
sys.exit(1)
|
normal
|
{
"blob_id": "679ca76212b90261683d59899c1189280b6b6e8c",
"index": 5953,
"step-1": "<mask token>\n\n\nclass Node:\n parent = None\n children = None\n weight_sum = 0\n\n def __init__(self, name, weight, linked):\n self.name = name\n self.weight = int(weight)\n self.weight_sum += self.weight\n self.linked = linked\n self.children = []\n\n def __str__(self):\n children_str = ' '.join(self.linked)\n parent_name = self.parent.name if self.parent else 'KING'\n return '' + self.name + ' : ' + parent_name + ':' + str(self.weight\n ) + ' : ' + children_str\n\n\n<mask token>\n\n\ndef traverseTree(node, deep):\n if len(nodeTree) <= deep:\n nodeTree.append([])\n nodeTree[deep].append(node)\n if not node.children:\n return\n for child in node.children:\n traverseTree(child, deep + 1)\n\n\n<mask token>\n\n\ndef solve():\n raw_node_list = [s.rstrip() for s in input.split('\\n')]\n regex = re.compile('(\\\\w+) \\\\((\\\\d+)\\\\)')\n master_node = None\n nodes = []\n all_linked_node_names = []\n for n in raw_node_list:\n pmn = regex.match(n)\n np = Node(pmn.group(1).rstrip(), pmn.group(2).rstrip(), [])\n links = n.split('->')\n if len(links) > 1:\n np.linked = [s.strip() for s in links[1].split(',')]\n if np:\n nodes.append(np)\n for link in np.linked:\n all_linked_node_names.append(link)\n for node in nodes:\n if len(node.linked) > 1 and not node.name in ':'.join(\n all_linked_node_names):\n master_node = node\n buildTree(master_node, nodes)\n traverseTree(master_node, 0)\n print('Detect unstable:')\n detectUnstable(master_node, 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node:\n parent = None\n children = None\n weight_sum = 0\n\n def __init__(self, name, weight, linked):\n self.name = name\n self.weight = int(weight)\n self.weight_sum += self.weight\n self.linked = linked\n self.children = []\n\n def __str__(self):\n children_str = ' '.join(self.linked)\n parent_name = self.parent.name if self.parent else 'KING'\n return '' + self.name + ' : ' + parent_name + ':' + str(self.weight\n ) + ' : ' + children_str\n\n\n<mask token>\n\n\ndef traverseTree(node, deep):\n if len(nodeTree) <= deep:\n nodeTree.append([])\n nodeTree[deep].append(node)\n if not node.children:\n return\n for child in node.children:\n traverseTree(child, deep + 1)\n\n\ndef detectUnstable(node, deep):\n weights = []\n for n in node.children:\n if n.weight_sum not in weights:\n weights.append(n.weight_sum)\n for n in node.children:\n if len(weights) > 1 and max(weights) == n.weight_sum:\n print('found unstable for: ', n.name, ' in: ', weights)\n diff = max(weights) - min(weights)\n new_weight = n.weight - diff\n print('[', deep, ']New weight:', new_weight, ' for: ', n,\n ' diff: ', diff)\n for n in node.children:\n if len(nodeTree) > deep + 1:\n detectUnstable(n, deep + 1)\n\n\n<mask token>\n\n\ndef printNodes(nodes):\n print('Print all nodes')\n for node in nodes:\n print(node)\n\n\ndef solve():\n raw_node_list = [s.rstrip() for s in input.split('\\n')]\n regex = re.compile('(\\\\w+) \\\\((\\\\d+)\\\\)')\n master_node = None\n nodes = []\n all_linked_node_names = []\n for n in raw_node_list:\n pmn = regex.match(n)\n np = Node(pmn.group(1).rstrip(), pmn.group(2).rstrip(), [])\n links = n.split('->')\n if len(links) > 1:\n np.linked = [s.strip() for s in links[1].split(',')]\n if np:\n nodes.append(np)\n for link in np.linked:\n all_linked_node_names.append(link)\n for node in nodes:\n if len(node.linked) > 1 and not node.name in ':'.join(\n all_linked_node_names):\n master_node = node\n buildTree(master_node, nodes)\n traverseTree(master_node, 0)\n print('Detect unstable:')\n detectUnstable(master_node, 0)\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith open('input/' + infile) as file:\n input = file.read()\nfile.close()\n\n\nclass Node:\n parent = None\n children = None\n weight_sum = 0\n\n def __init__(self, name, weight, linked):\n self.name = name\n self.weight = int(weight)\n self.weight_sum += self.weight\n self.linked = linked\n self.children = []\n\n def __str__(self):\n children_str = ' '.join(self.linked)\n parent_name = self.parent.name if self.parent else 'KING'\n return '' + self.name + ' : ' + parent_name + ':' + str(self.weight\n ) + ' : ' + children_str\n\n\n<mask token>\n\n\ndef traverseTree(node, deep):\n if len(nodeTree) <= deep:\n nodeTree.append([])\n nodeTree[deep].append(node)\n if not node.children:\n return\n for child in node.children:\n traverseTree(child, deep + 1)\n\n\ndef detectUnstable(node, deep):\n weights = []\n for n in node.children:\n if n.weight_sum not in weights:\n weights.append(n.weight_sum)\n for n in node.children:\n if len(weights) > 1 and max(weights) == n.weight_sum:\n print('found unstable for: ', n.name, ' in: ', weights)\n diff = max(weights) - min(weights)\n new_weight = n.weight - diff\n print('[', deep, ']New weight:', new_weight, ' for: ', n,\n ' diff: ', diff)\n for n in node.children:\n if len(nodeTree) > deep + 1:\n detectUnstable(n, deep + 1)\n\n\ndef buildTree(parent, nodes):\n if not parent.linked:\n return parent\n for link_str in parent.linked:\n for node in nodes:\n if link_str == node.name:\n node.parent = parent\n parent.children.append(node)\n child = buildTree(node, nodes)\n parent.weight_sum += child.weight_sum\n return parent\n\n\ndef printNodes(nodes):\n print('Print all nodes')\n for node in nodes:\n print(node)\n\n\ndef solve():\n raw_node_list = [s.rstrip() for s in input.split('\\n')]\n regex = re.compile('(\\\\w+) \\\\((\\\\d+)\\\\)')\n master_node = None\n nodes = []\n all_linked_node_names = []\n for n in raw_node_list:\n pmn = regex.match(n)\n np = Node(pmn.group(1).rstrip(), pmn.group(2).rstrip(), [])\n links = n.split('->')\n if len(links) > 1:\n np.linked = [s.strip() for s in links[1].split(',')]\n if np:\n nodes.append(np)\n for link in np.linked:\n all_linked_node_names.append(link)\n for node in nodes:\n if len(node.linked) > 1 and not node.name in ':'.join(\n all_linked_node_names):\n master_node = node\n buildTree(master_node, nodes)\n traverseTree(master_node, 0)\n print('Detect unstable:')\n detectUnstable(master_node, 0)\n\n\nif __name__ == '__main__':\n solve()\n print('Finished executing: ' + task)\n sys.exit(1)\n",
"step-4": "<mask token>\nimport sys\nimport re\ntask = 'd-7'\ninfile = task + '.input'\nwith open('input/' + infile) as file:\n input = file.read()\nfile.close()\n\n\nclass Node:\n parent = None\n children = None\n weight_sum = 0\n\n def __init__(self, name, weight, linked):\n self.name = name\n self.weight = int(weight)\n self.weight_sum += self.weight\n self.linked = linked\n self.children = []\n\n def __str__(self):\n children_str = ' '.join(self.linked)\n parent_name = self.parent.name if self.parent else 'KING'\n return '' + self.name + ' : ' + parent_name + ':' + str(self.weight\n ) + ' : ' + children_str\n\n\nnodeTree = []\n\n\ndef traverseTree(node, deep):\n if len(nodeTree) <= deep:\n nodeTree.append([])\n nodeTree[deep].append(node)\n if not node.children:\n return\n for child in node.children:\n traverseTree(child, deep + 1)\n\n\ndef detectUnstable(node, deep):\n weights = []\n for n in node.children:\n if n.weight_sum not in weights:\n weights.append(n.weight_sum)\n for n in node.children:\n if len(weights) > 1 and max(weights) == n.weight_sum:\n print('found unstable for: ', n.name, ' in: ', weights)\n diff = max(weights) - min(weights)\n new_weight = n.weight - diff\n print('[', deep, ']New weight:', new_weight, ' for: ', n,\n ' diff: ', diff)\n for n in node.children:\n if len(nodeTree) > deep + 1:\n detectUnstable(n, deep + 1)\n\n\ndef buildTree(parent, nodes):\n if not parent.linked:\n return parent\n for link_str in parent.linked:\n for node in nodes:\n if link_str == node.name:\n node.parent = parent\n parent.children.append(node)\n child = buildTree(node, nodes)\n parent.weight_sum += child.weight_sum\n return parent\n\n\ndef printNodes(nodes):\n print('Print all nodes')\n for node in nodes:\n print(node)\n\n\ndef solve():\n raw_node_list = [s.rstrip() for s in input.split('\\n')]\n regex = re.compile('(\\\\w+) \\\\((\\\\d+)\\\\)')\n master_node = None\n nodes = []\n all_linked_node_names = []\n for n in raw_node_list:\n pmn = regex.match(n)\n np = Node(pmn.group(1).rstrip(), pmn.group(2).rstrip(), [])\n links = n.split('->')\n if len(links) > 1:\n np.linked = [s.strip() for s in links[1].split(',')]\n if np:\n nodes.append(np)\n for link in np.linked:\n all_linked_node_names.append(link)\n for node in nodes:\n if len(node.linked) > 1 and not node.name in ':'.join(\n all_linked_node_names):\n master_node = node\n buildTree(master_node, nodes)\n traverseTree(master_node, 0)\n print('Detect unstable:')\n detectUnstable(master_node, 0)\n\n\nif __name__ == '__main__':\n solve()\n print('Finished executing: ' + task)\n sys.exit(1)\n",
"step-5": "#!/user/bin/env python3 -tt\n\n\"\"\"\nhttps://adventofcode.com/2017/day/7\n\"\"\"\nimport sys\nimport re\n\n# Global variables\ntask=\"d-7\"\ninfile=task + \".input\"\n\nwith open('input/' + infile) as file:\n input = file.read()\nfile.close()\n\nclass Node:\n parent = None\n children = None\n weight_sum = 0\n\n def __init__(self, name, weight, linked):\n self.name = name\n self.weight = int(weight)\n self.weight_sum += self.weight\n self.linked = linked\n self.children = []\n\n def __str__(self):\n children_str = \" \".join(self.linked)\n parent_name = self.parent.name if self.parent else \"KING\"\n return \"\" + self.name + \" : \" + parent_name + \":\" + str(self.weight) + \" : \" + children_str #+ \":||||:\" + \" \".join([n.name for n in self.children])\n\nnodeTree = []\ndef traverseTree(node, deep):\n\n if len(nodeTree) <= deep:\n nodeTree.append([])\n\n nodeTree[deep].append(node)\n if not node.children:\n return\n for child in node.children:\n traverseTree(child, deep+1)\n\ndef detectUnstable(node, deep):\n weights = []\n\n for n in node.children:\n if n.weight_sum not in weights:\n weights.append(n.weight_sum)\n\n for n in node.children:\n if len(weights) > 1 and max(weights) == n.weight_sum:\n print(\"found unstable for: \", n.name, \" in: \", weights)\n diff = max(weights) - min(weights)\n new_weight = n.weight - diff\n print(\"[\", deep, \"]New weight:\", new_weight, \" for: \", n, \" diff: \", diff)\n\n for n in node.children:\n if len(nodeTree) > deep + 1:\n detectUnstable(n, deep +1)\n\n\ndef buildTree(parent, nodes):\n if not parent.linked:\n return parent\n for link_str in parent.linked:\n for node in nodes:\n #Find matching node\n if link_str == node.name:\n node.parent = parent\n parent.children.append(node)\n child = buildTree(node, nodes)\n parent.weight_sum += child.weight_sum\n\n return parent\n\ndef printNodes(nodes):\n print(\"Print all nodes\")\n for node in nodes:\n print(node)\n\n\ndef solve():\n raw_node_list = [s.rstrip() for s in input.split(\"\\n\")]\n regex = re.compile(r'(\\w+) \\((\\d+)\\)')\n\n master_node = None\n nodes = []\n all_linked_node_names = []\n for n in raw_node_list:\n pmn = regex.match(n)\n np = Node(pmn.group(1).rstrip(), pmn.group(2).rstrip(), [])\n links = n.split(\"->\")\n if len(links) > 1:\n np.linked = [s.strip() for s in links[1].split(\",\")]\n if np:\n nodes.append(np)\n for link in np.linked:\n all_linked_node_names.append(link)\n\n for node in nodes:\n if len(node.linked) > 1 and not node.name in \":\".join(all_linked_node_names):\n master_node = node\n\n buildTree(master_node, nodes)\n traverseTree(master_node, 0)\n\n# for row in nodeTree:\n# nodes = \"\"\n# for node in row:\n# parent = node.parent.name if node.parent else \"KING\"\n# nodes += \"[\" + parent + \"]\" + node.name + \":\" + str(node.weight_sum)\n# print(nodes)\n\n# for weights in weightTree:\n# if len(weights) > 1:\n# print(\":\".join([str(s) for s in weights]))\n\n print(\"Detect unstable:\")\n detectUnstable(master_node, 0)\n\nif __name__ == '__main__':\n\n solve()\n\n print(\"Finished executing: \" + task)\n sys.exit(1)\n",
"step-ids": [
6,
8,
10,
12,
13
]
}
|
[
6,
8,
10,
12,
13
] |
import multiprocessing
import time
def foo():
time.sleep(0.1)
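# Launch the worker, terminate it mid-sleep, then join and read the exit
# code (a negative exitcode means the process was killed by that signal).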
p = multiprocessing.Process(target=foo)
p.start()
print("process running: ", p, p.is_alive())
p.terminate()
print("process running: ", p, p.is_alive())
p.join()
print("process running: ", p, p.is_alive())
print("process exit code:", p.exitcode)
|
normal
|
{
"blob_id": "19aad7d45416e311530aa2ce3e854cf1f65d18f5",
"index": 960,
"step-1": "<mask token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\n<mask token>\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"step-3": "<mask token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"step-4": "import multiprocessing\nimport time\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"step-5": "import multiprocessing\nimport time\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint(\"process running: \", p, p.is_alive())\np.terminate()\nprint(\"process running: \", p, p.is_alive())\np.join()\nprint(\"process running: \", p, p.is_alive())\nprint(\"process exit code:\", p.exitcode)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 17:16:12 2019
@author: Meagatron
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import math
import itertools
from dtw import dtw
import timeit
from helper_functions import normalize,alphabetize_ts,hamming_distance
"""------------- Intialization ------------- """
start = timeit.default_timer()
data = pd.read_csv('test_data2.csv', sep=',', header=None)
x1 = data.iloc[1:,1].values.flatten()
x1=np.asfarray(x1,float)
y_alphabet_size=4
word_lenth=3
window_size=round( len(x1) *10 /100 )
skip_offset=round(window_size/2)
ham_distance=1
epsilon = 1e-6
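# Slide a window (stride = skip_offset) over the series and turn each window
# into a SAX word of `word_lenth` letters; per-window metadata (position,
# scale, start index) is accumulated into df_sax.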
def segment_ts():
ts_len=len(x1)
mod = ts_len%window_size
rnge=0
if(skip_offset==0):
ts_len=int((ts_len-mod-window_size)/1)
rnge=int(ts_len/window_size)
else:
ts_len=int(math.ceil((ts_len-window_size)/skip_offset))
rnge=int(ts_len)
curr_count=0
words=list()
indices=list()
complete_indices=list()
for i in range(0, rnge):
sub_section = x1[curr_count:(curr_count+window_size)]
sub_section=normalize(sub_section)
curr_word=""
chunk_size=int(len(sub_section)/word_lenth)
num=0
curr_letter=""
for j in range(0,word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_letter=alphabetize_ts(chunk)
curr_word+=str(curr_letter)
complete_indices.append(curr_count)
num+=chunk_size
words.append(curr_word)
indices.append(curr_count)
temp_list=[]
temp_list.append(sub_section)
temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=temp_list)
temp_df.insert(loc=0, column='keys', value=curr_word)
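        # median of the sorted window, used as the subsequence's vertical position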
temp_df.insert(loc=0, column='position', value=sorted(sub_section)[len(sub_section) // 2])
temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))
temp_df.insert(loc=0, column='indices', value=curr_count)
curr_count=curr_count+skip_offset-1
if(i==0):
df_sax =temp_df.copy()
else:
df_sax=df_sax.append(temp_df, ignore_index=True)
return (words,indices,df_sax)
alphabetize,indices,df_sax=segment_ts()
""" Complete Words """
def complete_word():
complete_word=list()
complete_indices=indices
""" Simillar Words """
complete_word=alphabetize
sax = defaultdict(list)
for i in range(0,len(complete_word)):
if(len(complete_word[i])==word_lenth):
sax[complete_word[i]].append(complete_indices[i])
return sax
#alphabetize1,indices1,df_sax=segment_ts()
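# Bucket SAX words by shape: two words are grouped when their Hamming
# distance is exactly `ham_distance`, and their window indices are merged.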
def Compare_Shape():
simillar_word=complete_word()
map_keys = defaultdict(list)
map_indices=defaultdict(list)
for key_i in simillar_word:
temp_list=list()
temp_list.append(simillar_word.get(key_i))
map_keys[key_i].append(key_i)
for key_j in simillar_word:
dist=hamming_distance(key_i, key_j)
if(dist==ham_distance and key_i !=key_j):
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
else:
map_keys[key_i].append([])
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return (map_keys,map_indices)
compare_strings,compare_list=Compare_Shape()
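# Within each bucket, compare every distinct pair of subsequences with DTW
# and collect the alignment costs in a DataFrame.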
def dtw_test2():
df_dtw_prep=df_sax
dtw_df=pd.DataFrame()
for k, v in compare_list.items():
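        # v is a one-element list wrapping the merged index list, so the ints
        # are recovered from its string form (str(v)[2:-2] strips '[[' and ']]')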
v_temp=str(v)[2:-2]
v1=[int(s) for s in v_temp.split(',')]
for i in range(0,len(v1)-1):
for j in range(i,len(v1)):
if(v1[i] != v1[j]):
row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]
row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]
sub_section1 = row1.iloc[0]['sub_section']
sub_section2 = row2.iloc[0]['sub_section']
index1 = row1.iloc[0]['indices']
index2 = row2.iloc[0]['indices']
x=np.array(sub_section1).reshape(-1, 1)
y=np.array(sub_section2).reshape(-1, 1)
euclidean_norm = lambda x, y: np.abs(x - y)
dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x, y, dist=euclidean_norm)
temp_df = pd.DataFrame([[k,index1,index2,sub_section1,sub_section2,dtw_value]],
columns=['keyy','index1','index2','sub_section1','sub_section2','dtw_value'])
dtw_df=dtw_df.append(temp_df,ignore_index=True)
return(dtw_df)
dt_test = dtw_test2()
stop = timeit.default_timer()
print('Time: ', stop - start)
|
normal
|
{
"blob_id": "16215ee42c4ea284dca0ebb7372fef04c0cc54b9",
"index": 2149,
"step-1": "<mask token>\n\n\ndef segment_ts():\n ts_len = len(x1)\n mod = ts_len % window_size\n rnge = 0\n if skip_offset == 0:\n ts_len = int((ts_len - mod - window_size) / 1)\n rnge = int(ts_len / window_size)\n else:\n ts_len = int(math.ceil((ts_len - window_size) / skip_offset))\n rnge = int(ts_len)\n curr_count = 0\n words = list()\n indices = list()\n complete_indices = list()\n for i in range(0, rnge):\n sub_section = x1[curr_count:curr_count + window_size]\n sub_section = normalize(sub_section)\n curr_word = ''\n chunk_size = int(len(sub_section) / word_lenth)\n num = 0\n curr_letter = ''\n for j in range(0, word_lenth):\n chunk = sub_section[num:num + chunk_size]\n curr_letter = alphabetize_ts(chunk)\n curr_word += str(curr_letter)\n complete_indices.append(curr_count)\n num += chunk_size\n words.append(curr_word)\n indices.append(curr_count)\n temp_list = []\n temp_list.append(sub_section)\n temp_df = pd.DataFrame()\n temp_df.insert(loc=0, column='sub_section', value=temp_list)\n temp_df.insert(loc=0, column='keys', value=curr_word)\n temp_df.insert(loc=0, column='position', value=sorted(sub_section)[\n len(sub_section) // 2])\n temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))\n temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))\n temp_df.insert(loc=0, column='indices', value=curr_count)\n curr_count = curr_count + skip_offset - 1\n if i == 0:\n df_sax = temp_df.copy()\n else:\n df_sax = df_sax.append(temp_df, ignore_index=True)\n return words, indices, df_sax\n\n\n<mask token>\n\n\ndef complete_word():\n complete_word = list()\n complete_indices = indices\n \"\"\" Simillar Words \"\"\"\n complete_word = alphabetize\n sax = defaultdict(list)\n for i in range(0, len(complete_word)):\n if len(complete_word[i]) == word_lenth:\n sax[complete_word[i]].append(complete_indices[i])\n return sax\n\n\ndef Compare_Shape():\n simillar_word = complete_word()\n map_keys = defaultdict(list)\n map_indices = defaultdict(list)\n for key_i in simillar_word:\n temp_list = list()\n temp_list.append(simillar_word.get(key_i))\n map_keys[key_i].append(key_i)\n for key_j in simillar_word:\n dist = hamming_distance(key_i, key_j)\n if dist == ham_distance and key_i != key_j:\n map_keys[key_i].append(key_j)\n temp_list.append(simillar_word.get(key_j))\n else:\n map_keys[key_i].append([])\n tempp = list(itertools.chain(*temp_list))\n map_indices[key_i].append(tempp)\n return map_keys, map_indices\n\n\n<mask token>\n\n\ndef dtw_test2():\n df_dtw_prep = df_sax\n dtw_df = pd.DataFrame()\n for k, v in compare_list.items():\n v_temp = str(v)[2:-2]\n v1 = [int(s) for s in v_temp.split(',')]\n for i in range(0, len(v1) - 1):\n for j in range(i, len(v1)):\n if v1[i] != v1[j]:\n row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]\n row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]\n sub_section1 = row1.iloc[0]['sub_section']\n sub_section2 = row2.iloc[0]['sub_section']\n index1 = row1.iloc[0]['indices']\n index2 = row2.iloc[0]['indices']\n x = np.array(sub_section1).reshape(-1, 1)\n y = np.array(sub_section2).reshape(-1, 1)\n euclidean_norm = lambda x, y: np.abs(x - y)\n dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x,\n y, dist=euclidean_norm)\n temp_df = pd.DataFrame([[k, index1, index2,\n sub_section1, sub_section2, dtw_value]], columns=[\n 'keyy', 'index1', 'index2', 'sub_section1',\n 'sub_section2', 'dtw_value'])\n dtw_df = dtw_df.append(temp_df, ignore_index=True)\n return dtw_df\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef segment_ts():\n ts_len = len(x1)\n mod = ts_len % window_size\n rnge = 0\n if skip_offset == 0:\n ts_len = int((ts_len - mod - window_size) / 1)\n rnge = int(ts_len / window_size)\n else:\n ts_len = int(math.ceil((ts_len - window_size) / skip_offset))\n rnge = int(ts_len)\n curr_count = 0\n words = list()\n indices = list()\n complete_indices = list()\n for i in range(0, rnge):\n sub_section = x1[curr_count:curr_count + window_size]\n sub_section = normalize(sub_section)\n curr_word = ''\n chunk_size = int(len(sub_section) / word_lenth)\n num = 0\n curr_letter = ''\n for j in range(0, word_lenth):\n chunk = sub_section[num:num + chunk_size]\n curr_letter = alphabetize_ts(chunk)\n curr_word += str(curr_letter)\n complete_indices.append(curr_count)\n num += chunk_size\n words.append(curr_word)\n indices.append(curr_count)\n temp_list = []\n temp_list.append(sub_section)\n temp_df = pd.DataFrame()\n temp_df.insert(loc=0, column='sub_section', value=temp_list)\n temp_df.insert(loc=0, column='keys', value=curr_word)\n temp_df.insert(loc=0, column='position', value=sorted(sub_section)[\n len(sub_section) // 2])\n temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))\n temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))\n temp_df.insert(loc=0, column='indices', value=curr_count)\n curr_count = curr_count + skip_offset - 1\n if i == 0:\n df_sax = temp_df.copy()\n else:\n df_sax = df_sax.append(temp_df, ignore_index=True)\n return words, indices, df_sax\n\n\n<mask token>\n\n\ndef complete_word():\n complete_word = list()\n complete_indices = indices\n \"\"\" Simillar Words \"\"\"\n complete_word = alphabetize\n sax = defaultdict(list)\n for i in range(0, len(complete_word)):\n if len(complete_word[i]) == word_lenth:\n sax[complete_word[i]].append(complete_indices[i])\n return sax\n\n\ndef Compare_Shape():\n simillar_word = complete_word()\n map_keys = defaultdict(list)\n map_indices = defaultdict(list)\n for key_i in simillar_word:\n temp_list = list()\n temp_list.append(simillar_word.get(key_i))\n map_keys[key_i].append(key_i)\n for key_j in simillar_word:\n dist = hamming_distance(key_i, key_j)\n if dist == ham_distance and key_i != key_j:\n map_keys[key_i].append(key_j)\n temp_list.append(simillar_word.get(key_j))\n else:\n map_keys[key_i].append([])\n tempp = list(itertools.chain(*temp_list))\n map_indices[key_i].append(tempp)\n return map_keys, map_indices\n\n\n<mask token>\n\n\ndef dtw_test2():\n df_dtw_prep = df_sax\n dtw_df = pd.DataFrame()\n for k, v in compare_list.items():\n v_temp = str(v)[2:-2]\n v1 = [int(s) for s in v_temp.split(',')]\n for i in range(0, len(v1) - 1):\n for j in range(i, len(v1)):\n if v1[i] != v1[j]:\n row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]\n row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]\n sub_section1 = row1.iloc[0]['sub_section']\n sub_section2 = row2.iloc[0]['sub_section']\n index1 = row1.iloc[0]['indices']\n index2 = row2.iloc[0]['indices']\n x = np.array(sub_section1).reshape(-1, 1)\n y = np.array(sub_section2).reshape(-1, 1)\n euclidean_norm = lambda x, y: np.abs(x - y)\n dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x,\n y, dist=euclidean_norm)\n temp_df = pd.DataFrame([[k, index1, index2,\n sub_section1, sub_section2, dtw_value]], columns=[\n 'keyy', 'index1', 'index2', 'sub_section1',\n 'sub_section2', 'dtw_value'])\n dtw_df = dtw_df.append(temp_df, ignore_index=True)\n return dtw_df\n\n\n<mask token>\nprint('Time: ', stop - start)\n",
"step-3": "<mask token>\nstart = timeit.default_timer()\ndata = pd.read_csv('test_data2.csv', sep=',', header=None)\nx1 = data.iloc[1:, 1].values.flatten()\nx1 = np.asfarray(x1, float)\ny_alphabet_size = 4\nword_lenth = 3\nwindow_size = round(len(x1) * 10 / 100)\nskip_offset = round(window_size / 2)\nham_distance = 1\nepsilon = 1e-06\n\n\ndef segment_ts():\n ts_len = len(x1)\n mod = ts_len % window_size\n rnge = 0\n if skip_offset == 0:\n ts_len = int((ts_len - mod - window_size) / 1)\n rnge = int(ts_len / window_size)\n else:\n ts_len = int(math.ceil((ts_len - window_size) / skip_offset))\n rnge = int(ts_len)\n curr_count = 0\n words = list()\n indices = list()\n complete_indices = list()\n for i in range(0, rnge):\n sub_section = x1[curr_count:curr_count + window_size]\n sub_section = normalize(sub_section)\n curr_word = ''\n chunk_size = int(len(sub_section) / word_lenth)\n num = 0\n curr_letter = ''\n for j in range(0, word_lenth):\n chunk = sub_section[num:num + chunk_size]\n curr_letter = alphabetize_ts(chunk)\n curr_word += str(curr_letter)\n complete_indices.append(curr_count)\n num += chunk_size\n words.append(curr_word)\n indices.append(curr_count)\n temp_list = []\n temp_list.append(sub_section)\n temp_df = pd.DataFrame()\n temp_df.insert(loc=0, column='sub_section', value=temp_list)\n temp_df.insert(loc=0, column='keys', value=curr_word)\n temp_df.insert(loc=0, column='position', value=sorted(sub_section)[\n len(sub_section) // 2])\n temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))\n temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))\n temp_df.insert(loc=0, column='indices', value=curr_count)\n curr_count = curr_count + skip_offset - 1\n if i == 0:\n df_sax = temp_df.copy()\n else:\n df_sax = df_sax.append(temp_df, ignore_index=True)\n return words, indices, df_sax\n\n\nalphabetize, indices, df_sax = segment_ts()\n<mask token>\n\n\ndef complete_word():\n complete_word = list()\n complete_indices = indices\n \"\"\" Simillar Words \"\"\"\n complete_word = alphabetize\n sax = defaultdict(list)\n for i in range(0, len(complete_word)):\n if len(complete_word[i]) == word_lenth:\n sax[complete_word[i]].append(complete_indices[i])\n return sax\n\n\ndef Compare_Shape():\n simillar_word = complete_word()\n map_keys = defaultdict(list)\n map_indices = defaultdict(list)\n for key_i in simillar_word:\n temp_list = list()\n temp_list.append(simillar_word.get(key_i))\n map_keys[key_i].append(key_i)\n for key_j in simillar_word:\n dist = hamming_distance(key_i, key_j)\n if dist == ham_distance and key_i != key_j:\n map_keys[key_i].append(key_j)\n temp_list.append(simillar_word.get(key_j))\n else:\n map_keys[key_i].append([])\n tempp = list(itertools.chain(*temp_list))\n map_indices[key_i].append(tempp)\n return map_keys, map_indices\n\n\ncompare_strings, compare_list = Compare_Shape()\n\n\ndef dtw_test2():\n df_dtw_prep = df_sax\n dtw_df = pd.DataFrame()\n for k, v in compare_list.items():\n v_temp = str(v)[2:-2]\n v1 = [int(s) for s in v_temp.split(',')]\n for i in range(0, len(v1) - 1):\n for j in range(i, len(v1)):\n if v1[i] != v1[j]:\n row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]\n row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]\n sub_section1 = row1.iloc[0]['sub_section']\n sub_section2 = row2.iloc[0]['sub_section']\n index1 = row1.iloc[0]['indices']\n index2 = row2.iloc[0]['indices']\n x = np.array(sub_section1).reshape(-1, 1)\n y = np.array(sub_section2).reshape(-1, 1)\n euclidean_norm = lambda x, y: np.abs(x - y)\n dtw_value, 
cost_matrix, acc_cost_matrix, path = dtw(x,\n y, dist=euclidean_norm)\n temp_df = pd.DataFrame([[k, index1, index2,\n sub_section1, sub_section2, dtw_value]], columns=[\n 'keyy', 'index1', 'index2', 'sub_section1',\n 'sub_section2', 'dtw_value'])\n dtw_df = dtw_df.append(temp_df, ignore_index=True)\n return dtw_df\n\n\ndt_test = dtw_test2()\nstop = timeit.default_timer()\nprint('Time: ', stop - start)\n",
"step-4": "<mask token>\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nimport math\nimport itertools\nfrom dtw import dtw\nimport timeit\nfrom helper_functions import normalize, alphabetize_ts, hamming_distance\n<mask token>\nstart = timeit.default_timer()\ndata = pd.read_csv('test_data2.csv', sep=',', header=None)\nx1 = data.iloc[1:, 1].values.flatten()\nx1 = np.asfarray(x1, float)\ny_alphabet_size = 4\nword_lenth = 3\nwindow_size = round(len(x1) * 10 / 100)\nskip_offset = round(window_size / 2)\nham_distance = 1\nepsilon = 1e-06\n\n\ndef segment_ts():\n ts_len = len(x1)\n mod = ts_len % window_size\n rnge = 0\n if skip_offset == 0:\n ts_len = int((ts_len - mod - window_size) / 1)\n rnge = int(ts_len / window_size)\n else:\n ts_len = int(math.ceil((ts_len - window_size) / skip_offset))\n rnge = int(ts_len)\n curr_count = 0\n words = list()\n indices = list()\n complete_indices = list()\n for i in range(0, rnge):\n sub_section = x1[curr_count:curr_count + window_size]\n sub_section = normalize(sub_section)\n curr_word = ''\n chunk_size = int(len(sub_section) / word_lenth)\n num = 0\n curr_letter = ''\n for j in range(0, word_lenth):\n chunk = sub_section[num:num + chunk_size]\n curr_letter = alphabetize_ts(chunk)\n curr_word += str(curr_letter)\n complete_indices.append(curr_count)\n num += chunk_size\n words.append(curr_word)\n indices.append(curr_count)\n temp_list = []\n temp_list.append(sub_section)\n temp_df = pd.DataFrame()\n temp_df.insert(loc=0, column='sub_section', value=temp_list)\n temp_df.insert(loc=0, column='keys', value=curr_word)\n temp_df.insert(loc=0, column='position', value=sorted(sub_section)[\n len(sub_section) // 2])\n temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))\n temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))\n temp_df.insert(loc=0, column='indices', value=curr_count)\n curr_count = curr_count + skip_offset - 1\n if i == 0:\n df_sax = temp_df.copy()\n else:\n df_sax = df_sax.append(temp_df, ignore_index=True)\n return words, indices, df_sax\n\n\nalphabetize, indices, df_sax = segment_ts()\n<mask token>\n\n\ndef complete_word():\n complete_word = list()\n complete_indices = indices\n \"\"\" Simillar Words \"\"\"\n complete_word = alphabetize\n sax = defaultdict(list)\n for i in range(0, len(complete_word)):\n if len(complete_word[i]) == word_lenth:\n sax[complete_word[i]].append(complete_indices[i])\n return sax\n\n\ndef Compare_Shape():\n simillar_word = complete_word()\n map_keys = defaultdict(list)\n map_indices = defaultdict(list)\n for key_i in simillar_word:\n temp_list = list()\n temp_list.append(simillar_word.get(key_i))\n map_keys[key_i].append(key_i)\n for key_j in simillar_word:\n dist = hamming_distance(key_i, key_j)\n if dist == ham_distance and key_i != key_j:\n map_keys[key_i].append(key_j)\n temp_list.append(simillar_word.get(key_j))\n else:\n map_keys[key_i].append([])\n tempp = list(itertools.chain(*temp_list))\n map_indices[key_i].append(tempp)\n return map_keys, map_indices\n\n\ncompare_strings, compare_list = Compare_Shape()\n\n\ndef dtw_test2():\n df_dtw_prep = df_sax\n dtw_df = pd.DataFrame()\n for k, v in compare_list.items():\n v_temp = str(v)[2:-2]\n v1 = [int(s) for s in v_temp.split(',')]\n for i in range(0, len(v1) - 1):\n for j in range(i, len(v1)):\n if v1[i] != v1[j]:\n row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]\n row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]\n sub_section1 = 
row1.iloc[0]['sub_section']\n sub_section2 = row2.iloc[0]['sub_section']\n index1 = row1.iloc[0]['indices']\n index2 = row2.iloc[0]['indices']\n x = np.array(sub_section1).reshape(-1, 1)\n y = np.array(sub_section2).reshape(-1, 1)\n euclidean_norm = lambda x, y: np.abs(x - y)\n dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x,\n y, dist=euclidean_norm)\n temp_df = pd.DataFrame([[k, index1, index2,\n sub_section1, sub_section2, dtw_value]], columns=[\n 'keyy', 'index1', 'index2', 'sub_section1',\n 'sub_section2', 'dtw_value'])\n dtw_df = dtw_df.append(temp_df, ignore_index=True)\n return dtw_df\n\n\ndt_test = dtw_test2()\nstop = timeit.default_timer()\nprint('Time: ', stop - start)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 3 17:16:12 2019\n\n@author: Meagatron\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nimport math\nimport itertools\nfrom dtw import dtw\nimport timeit\n\nfrom helper_functions import normalize,alphabetize_ts,hamming_distance\n\n\n\n\"\"\"------------- Intialization ------------- \"\"\"\nstart = timeit.default_timer()\n\ndata = pd.read_csv('test_data2.csv', sep=',', header=None)\nx1 = data.iloc[1:,1].values.flatten() \nx1=np.asfarray(x1,float)\n\n\n\n\n\ny_alphabet_size=4\nword_lenth=3\nwindow_size=round( len(x1) *10 /100 )\nskip_offset=round(window_size/2)\nham_distance=1\nepsilon = 1e-6\n\n\ndef segment_ts():\n\n\n ts_len=len(x1)\n\n mod = ts_len%window_size\n rnge=0\n if(skip_offset==0):\n ts_len=int((ts_len-mod-window_size)/1)\n rnge=int(ts_len/window_size)\n else:\n ts_len=int(math.ceil((ts_len-window_size)/skip_offset))\n rnge=int(ts_len)\n\n curr_count=0\n words=list()\n indices=list()\n complete_indices=list()\n \n for i in range(0, rnge):\n\n sub_section = x1[curr_count:(curr_count+window_size)]\n sub_section=normalize(sub_section)\n \n curr_word=\"\"\n chunk_size=int(len(sub_section)/word_lenth)\n num=0\n curr_letter=\"\"\n for j in range(0,word_lenth):\n chunk = sub_section[num:num + chunk_size]\n curr_letter=alphabetize_ts(chunk)\n curr_word+=str(curr_letter)\n complete_indices.append(curr_count)\n num+=chunk_size\n\n words.append(curr_word)\n indices.append(curr_count)\n \n\n temp_list=[]\n temp_list.append(sub_section)\n \n \n temp_df = pd.DataFrame()\n temp_df.insert(loc=0, column='sub_section', value=temp_list)\n temp_df.insert(loc=0, column='keys', value=curr_word)\n temp_df.insert(loc=0, column='position', value=sorted(sub_section)[len(sub_section) // 2])\n temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))\n temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))\n temp_df.insert(loc=0, column='indices', value=curr_count)\n \n \n curr_count=curr_count+skip_offset-1\n\n if(i==0):\n\n df_sax =temp_df.copy()\n else:\n df_sax=df_sax.append(temp_df, ignore_index=True)\n\n return (words,indices,df_sax)\n\n\nalphabetize,indices,df_sax=segment_ts()\n\n\n\n\"\"\" Complete Words \"\"\"\ndef complete_word():\n \n complete_word=list()\n complete_indices=indices\n\n \"\"\" Simillar Words \"\"\"\n complete_word=alphabetize\n sax = defaultdict(list)\n for i in range(0,len(complete_word)):\n if(len(complete_word[i])==word_lenth):\n sax[complete_word[i]].append(complete_indices[i])\n return sax\n\n#alphabetize1,indices1,df_sax=segment_ts()\n\n\ndef Compare_Shape():\n simillar_word=complete_word()\n map_keys = defaultdict(list)\n map_indices=defaultdict(list)\n \n \n for key_i in simillar_word:\n temp_list=list()\n temp_list.append(simillar_word.get(key_i))\n map_keys[key_i].append(key_i)\n \n for key_j in simillar_word:\n dist=hamming_distance(key_i, key_j)\n if(dist==ham_distance and key_i !=key_j):\n map_keys[key_i].append(key_j)\n temp_list.append(simillar_word.get(key_j))\n else:\n map_keys[key_i].append([])\n\n tempp = list(itertools.chain(*temp_list))\n map_indices[key_i].append(tempp) \n return (map_keys,map_indices)\n\n\n\n\ncompare_strings,compare_list=Compare_Shape()\n\n\ndef dtw_test2 ():\n df_dtw_prep=df_sax\n \n dtw_df=pd.DataFrame()\n \n \n for k, v in compare_list.items():\n \n v_temp=str(v)[2:-2]\n v1=[int(s) for s in v_temp.split(',')]\n\n for i in range(0,len(v1)-1):\n for j in range(i,len(v1)):\n \n \n 
if(v1[i] != v1[j]):\n \n \n\n row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]\n row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]\n \n sub_section1 = row1.iloc[0]['sub_section']\n sub_section2 = row2.iloc[0]['sub_section']\n \n \n index1 = row1.iloc[0]['indices']\n index2 = row2.iloc[0]['indices']\n \n\n x=np.array(sub_section1).reshape(-1, 1)\n y=np.array(sub_section2).reshape(-1, 1)\n\n euclidean_norm = lambda x, y: np.abs(x - y)\n dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x, y, dist=euclidean_norm)\n \n \n temp_df = pd.DataFrame([[k,index1,index2,sub_section1,sub_section2,dtw_value]], \n columns=['keyy','index1','index2','sub_section1','sub_section2','dtw_value'])\n dtw_df=dtw_df.append(temp_df,ignore_index=True)\n \n \n return(dtw_df)\n\n\ndt_test=dtw_test2 ()\n\n\nstop = timeit.default_timer()\nprint('Time: ', stop - start) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""Coroutine utilities."""
from decorator import decorator
@decorator
def coroutine(f, *a, **kw):
"""This decorator starts the coroutine for us."""
i = f(*a, **kw)
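    # prime the generator: advance to the first yield (Python 2's .next();
    # on Python 3 this would be next(i))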
i.next()
return i
|
normal
|
{
"blob_id": "6bde0ce30f33b155cc4c9ce9aa2ea6a6c5a1231d",
"index": 5472,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@decorator\ndef coroutine(f, *a, **kw):\n \"\"\"This decorator starts the coroutine for us.\"\"\"\n i = f(*a, **kw)\n i.next()\n return i\n",
"step-3": "<mask token>\nfrom decorator import decorator\n\n\n@decorator\ndef coroutine(f, *a, **kw):\n \"\"\"This decorator starts the coroutine for us.\"\"\"\n i = f(*a, **kw)\n i.next()\n return i\n",
"step-4": "\"\"\"Coroutine utilities.\"\"\"\n\nfrom decorator import decorator\n\n@decorator\ndef coroutine(f, *a, **kw):\n \"\"\"This decorator starts the coroutine for us.\"\"\"\n i = f(*a, **kw)\n i.next()\n return i\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CachedS3Storage(CachedFilesMixin, S3Boto3Storage):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CachedS3Storage(CachedFilesMixin, S3Boto3Storage):
pass
StaticRootS3BotoStorage = lambda : CachedS3Storage(location='static')
MediaRootS3BotoStorage = lambda : S3Boto3Storage(location='media')
<|reserved_special_token_1|>
from django.contrib.staticfiles.storage import CachedFilesMixin
from storages.backends.s3boto3 import S3Boto3Storage
class CachedS3Storage(CachedFilesMixin, S3Boto3Storage):
pass
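# Common django-storages recipe: point STATICFILES_STORAGE and
# DEFAULT_FILE_STORAGE at these callables so static and media files live
# under separate S3 key prefixes.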
StaticRootS3BotoStorage = lambda : CachedS3Storage(location='static')
MediaRootS3BotoStorage = lambda : S3Boto3Storage(location='media')
|
flexible
|
{
"blob_id": "e99ff1c75d5108efc8d587d4533c34eeb15c6978",
"index": 9425,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CachedS3Storage(CachedFilesMixin, S3Boto3Storage):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CachedS3Storage(CachedFilesMixin, S3Boto3Storage):\n pass\n\n\nStaticRootS3BotoStorage = lambda : CachedS3Storage(location='static')\nMediaRootS3BotoStorage = lambda : S3Boto3Storage(location='media')\n",
"step-4": "from django.contrib.staticfiles.storage import CachedFilesMixin\nfrom storages.backends.s3boto3 import S3Boto3Storage\n\n\nclass CachedS3Storage(CachedFilesMixin, S3Boto3Storage):\n pass\n\n\nStaticRootS3BotoStorage = lambda : CachedS3Storage(location='static')\nMediaRootS3BotoStorage = lambda : S3Boto3Storage(location='media')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import absolute_import, division, print_function
from .core import Bag, Item, from_sequence, from_filenames
from ..context import set_options
|
normal
|
{
"blob_id": "4e77c7ac784ec235e9925004069131d16717e89a",
"index": 9676,
"step-1": "<mask token>\n",
"step-2": "from __future__ import absolute_import, division, print_function\nfrom .core import Bag, Item, from_sequence, from_filenames\nfrom ..context import set_options\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#
# Copyright (C) 2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Change varchar ID to UUID
Revision ID: 1bb42ff54435
Revises: 6bbbf58ed9de
Create Date: 2017-02-07 09:28:37.493302
"""
# revision identifiers, used by Alembic.
revision = '1bb42ff54435'
down_revision = '6bbbf58ed9de'
branch_labels = None
depends_on = None
from alembic import op
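# Strategy: drop every foreign key first, ALTER each varchar column to a
# native UUID in place, then recreate the constraints.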
def upgrade():
    # Drop all foreign key constraints
op.drop_constraint('component_files_component_id_fkey', 'component_files')
op.drop_constraint('components_topic_id_fkey', 'components')
op.drop_constraint('files_job_id_fkey', 'files')
op.drop_constraint('files_jobstate_id_fkey', 'files')
op.drop_constraint('files_team_id_fkey', 'files')
op.drop_constraint('files_test_id_fkey', 'files')
op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')
op.drop_constraint('jobs_team_id_fkey', 'jobs')
op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')
op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')
op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')
op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')
op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')
op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')
op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')
op.drop_constraint('jobstates_team_id_fkey', 'jobstates')
op.drop_constraint('jobstates_job_id_fkey', 'jobstates')
op.drop_constraint('logs_team_id_fkey', 'logs')
op.drop_constraint('logs_user_id_fkey', 'logs')
op.drop_constraint('metas_job_id_fkey', 'metas')
op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')
op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')
op.drop_constraint('remotecis_team_id_fkey', 'remotecis')
op.drop_constraint('tests_team_id_fkey', 'tests')
op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')
op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')
op.drop_constraint('topics_next_topic_fkey', 'topics')
op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')
op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')
op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')
op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')
op.drop_constraint('users_team_id_fkey', 'users')
    # Change column types from varchar to UUID
# Table component_files
op.execute("ALTER TABLE component_files ALTER COLUMN component_id TYPE \
UUID USING component_id::uuid")
op.execute("ALTER TABLE component_files ALTER COLUMN id TYPE \
UUID USING id::uuid")
# Table components
op.execute("ALTER TABLE components ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE components ALTER COLUMN topic_id TYPE \
UUID USING topic_id::uuid")
# Table files
op.execute("ALTER TABLE files ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE files ALTER COLUMN jobstate_id TYPE \
UUID USING jobstate_id::uuid")
op.execute("ALTER TABLE files ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
op.execute("ALTER TABLE files ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
op.execute("ALTER TABLE files ALTER COLUMN test_id TYPE \
UUID USING test_id::uuid")
# Table issues
op.execute("ALTER TABLE issues ALTER COLUMN id TYPE \
UUID USING id::uuid")
# Table jobdefinition_tests
op.execute("ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id \
TYPE UUID USING jobdefinition_id::uuid")
op.execute("ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE \
UUID USING test_id::uuid")
# Table jobdefinitions
op.execute("ALTER TABLE jobdefinitions ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE \
UUID USING topic_id::uuid")
# Table jobs
op.execute("ALTER TABLE jobs ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE \
UUID USING jobdefinition_id::uuid")
op.execute("ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE \
UUID USING remoteci_id::uuid")
op.execute("ALTER TABLE jobs ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
op.execute("ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE \
UUID USING previous_job_id::uuid")
# Table jobs_components
op.execute("ALTER TABLE jobs_components ALTER COLUMN component_id TYPE \
UUID USING component_id::uuid")
op.execute("ALTER TABLE jobs_components ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
# Table jobs_issues
op.execute("ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
op.execute("ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE \
UUID USING issue_id::uuid")
# Table jobstates
op.execute("ALTER TABLE jobstates ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE jobstates ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
op.execute("ALTER TABLE jobstates ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table logs
op.execute("ALTER TABLE logs ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE logs ALTER COLUMN user_id TYPE \
UUID USING user_id::uuid")
op.execute("ALTER TABLE logs ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table metas
op.execute("ALTER TABLE metas ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE metas ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
# Table remoteci_tests
op.execute("ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE \
UUID USING remoteci_id::uuid")
op.execute("ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE \
UUID USING test_id::uuid")
# Table remotecis
op.execute("ALTER TABLE remotecis ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE remotecis ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table teams
op.execute("ALTER TABLE teams ALTER COLUMN id TYPE \
UUID USING id::uuid")
# Table tests
op.execute("ALTER TABLE tests ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE tests ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table topic_tests
op.execute("ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE \
UUID USING topic_id::uuid")
op.execute("ALTER TABLE topic_tests ALTER COLUMN test_id TYPE \
UUID USING test_id::uuid")
# Table topics
op.execute("ALTER TABLE topics ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE topics ALTER COLUMN next_topic TYPE \
UUID USING next_topic::uuid")
# Table topics_teams
op.execute("ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE \
UUID USING topic_id::uuid")
op.execute("ALTER TABLE topics_teams ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table user_remotecis
op.execute("ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE \
UUID USING user_id::uuid")
op.execute("ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE \
UUID USING remoteci_id::uuid")
# Table users
op.execute("ALTER TABLE users ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE users ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
    # Re-create the foreign key constraints
op.create_foreign_key('component_files_component_id_fkey',
'component_files', 'components',
['component_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('components_topic_id_fkey',
'components', 'topics',
['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_job_id_fkey',
'files', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_jobstate_id_fkey',
'files', 'jobstates',
['jobstate_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_team_id_fkey',
'files', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_test_id_fkey',
'files', 'tests',
['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests', 'jobdefinitions',
['jobdefinition_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests', 'tests',
['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinitions_topic_id_fkey',
'jobdefinitions', 'topics',
['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_team_id_fkey',
'jobs', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_jobdefinition_id_fkey',
'jobs', 'jobdefinitions',
['jobdefinition_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_remoteci_id_fkey',
'jobs', 'remotecis',
['remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_previous_job_id_fkey',
'jobs', 'jobs',
['previous_job_id'], ['id'])
op.create_foreign_key('jobs_components_component_id_fkey',
'jobs_components', 'components',
['component_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_components_job_id_fkey',
'jobs_components', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_issue_id_fkey',
'jobs_issues', 'issues',
['issue_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_job_id_fkey',
'jobs_issues', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_team_id_fkey',
'jobstates', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_job_id_fkey',
'jobstates', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('logs_team_id_fkey',
'logs', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('logs_user_id_fkey',
'logs', 'users',
['user_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('metas_job_id_fkey',
'metas', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_test_id_fkey',
'remoteci_tests', 'tests',
['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_remoteci_id_fkey',
'remoteci_tests', 'remotecis',
['remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remotecis_team_id_fkey',
'remotecis', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('tests_team_id_fkey',
'tests', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_test_id_fkey',
'topic_tests', 'tests',
['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_topic_id_fkey',
'topic_tests', 'topics',
['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_next_topic_fkey',
'topics', 'topics',
['next_topic'], ['id'])
op.create_foreign_key('topics_teams_topic_id_fkey',
'topics_teams', 'topics',
['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_teams_team_id_fkey',
'topics_teams', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_user_id_fkey',
'user_remotecis', 'users',
['user_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_remoteci_id_fkey',
'user_remotecis', 'remotecis',
['remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('users_team_id_fkey',
'users', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
|
normal
|
{
"blob_id": "a34584a71fdff65e5b1bb15a6304af79774dac2c",
"index": 1315,
"step-1": "<mask token>\n\n\ndef upgrade():\n op.drop_constraint('component_files_component_id_fkey', 'component_files')\n op.drop_constraint('components_topic_id_fkey', 'components')\n op.drop_constraint('files_job_id_fkey', 'files')\n op.drop_constraint('files_jobstate_id_fkey', 'files')\n op.drop_constraint('files_team_id_fkey', 'files')\n op.drop_constraint('files_test_id_fkey', 'files')\n op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')\n op.drop_constraint('jobs_team_id_fkey', 'jobs')\n op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')\n op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')\n op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')\n op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')\n op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')\n op.drop_constraint('jobstates_team_id_fkey', 'jobstates')\n op.drop_constraint('jobstates_job_id_fkey', 'jobstates')\n op.drop_constraint('logs_team_id_fkey', 'logs')\n op.drop_constraint('logs_user_id_fkey', 'logs')\n op.drop_constraint('metas_job_id_fkey', 'metas')\n op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')\n op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')\n op.drop_constraint('remotecis_team_id_fkey', 'remotecis')\n op.drop_constraint('tests_team_id_fkey', 'tests')\n op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')\n op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')\n op.drop_constraint('topics_next_topic_fkey', 'topics')\n op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')\n op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')\n op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')\n op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')\n op.drop_constraint('users_team_id_fkey', 'users')\n op.execute(\n 'ALTER TABLE component_files ALTER COLUMN component_id TYPE UUID USING component_id::uuid'\n )\n op.execute(\n 'ALTER TABLE component_files ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE components ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE components ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN jobstate_id TYPE UUID USING jobstate_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE issues ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinitions ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs 
ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE UUID USING previous_job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_components ALTER COLUMN component_id TYPE UUID USING component_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_components ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE UUID USING issue_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN user_id TYPE UUID USING user_id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE metas ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE metas ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remotecis ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE remotecis ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE teams ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE tests ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE tests ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topic_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics ALTER COLUMN next_topic TYPE UUID USING next_topic::uuid'\n )\n op.execute(\n 'ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics_teams ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE UUID USING user_id::uuid'\n )\n op.execute(\n 'ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE users ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE users ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.create_foreign_key('component_files_component_id_fkey',\n 'component_files', 'components', ['component_id'], ['id'], ondelete\n ='CASCADE')\n op.create_foreign_key('components_topic_id_fkey', 'components',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_job_id_fkey', 'files', 'jobs', ['job_id'],\n ['id'], ondelete='CASCADE')\n 
op.create_foreign_key('files_jobstate_id_fkey', 'files', 'jobstates', [\n 'jobstate_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_team_id_fkey', 'files', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_test_id_fkey', 'files', 'tests', [\n 'test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests', 'jobdefinitions', ['jobdefinition_id'], [\n 'id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests', 'tests', ['test_id'], ['id'], ondelete='CASCADE'\n )\n op.create_foreign_key('jobdefinitions_topic_id_fkey', 'jobdefinitions',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_team_id_fkey', 'jobs', 'teams', ['team_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_jobdefinition_id_fkey', 'jobs',\n 'jobdefinitions', ['jobdefinition_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_remoteci_id_fkey', 'jobs', 'remotecis', [\n 'remoteci_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_previous_job_id_fkey', 'jobs', 'jobs', [\n 'previous_job_id'], ['id'])\n op.create_foreign_key('jobs_components_component_id_fkey',\n 'jobs_components', 'components', ['component_id'], ['id'], ondelete\n ='CASCADE')\n op.create_foreign_key('jobs_components_job_id_fkey', 'jobs_components',\n 'jobs', ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_issue_id_fkey', 'jobs_issues',\n 'issues', ['issue_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_job_id_fkey', 'jobs_issues', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_team_id_fkey', 'jobstates', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_job_id_fkey', 'jobstates', 'jobs', [\n 'job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_team_id_fkey', 'logs', 'teams', ['team_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_user_id_fkey', 'logs', 'users', ['user_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('metas_job_id_fkey', 'metas', 'jobs', ['job_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_test_id_fkey', 'remoteci_tests',\n 'tests', ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_remoteci_id_fkey',\n 'remoteci_tests', 'remotecis', ['remoteci_id'], ['id'], ondelete=\n 'CASCADE')\n op.create_foreign_key('remotecis_team_id_fkey', 'remotecis', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('tests_team_id_fkey', 'tests', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_test_id_fkey', 'topic_tests',\n 'tests', ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_topic_id_fkey', 'topic_tests',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_next_topic_fkey', 'topics', 'topics', [\n 'next_topic'], ['id'])\n op.create_foreign_key('topics_teams_topic_id_fkey', 'topics_teams',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_teams_team_id_fkey', 'topics_teams',\n 'teams', ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_user_id_fkey', 'user_remotecis',\n 'users', ['user_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_remoteci_id_fkey',\n 'user_remotecis', 
'remotecis', ['remoteci_id'], ['id'], ondelete=\n 'CASCADE')\n op.create_foreign_key('users_team_id_fkey', 'users', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.drop_constraint('component_files_component_id_fkey', 'component_files')\n op.drop_constraint('components_topic_id_fkey', 'components')\n op.drop_constraint('files_job_id_fkey', 'files')\n op.drop_constraint('files_jobstate_id_fkey', 'files')\n op.drop_constraint('files_team_id_fkey', 'files')\n op.drop_constraint('files_test_id_fkey', 'files')\n op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')\n op.drop_constraint('jobs_team_id_fkey', 'jobs')\n op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')\n op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')\n op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')\n op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')\n op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')\n op.drop_constraint('jobstates_team_id_fkey', 'jobstates')\n op.drop_constraint('jobstates_job_id_fkey', 'jobstates')\n op.drop_constraint('logs_team_id_fkey', 'logs')\n op.drop_constraint('logs_user_id_fkey', 'logs')\n op.drop_constraint('metas_job_id_fkey', 'metas')\n op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')\n op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')\n op.drop_constraint('remotecis_team_id_fkey', 'remotecis')\n op.drop_constraint('tests_team_id_fkey', 'tests')\n op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')\n op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')\n op.drop_constraint('topics_next_topic_fkey', 'topics')\n op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')\n op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')\n op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')\n op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')\n op.drop_constraint('users_team_id_fkey', 'users')\n op.execute(\n 'ALTER TABLE component_files ALTER COLUMN component_id TYPE UUID USING component_id::uuid'\n )\n op.execute(\n 'ALTER TABLE component_files ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE components ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE components ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN jobstate_id TYPE UUID USING jobstate_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE issues ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinitions ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs 
ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE UUID USING previous_job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_components ALTER COLUMN component_id TYPE UUID USING component_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_components ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE UUID USING issue_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN user_id TYPE UUID USING user_id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE metas ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE metas ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remotecis ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE remotecis ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE teams ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE tests ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE tests ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topic_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics ALTER COLUMN next_topic TYPE UUID USING next_topic::uuid'\n )\n op.execute(\n 'ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics_teams ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE UUID USING user_id::uuid'\n )\n op.execute(\n 'ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE users ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE users ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.create_foreign_key('component_files_component_id_fkey',\n 'component_files', 'components', ['component_id'], ['id'], ondelete\n ='CASCADE')\n op.create_foreign_key('components_topic_id_fkey', 'components',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_job_id_fkey', 'files', 'jobs', ['job_id'],\n ['id'], ondelete='CASCADE')\n 
op.create_foreign_key('files_jobstate_id_fkey', 'files', 'jobstates', [\n 'jobstate_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_team_id_fkey', 'files', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_test_id_fkey', 'files', 'tests', [\n 'test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests', 'jobdefinitions', ['jobdefinition_id'], [\n 'id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests', 'tests', ['test_id'], ['id'], ondelete='CASCADE'\n )\n op.create_foreign_key('jobdefinitions_topic_id_fkey', 'jobdefinitions',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_team_id_fkey', 'jobs', 'teams', ['team_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_jobdefinition_id_fkey', 'jobs',\n 'jobdefinitions', ['jobdefinition_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_remoteci_id_fkey', 'jobs', 'remotecis', [\n 'remoteci_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_previous_job_id_fkey', 'jobs', 'jobs', [\n 'previous_job_id'], ['id'])\n op.create_foreign_key('jobs_components_component_id_fkey',\n 'jobs_components', 'components', ['component_id'], ['id'], ondelete\n ='CASCADE')\n op.create_foreign_key('jobs_components_job_id_fkey', 'jobs_components',\n 'jobs', ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_issue_id_fkey', 'jobs_issues',\n 'issues', ['issue_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_job_id_fkey', 'jobs_issues', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_team_id_fkey', 'jobstates', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_job_id_fkey', 'jobstates', 'jobs', [\n 'job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_team_id_fkey', 'logs', 'teams', ['team_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_user_id_fkey', 'logs', 'users', ['user_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('metas_job_id_fkey', 'metas', 'jobs', ['job_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_test_id_fkey', 'remoteci_tests',\n 'tests', ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_remoteci_id_fkey',\n 'remoteci_tests', 'remotecis', ['remoteci_id'], ['id'], ondelete=\n 'CASCADE')\n op.create_foreign_key('remotecis_team_id_fkey', 'remotecis', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('tests_team_id_fkey', 'tests', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_test_id_fkey', 'topic_tests',\n 'tests', ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_topic_id_fkey', 'topic_tests',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_next_topic_fkey', 'topics', 'topics', [\n 'next_topic'], ['id'])\n op.create_foreign_key('topics_teams_topic_id_fkey', 'topics_teams',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_teams_team_id_fkey', 'topics_teams',\n 'teams', ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_user_id_fkey', 'user_remotecis',\n 'users', ['user_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_remoteci_id_fkey',\n 'user_remotecis', 
'remotecis', ['remoteci_id'], ['id'], ondelete=\n 'CASCADE')\n op.create_foreign_key('users_team_id_fkey', 'users', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n\n\ndef downgrade():\n pass\n",
"step-3": "<mask token>\nrevision = '1bb42ff54435'\ndown_revision = '6bbbf58ed9de'\nbranch_labels = None\ndepends_on = None\n<mask token>\n\n\ndef upgrade():\n op.drop_constraint('component_files_component_id_fkey', 'component_files')\n op.drop_constraint('components_topic_id_fkey', 'components')\n op.drop_constraint('files_job_id_fkey', 'files')\n op.drop_constraint('files_jobstate_id_fkey', 'files')\n op.drop_constraint('files_team_id_fkey', 'files')\n op.drop_constraint('files_test_id_fkey', 'files')\n op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')\n op.drop_constraint('jobs_team_id_fkey', 'jobs')\n op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')\n op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')\n op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')\n op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')\n op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')\n op.drop_constraint('jobstates_team_id_fkey', 'jobstates')\n op.drop_constraint('jobstates_job_id_fkey', 'jobstates')\n op.drop_constraint('logs_team_id_fkey', 'logs')\n op.drop_constraint('logs_user_id_fkey', 'logs')\n op.drop_constraint('metas_job_id_fkey', 'metas')\n op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')\n op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')\n op.drop_constraint('remotecis_team_id_fkey', 'remotecis')\n op.drop_constraint('tests_team_id_fkey', 'tests')\n op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')\n op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')\n op.drop_constraint('topics_next_topic_fkey', 'topics')\n op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')\n op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')\n op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')\n op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')\n op.drop_constraint('users_team_id_fkey', 'users')\n op.execute(\n 'ALTER TABLE component_files ALTER COLUMN component_id TYPE UUID USING component_id::uuid'\n )\n op.execute(\n 'ALTER TABLE component_files ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE components ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE components ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN jobstate_id TYPE UUID USING jobstate_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE issues ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinitions ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER 
TABLE jobdefinitions ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE UUID USING previous_job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_components ALTER COLUMN component_id TYPE UUID USING component_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_components ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE UUID USING issue_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN user_id TYPE UUID USING user_id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE metas ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE metas ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remotecis ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE remotecis ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE teams ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE tests ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE tests ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topic_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics ALTER COLUMN next_topic TYPE UUID USING next_topic::uuid'\n )\n op.execute(\n 'ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics_teams ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE UUID USING user_id::uuid'\n )\n op.execute(\n 'ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE users ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE users ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.create_foreign_key('component_files_component_id_fkey',\n 'component_files', 'components', ['component_id'], ['id'], ondelete\n ='CASCADE')\n op.create_foreign_key('components_topic_id_fkey', 'components',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n 
op.create_foreign_key('files_job_id_fkey', 'files', 'jobs', ['job_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_jobstate_id_fkey', 'files', 'jobstates', [\n 'jobstate_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_team_id_fkey', 'files', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_test_id_fkey', 'files', 'tests', [\n 'test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests', 'jobdefinitions', ['jobdefinition_id'], [\n 'id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests', 'tests', ['test_id'], ['id'], ondelete='CASCADE'\n )\n op.create_foreign_key('jobdefinitions_topic_id_fkey', 'jobdefinitions',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_team_id_fkey', 'jobs', 'teams', ['team_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_jobdefinition_id_fkey', 'jobs',\n 'jobdefinitions', ['jobdefinition_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_remoteci_id_fkey', 'jobs', 'remotecis', [\n 'remoteci_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_previous_job_id_fkey', 'jobs', 'jobs', [\n 'previous_job_id'], ['id'])\n op.create_foreign_key('jobs_components_component_id_fkey',\n 'jobs_components', 'components', ['component_id'], ['id'], ondelete\n ='CASCADE')\n op.create_foreign_key('jobs_components_job_id_fkey', 'jobs_components',\n 'jobs', ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_issue_id_fkey', 'jobs_issues',\n 'issues', ['issue_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_job_id_fkey', 'jobs_issues', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_team_id_fkey', 'jobstates', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_job_id_fkey', 'jobstates', 'jobs', [\n 'job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_team_id_fkey', 'logs', 'teams', ['team_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_user_id_fkey', 'logs', 'users', ['user_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('metas_job_id_fkey', 'metas', 'jobs', ['job_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_test_id_fkey', 'remoteci_tests',\n 'tests', ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_remoteci_id_fkey',\n 'remoteci_tests', 'remotecis', ['remoteci_id'], ['id'], ondelete=\n 'CASCADE')\n op.create_foreign_key('remotecis_team_id_fkey', 'remotecis', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('tests_team_id_fkey', 'tests', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_test_id_fkey', 'topic_tests',\n 'tests', ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_topic_id_fkey', 'topic_tests',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_next_topic_fkey', 'topics', 'topics', [\n 'next_topic'], ['id'])\n op.create_foreign_key('topics_teams_topic_id_fkey', 'topics_teams',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_teams_team_id_fkey', 'topics_teams',\n 'teams', ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_user_id_fkey', 'user_remotecis',\n 'users', ['user_id'], ['id'], 
ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_remoteci_id_fkey',\n 'user_remotecis', 'remotecis', ['remoteci_id'], ['id'], ondelete=\n 'CASCADE')\n op.create_foreign_key('users_team_id_fkey', 'users', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n\n\ndef downgrade():\n pass\n",
"step-4": "<mask token>\nrevision = '1bb42ff54435'\ndown_revision = '6bbbf58ed9de'\nbranch_labels = None\ndepends_on = None\nfrom alembic import op\n\n\ndef upgrade():\n op.drop_constraint('component_files_component_id_fkey', 'component_files')\n op.drop_constraint('components_topic_id_fkey', 'components')\n op.drop_constraint('files_job_id_fkey', 'files')\n op.drop_constraint('files_jobstate_id_fkey', 'files')\n op.drop_constraint('files_team_id_fkey', 'files')\n op.drop_constraint('files_test_id_fkey', 'files')\n op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')\n op.drop_constraint('jobs_team_id_fkey', 'jobs')\n op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')\n op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')\n op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')\n op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')\n op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')\n op.drop_constraint('jobstates_team_id_fkey', 'jobstates')\n op.drop_constraint('jobstates_job_id_fkey', 'jobstates')\n op.drop_constraint('logs_team_id_fkey', 'logs')\n op.drop_constraint('logs_user_id_fkey', 'logs')\n op.drop_constraint('metas_job_id_fkey', 'metas')\n op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')\n op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')\n op.drop_constraint('remotecis_team_id_fkey', 'remotecis')\n op.drop_constraint('tests_team_id_fkey', 'tests')\n op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')\n op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')\n op.drop_constraint('topics_next_topic_fkey', 'topics')\n op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')\n op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')\n op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')\n op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')\n op.drop_constraint('users_team_id_fkey', 'users')\n op.execute(\n 'ALTER TABLE component_files ALTER COLUMN component_id TYPE UUID USING component_id::uuid'\n )\n op.execute(\n 'ALTER TABLE component_files ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE components ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE components ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN jobstate_id TYPE UUID USING jobstate_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE files ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE issues ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobdefinitions ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 
'ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE UUID USING previous_job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_components ALTER COLUMN component_id TYPE UUID USING component_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_components ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE UUID USING issue_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE jobstates ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN user_id TYPE UUID USING user_id::uuid'\n )\n op.execute(\n 'ALTER TABLE logs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE metas ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE metas ALTER COLUMN job_id TYPE UUID USING job_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE remotecis ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE remotecis ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE teams ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE tests ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE tests ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topic_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics ALTER COLUMN next_topic TYPE UUID USING next_topic::uuid'\n )\n op.execute(\n 'ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'\n )\n op.execute(\n 'ALTER TABLE topics_teams ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.execute(\n 'ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE UUID USING user_id::uuid'\n )\n op.execute(\n 'ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'\n )\n op.execute(\n 'ALTER TABLE users ALTER COLUMN id TYPE UUID USING id::uuid'\n )\n op.execute(\n 'ALTER TABLE users ALTER COLUMN team_id TYPE UUID USING team_id::uuid'\n )\n op.create_foreign_key('component_files_component_id_fkey',\n 'component_files', 'components', ['component_id'], ['id'], ondelete\n ='CASCADE')\n op.create_foreign_key('components_topic_id_fkey', 'components',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n 
op.create_foreign_key('files_job_id_fkey', 'files', 'jobs', ['job_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_jobstate_id_fkey', 'files', 'jobstates', [\n 'jobstate_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_team_id_fkey', 'files', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_test_id_fkey', 'files', 'tests', [\n 'test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests', 'jobdefinitions', ['jobdefinition_id'], [\n 'id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests', 'tests', ['test_id'], ['id'], ondelete='CASCADE'\n )\n op.create_foreign_key('jobdefinitions_topic_id_fkey', 'jobdefinitions',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_team_id_fkey', 'jobs', 'teams', ['team_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_jobdefinition_id_fkey', 'jobs',\n 'jobdefinitions', ['jobdefinition_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_remoteci_id_fkey', 'jobs', 'remotecis', [\n 'remoteci_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_previous_job_id_fkey', 'jobs', 'jobs', [\n 'previous_job_id'], ['id'])\n op.create_foreign_key('jobs_components_component_id_fkey',\n 'jobs_components', 'components', ['component_id'], ['id'], ondelete\n ='CASCADE')\n op.create_foreign_key('jobs_components_job_id_fkey', 'jobs_components',\n 'jobs', ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_issue_id_fkey', 'jobs_issues',\n 'issues', ['issue_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_job_id_fkey', 'jobs_issues', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_team_id_fkey', 'jobstates', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_job_id_fkey', 'jobstates', 'jobs', [\n 'job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_team_id_fkey', 'logs', 'teams', ['team_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_user_id_fkey', 'logs', 'users', ['user_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('metas_job_id_fkey', 'metas', 'jobs', ['job_id'],\n ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_test_id_fkey', 'remoteci_tests',\n 'tests', ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_remoteci_id_fkey',\n 'remoteci_tests', 'remotecis', ['remoteci_id'], ['id'], ondelete=\n 'CASCADE')\n op.create_foreign_key('remotecis_team_id_fkey', 'remotecis', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('tests_team_id_fkey', 'tests', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_test_id_fkey', 'topic_tests',\n 'tests', ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_topic_id_fkey', 'topic_tests',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_next_topic_fkey', 'topics', 'topics', [\n 'next_topic'], ['id'])\n op.create_foreign_key('topics_teams_topic_id_fkey', 'topics_teams',\n 'topics', ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_teams_team_id_fkey', 'topics_teams',\n 'teams', ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_user_id_fkey', 'user_remotecis',\n 'users', ['user_id'], ['id'], 
ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_remoteci_id_fkey',\n 'user_remotecis', 'remotecis', ['remoteci_id'], ['id'], ondelete=\n 'CASCADE')\n op.create_foreign_key('users_team_id_fkey', 'users', 'teams', [\n 'team_id'], ['id'], ondelete='CASCADE')\n\n\ndef downgrade():\n pass\n",
"step-5": "#\n# Copyright (C) 2017 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Change varchar ID to UUID\n\nRevision ID: 1bb42ff54435\nRevises: 6bbbf58ed9de\nCreate Date: 2017-02-07 09:28:37.493302\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1bb42ff54435'\ndown_revision = '6bbbf58ed9de'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\n\n\ndef upgrade():\n # Drop constraint\n op.drop_constraint('component_files_component_id_fkey', 'component_files')\n op.drop_constraint('components_topic_id_fkey', 'components')\n op.drop_constraint('files_job_id_fkey', 'files')\n op.drop_constraint('files_jobstate_id_fkey', 'files')\n op.drop_constraint('files_team_id_fkey', 'files')\n op.drop_constraint('files_test_id_fkey', 'files')\n op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests')\n op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')\n op.drop_constraint('jobs_team_id_fkey', 'jobs')\n op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')\n op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')\n op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')\n op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')\n op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')\n op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')\n op.drop_constraint('jobstates_team_id_fkey', 'jobstates')\n op.drop_constraint('jobstates_job_id_fkey', 'jobstates')\n op.drop_constraint('logs_team_id_fkey', 'logs')\n op.drop_constraint('logs_user_id_fkey', 'logs')\n op.drop_constraint('metas_job_id_fkey', 'metas')\n op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')\n op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')\n op.drop_constraint('remotecis_team_id_fkey', 'remotecis')\n op.drop_constraint('tests_team_id_fkey', 'tests')\n op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')\n op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')\n op.drop_constraint('topics_next_topic_fkey', 'topics')\n op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')\n op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')\n op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')\n op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')\n op.drop_constraint('users_team_id_fkey', 'users')\n\n # Change type\n # Table component_files\n op.execute(\"ALTER TABLE component_files ALTER COLUMN component_id TYPE \\\n UUID USING component_id::uuid\")\n op.execute(\"ALTER TABLE component_files ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n\n # Table components\n op.execute(\"ALTER TABLE components ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE components ALTER COLUMN topic_id TYPE \\\n UUID USING topic_id::uuid\")\n\n # 
Table files\n op.execute(\"ALTER TABLE files ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE files ALTER COLUMN jobstate_id TYPE \\\n UUID USING jobstate_id::uuid\")\n op.execute(\"ALTER TABLE files ALTER COLUMN team_id TYPE \\\n UUID USING team_id::uuid\")\n op.execute(\"ALTER TABLE files ALTER COLUMN job_id TYPE \\\n UUID USING job_id::uuid\")\n op.execute(\"ALTER TABLE files ALTER COLUMN test_id TYPE \\\n UUID USING test_id::uuid\")\n\n # Table issues\n op.execute(\"ALTER TABLE issues ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n\n # Table jobdefinition_tests\n op.execute(\"ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id \\\n TYPE UUID USING jobdefinition_id::uuid\")\n op.execute(\"ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE \\\n UUID USING test_id::uuid\")\n\n # Table jobdefinitions\n op.execute(\"ALTER TABLE jobdefinitions ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE \\\n UUID USING topic_id::uuid\")\n\n # Table jobs\n op.execute(\"ALTER TABLE jobs ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE \\\n UUID USING jobdefinition_id::uuid\")\n op.execute(\"ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE \\\n UUID USING remoteci_id::uuid\")\n op.execute(\"ALTER TABLE jobs ALTER COLUMN team_id TYPE \\\n UUID USING team_id::uuid\")\n op.execute(\"ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE \\\n UUID USING previous_job_id::uuid\")\n\n # Table jobs_components\n op.execute(\"ALTER TABLE jobs_components ALTER COLUMN component_id TYPE \\\n UUID USING component_id::uuid\")\n op.execute(\"ALTER TABLE jobs_components ALTER COLUMN job_id TYPE \\\n UUID USING job_id::uuid\")\n\n # Table jobs_issues\n op.execute(\"ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE \\\n UUID USING job_id::uuid\")\n op.execute(\"ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE \\\n UUID USING issue_id::uuid\")\n\n # Table jobstates\n op.execute(\"ALTER TABLE jobstates ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE jobstates ALTER COLUMN job_id TYPE \\\n UUID USING job_id::uuid\")\n op.execute(\"ALTER TABLE jobstates ALTER COLUMN team_id TYPE \\\n UUID USING team_id::uuid\")\n\n # Table logs\n op.execute(\"ALTER TABLE logs ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE logs ALTER COLUMN user_id TYPE \\\n UUID USING user_id::uuid\")\n op.execute(\"ALTER TABLE logs ALTER COLUMN team_id TYPE \\\n UUID USING team_id::uuid\")\n\n # Table metas\n op.execute(\"ALTER TABLE metas ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE metas ALTER COLUMN job_id TYPE \\\n UUID USING job_id::uuid\")\n\n # Table remoteci_tests\n op.execute(\"ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE \\\n UUID USING remoteci_id::uuid\")\n op.execute(\"ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE \\\n UUID USING test_id::uuid\")\n\n # Table remotecis\n op.execute(\"ALTER TABLE remotecis ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE remotecis ALTER COLUMN team_id TYPE \\\n UUID USING team_id::uuid\")\n\n # Table teams\n op.execute(\"ALTER TABLE teams ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n\n # Table tests\n op.execute(\"ALTER TABLE tests ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE tests ALTER COLUMN team_id TYPE \\\n UUID USING team_id::uuid\")\n\n # Table topic_tests\n 
op.execute(\"ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE \\\n UUID USING topic_id::uuid\")\n op.execute(\"ALTER TABLE topic_tests ALTER COLUMN test_id TYPE \\\n UUID USING test_id::uuid\")\n\n # Table topics\n op.execute(\"ALTER TABLE topics ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE topics ALTER COLUMN next_topic TYPE \\\n UUID USING next_topic::uuid\")\n\n # Table topics_teams\n op.execute(\"ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE \\\n UUID USING topic_id::uuid\")\n op.execute(\"ALTER TABLE topics_teams ALTER COLUMN team_id TYPE \\\n UUID USING team_id::uuid\")\n\n # Table user_remotecis\n op.execute(\"ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE \\\n UUID USING user_id::uuid\")\n op.execute(\"ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE \\\n UUID USING remoteci_id::uuid\")\n\n # Table users\n op.execute(\"ALTER TABLE users ALTER COLUMN id TYPE \\\n UUID USING id::uuid\")\n op.execute(\"ALTER TABLE users ALTER COLUMN team_id TYPE \\\n UUID USING team_id::uuid\")\n\n # Re-Create constraint\n op.create_foreign_key('component_files_component_id_fkey',\n 'component_files', 'components',\n ['component_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('components_topic_id_fkey',\n 'components', 'topics',\n ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_job_id_fkey',\n 'files', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_jobstate_id_fkey',\n 'files', 'jobstates',\n ['jobstate_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_team_id_fkey',\n 'files', 'teams',\n ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('files_test_id_fkey',\n 'files', 'tests',\n ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',\n 'jobdefinition_tests', 'jobdefinitions',\n ['jobdefinition_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinition_tests_test_id_fkey',\n 'jobdefinition_tests', 'tests',\n ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobdefinitions_topic_id_fkey',\n 'jobdefinitions', 'topics',\n ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_team_id_fkey',\n 'jobs', 'teams',\n ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_jobdefinition_id_fkey',\n 'jobs', 'jobdefinitions',\n ['jobdefinition_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_remoteci_id_fkey',\n 'jobs', 'remotecis',\n ['remoteci_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_previous_job_id_fkey',\n 'jobs', 'jobs',\n ['previous_job_id'], ['id'])\n op.create_foreign_key('jobs_components_component_id_fkey',\n 'jobs_components', 'components',\n ['component_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_components_job_id_fkey',\n 'jobs_components', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_issue_id_fkey',\n 'jobs_issues', 'issues',\n ['issue_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobs_issues_job_id_fkey',\n 'jobs_issues', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_team_id_fkey',\n 'jobstates', 'teams',\n ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('jobstates_job_id_fkey',\n 'jobstates', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('logs_team_id_fkey',\n 'logs', 'teams',\n ['team_id'], ['id'], ondelete='CASCADE')\n 
op.create_foreign_key('logs_user_id_fkey',\n 'logs', 'users',\n ['user_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('metas_job_id_fkey',\n 'metas', 'jobs',\n ['job_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_test_id_fkey',\n 'remoteci_tests', 'tests',\n ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('remoteci_tests_remoteci_id_fkey',\n 'remoteci_tests', 'remotecis',\n ['remoteci_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('remotecis_team_id_fkey',\n 'remotecis', 'teams',\n ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('tests_team_id_fkey',\n 'tests', 'teams',\n ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_test_id_fkey',\n 'topic_tests', 'tests',\n ['test_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topic_tests_topic_id_fkey',\n 'topic_tests', 'topics',\n ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_next_topic_fkey',\n 'topics', 'topics',\n ['next_topic'], ['id'])\n op.create_foreign_key('topics_teams_topic_id_fkey',\n 'topics_teams', 'topics',\n ['topic_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('topics_teams_team_id_fkey',\n 'topics_teams', 'teams',\n ['team_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_user_id_fkey',\n 'user_remotecis', 'users',\n ['user_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('user_remotecis_remoteci_id_fkey',\n 'user_remotecis', 'remotecis',\n ['remoteci_id'], ['id'], ondelete='CASCADE')\n op.create_foreign_key('users_team_id_fkey',\n 'users', 'teams',\n ['team_id'], ['id'], ondelete='CASCADE')\n\n\ndef downgrade():\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def classify(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
diffMat = tile(inX, (dataSetSize, 1)) - dataSet
sqDiffMat = diffMat ** 2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances ** 0.5
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1
), reverse=True)
return sortedClassCount[0][0]
<|reserved_special_token_0|>
def loadTrainingSet(dir_trainingSet):
    print('Loading all training samples from the trainingDigits folder')
trainingFileList = listdir(dir_trainingSet)
m = len(trainingFileList)
trainingMat = zeros((m, 1024))
for i in range(m):
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
hwLabels.append(classNumStr)
trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)
return hwLabels, trainingMat
<|reserved_special_token_0|>
def changeImg2Text(filePath, filename):
fileNameStr = filename.split('\\')[-1].split('.')[0] + '.txt'
fr = open(fileNameStr, 'w')
"""
https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe
"""
im = Image.open(BytesIO(filePath))
im2 = im.resize((32, 32), Image.ANTIALIAS)
img = array(im2)
print(img.shape, Image.ANTIALIAS)
m, n = img.shape[:2]
for i in range(m):
for j in range(n):
R, G, B = img[i, j, :]
"""
            Tune these colors with the PhotoShop color picker.
            Pick the RGB of the midpoint between the white point and the target color point
"""
if R < 185 and G < 100 and B < 100:
fr.write('1')
else:
fr.write('0')
fr.write('\n')
fr.close()
return fileNameStr
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def classify(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
diffMat = tile(inX, (dataSetSize, 1)) - dataSet
sqDiffMat = diffMat ** 2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances ** 0.5
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1
), reverse=True)
return sortedClassCount[0][0]
def img2vector(filename):
returnVect = zeros((1, 1024))
fr = open(filename)
for i in range(32):
lineStr = fr.readline()
for j in range(32):
returnVect[0, 32 * i + j] = int(lineStr[j])
return returnVect
<|reserved_special_token_0|>
def loadTrainingSet(dir_trainingSet):
    print('Loading all training samples from the trainingDigits folder')
trainingFileList = listdir(dir_trainingSet)
m = len(trainingFileList)
trainingMat = zeros((m, 1024))
for i in range(m):
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
hwLabels.append(classNumStr)
trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)
return hwLabels, trainingMat
def getResult(filename, trainingDigits):
"""
    filename test-set dir
    trainingDigits training-set dir
"""
hwLabels, trainingMat = loadTrainingSet(trainingDigits)
with open(filename, 'rb') as f:
filePath = f.read()
fileNameStr = changeImg2Text(filePath, filename)
inputVect = img2vector(fileNameStr)
classifierResult = classify(inputVect, trainingMat, hwLabels, 3)
    print('Predicted handwritten digit:', classifierResult)
return classifierResult
def changeImg2Text(filePath, filename):
fileNameStr = filename.split('\\')[-1].split('.')[0] + '.txt'
fr = open(fileNameStr, 'w')
"""
https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe
"""
im = Image.open(BytesIO(filePath))
im2 = im.resize((32, 32), Image.ANTIALIAS)
img = array(im2)
print(img.shape, Image.ANTIALIAS)
m, n = img.shape[:2]
for i in range(m):
for j in range(n):
R, G, B = img[i, j, :]
"""
            Tune these colors with the PhotoShop color picker.
            Pick the RGB of the midpoint between the white point and the target color point
"""
if R < 185 and G < 100 and B < 100:
fr.write('1')
else:
fr.write('0')
fr.write('\n')
fr.close()
return fileNameStr
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def classify(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
diffMat = tile(inX, (dataSetSize, 1)) - dataSet
sqDiffMat = diffMat ** 2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances ** 0.5
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1
), reverse=True)
return sortedClassCount[0][0]
def img2vector(filename):
returnVect = zeros((1, 1024))
fr = open(filename)
for i in range(32):
lineStr = fr.readline()
for j in range(32):
returnVect[0, 32 * i + j] = int(lineStr[j])
return returnVect
<|reserved_special_token_0|>
hwLabels, trainingMat = [], []
def loadTrainingSet(dir_trainingSet):
    print('Loading all training samples from the trainingDigits folder')
trainingFileList = listdir(dir_trainingSet)
m = len(trainingFileList)
trainingMat = zeros((m, 1024))
for i in range(m):
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
hwLabels.append(classNumStr)
trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)
return hwLabels, trainingMat
def getResult(filename, trainingDigits):
"""
    filename test-set dir
    trainingDigits training-set dir
"""
hwLabels, trainingMat = loadTrainingSet(trainingDigits)
with open(filename, 'rb') as f:
filePath = f.read()
fileNameStr = changeImg2Text(filePath, filename)
inputVect = img2vector(fileNameStr)
classifierResult = classify(inputVect, trainingMat, hwLabels, 3)
    print('Predicted handwritten digit:', classifierResult)
return classifierResult
def changeImg2Text(filePath, filename):
fileNameStr = filename.split('\\')[-1].split('.')[0] + '.txt'
fr = open(fileNameStr, 'w')
"""
https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe
"""
im = Image.open(BytesIO(filePath))
im2 = im.resize((32, 32), Image.ANTIALIAS)
img = array(im2)
print(img.shape, Image.ANTIALIAS)
m, n = img.shape[:2]
for i in range(m):
for j in range(n):
R, G, B = img[i, j, :]
"""
            Tune these colors with the PhotoShop color picker.
            Pick the RGB of the midpoint between the white point and the target color point
"""
if R < 185 and G < 100 and B < 100:
fr.write('1')
else:
fr.write('0')
fr.write('\n')
fr.close()
return fileNameStr
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import operator
from numpy import *
from PIL import Image
from os import listdir
from io import BytesIO
def classify(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
diffMat = tile(inX, (dataSetSize, 1)) - dataSet
sqDiffMat = diffMat ** 2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances ** 0.5
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1
), reverse=True)
return sortedClassCount[0][0]
def img2vector(filename):
returnVect = zeros((1, 1024))
fr = open(filename)
for i in range(32):
lineStr = fr.readline()
for j in range(32):
returnVect[0, 32 * i + j] = int(lineStr[j])
return returnVect
<|reserved_special_token_0|>
hwLabels, trainingMat = [], []
def loadTrainingSet(dir_trainingSet):
    print('Loading all training samples from the trainingDigits folder')
trainingFileList = listdir(dir_trainingSet)
m = len(trainingFileList)
trainingMat = zeros((m, 1024))
for i in range(m):
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
hwLabels.append(classNumStr)
trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)
return hwLabels, trainingMat
def getResult(filename, trainingDigits):
"""
    filename test-set dir
    trainingDigits training-set dir
"""
hwLabels, trainingMat = loadTrainingSet(trainingDigits)
with open(filename, 'rb') as f:
filePath = f.read()
fileNameStr = changeImg2Text(filePath, filename)
inputVect = img2vector(fileNameStr)
classifierResult = classify(inputVect, trainingMat, hwLabels, 3)
    print('Predicted handwritten digit:', classifierResult)
return classifierResult
def changeImg2Text(filePath, filename):
fileNameStr = filename.split('\\')[-1].split('.')[0] + '.txt'
fr = open(fileNameStr, 'w')
"""
https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe
"""
im = Image.open(BytesIO(filePath))
im2 = im.resize((32, 32), Image.ANTIALIAS)
img = array(im2)
print(img.shape, Image.ANTIALIAS)
m, n = img.shape[:2]
for i in range(m):
for j in range(n):
R, G, B = img[i, j, :]
"""
            Tune these colors with the PhotoShop color picker.
            Pick the RGB of the midpoint between the white point and the target color point
"""
if R < 185 and G < 100 and B < 100:
fr.write('1')
else:
fr.write('0')
fr.write('\n')
fr.close()
return fileNameStr
<|reserved_special_token_1|>
'''
Handwritten digit recognition system
Builds the recognition class
Recognize
Simply call the getResult() function
'''
import operator
from numpy import *
from PIL import Image
from os import listdir
from io import BytesIO
def classify(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0] # number of rows in the training set
    # compute the distances
diffMat = tile(inX, (dataSetSize,1)) - dataSet
sqDiffMat = diffMat**2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances**0.5
    # indices that would sort the distances
sortedDistIndicies = distances.argsort()
classCount={}
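    # count the class labels among the k nearest neighbors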
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
sortedClassCount = sorted(classCount.items(),
key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
# Convert an image file into a row vector
def img2vector(filename):
returnVect = zeros((1,1024))
fr = open(filename)
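    # each digit file holds 32 lines of 32 '0'/'1' characters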
for i in range(32):
lineStr = fr.readline()
for j in range(32):
returnVect[0,32*i+j] = int(lineStr[j])
return returnVect
'''
How can the training-set loading be made to run only once?
'''
hwLabels , trainingMat = [] , []
def loadTrainingSet(dir_trainingSet):
    print('Loading all training samples from the trainingDigits folder')
    # load all training samples from the trainingDigits folder
trainingFileList = listdir(dir_trainingSet)
#print(trainingFileList)
m = len(trainingFileList)
    trainingMat = zeros((m,1024)) # initialize the training matrix
for i in range(m):
        # these three steps split each training-file name and keep only its first field
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
        # collect the leading number of each training-file name as its label
hwLabels.append(classNumStr)
        # convert each training txt into a 1x1024 row vector
trainingMat[i,:] = img2vector(dir_trainingSet+'/%s' % fileNameStr)
return hwLabels , trainingMat
def getResult(filename,trainingDigits):
'''
    filename test-set dir
    trainingDigits training-set dir
'''
hwLabels , trainingMat = loadTrainingSet(trainingDigits)
    # classify the input digit image: read the image file as raw bytes
with open(filename, 'rb') as f:
filePath = f.read()
    # filePath is now raw bytes, e.g. \x7f\x12\xdf
fileNameStr = changeImg2Text(filePath,filename)
inputVect = img2vector(fileNameStr)
classifierResult = classify(inputVect, trainingMat, hwLabels, 3)
    print( 'Predicted handwritten digit:',classifierResult)
return classifierResult
    # the original demo had the lines below, but they clobber the predicted image, so they stay commented out for now
#with open(filename, 'w') as f:
# f.write(str(classifierResult))
# Preprocess the raw image
def changeImg2Text(filePath,filename):
    # split the path on the backslash (the doubled form is just Python escaping), take the last part, e.g. 2.jpg, then split on '.' to get the name
fileNameStr = filename.split('\\')[-1].split('.')[0] + '.txt'
fr = open(fileNameStr, 'w')
    # read the image into a matrix; Python 3 needs BytesIO(filePath)
'''
https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe
'''
im = Image.open(BytesIO(filePath))
#print(im) # <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=206x376 at 0x8D99C50>
im2 = im.resize((32, 32), Image.ANTIALIAS)
img = array(im2)
print( img.shape , Image.ANTIALIAS )
m, n = img.shape[:2]
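    # binarize: dark foreground pixels become '1', everything else '0'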
for i in range(m):
for j in range(n):
R, G, B = img[i, j, :]
            # the image first has to be reduced to a grayscale map, so recognition works from the gray level
'''
            Tune these colors with the PhotoShop color picker.
            Pick the RGB of the midpoint between the white point and the target color point
'''
            #if R < 40 and G < 40 and B < 40: # thresholds for separating black from white
            #if R < 245 and G < 153 and B < 120: # thresholds for the orange images in folder 0
            if R < 185 and G < 100 and B < 100: # thresholds for the gray images in folder 2
fr.write('1')
else:
fr.write('0')
fr.write('\n')
fr.close()
return fileNameStr
|
flexible
|
{
"blob_id": "1ab5147ed8ce808de9667052b6d17f320d62484f",
"index": 4694,
"step-1": "<mask token>\n\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0]\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1\n ), reverse=True)\n return sortedClassCount[0][0]\n\n\n<mask token>\n\n\ndef loadTrainingSet(dir_trainingSet):\n print('把trainingDigits文件夹里的所有训练集导入')\n trainingFileList = listdir(dir_trainingSet)\n m = len(trainingFileList)\n trainingMat = zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)\n return hwLabels, trainingMat\n\n\n<mask token>\n\n\ndef changeImg2Text(filePath, filename):\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \"\"\"\n https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n \"\"\"\n im = Image.open(BytesIO(filePath))\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print(img.shape, Image.ANTIALIAS)\n m, n = img.shape[:2]\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n \"\"\"\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n \"\"\"\n if R < 185 and G < 100 and B < 100:\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n fr.close()\n return fileNameStr\n",
"step-2": "<mask token>\n\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0]\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1\n ), reverse=True)\n return sortedClassCount[0][0]\n\n\ndef img2vector(filename):\n returnVect = zeros((1, 1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0, 32 * i + j] = int(lineStr[j])\n return returnVect\n\n\n<mask token>\n\n\ndef loadTrainingSet(dir_trainingSet):\n print('把trainingDigits文件夹里的所有训练集导入')\n trainingFileList = listdir(dir_trainingSet)\n m = len(trainingFileList)\n trainingMat = zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)\n return hwLabels, trainingMat\n\n\ndef getResult(filename, trainingDigits):\n \"\"\"\n filename 测试集dir\n trainingDigits 训练集dir\n \"\"\"\n hwLabels, trainingMat = loadTrainingSet(trainingDigits)\n with open(filename, 'rb') as f:\n filePath = f.read()\n fileNameStr = changeImg2Text(filePath, filename)\n inputVect = img2vector(fileNameStr)\n classifierResult = classify(inputVect, trainingMat, hwLabels, 3)\n print('预测手写数字识别为:', classifierResult)\n return classifierResult\n\n\ndef changeImg2Text(filePath, filename):\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \"\"\"\n https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n \"\"\"\n im = Image.open(BytesIO(filePath))\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print(img.shape, Image.ANTIALIAS)\n m, n = img.shape[:2]\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n \"\"\"\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n \"\"\"\n if R < 185 and G < 100 and B < 100:\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n fr.close()\n return fileNameStr\n",
"step-3": "<mask token>\n\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0]\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1\n ), reverse=True)\n return sortedClassCount[0][0]\n\n\ndef img2vector(filename):\n returnVect = zeros((1, 1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0, 32 * i + j] = int(lineStr[j])\n return returnVect\n\n\n<mask token>\nhwLabels, trainingMat = [], []\n\n\ndef loadTrainingSet(dir_trainingSet):\n print('把trainingDigits文件夹里的所有训练集导入')\n trainingFileList = listdir(dir_trainingSet)\n m = len(trainingFileList)\n trainingMat = zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)\n return hwLabels, trainingMat\n\n\ndef getResult(filename, trainingDigits):\n \"\"\"\n filename 测试集dir\n trainingDigits 训练集dir\n \"\"\"\n hwLabels, trainingMat = loadTrainingSet(trainingDigits)\n with open(filename, 'rb') as f:\n filePath = f.read()\n fileNameStr = changeImg2Text(filePath, filename)\n inputVect = img2vector(fileNameStr)\n classifierResult = classify(inputVect, trainingMat, hwLabels, 3)\n print('预测手写数字识别为:', classifierResult)\n return classifierResult\n\n\ndef changeImg2Text(filePath, filename):\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \"\"\"\n https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n \"\"\"\n im = Image.open(BytesIO(filePath))\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print(img.shape, Image.ANTIALIAS)\n m, n = img.shape[:2]\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n \"\"\"\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n \"\"\"\n if R < 185 and G < 100 and B < 100:\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n fr.close()\n return fileNameStr\n",
"step-4": "<mask token>\nimport operator\nfrom numpy import *\nfrom PIL import Image\nfrom os import listdir\nfrom io import BytesIO\n\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0]\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1\n ), reverse=True)\n return sortedClassCount[0][0]\n\n\ndef img2vector(filename):\n returnVect = zeros((1, 1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0, 32 * i + j] = int(lineStr[j])\n return returnVect\n\n\n<mask token>\nhwLabels, trainingMat = [], []\n\n\ndef loadTrainingSet(dir_trainingSet):\n print('把trainingDigits文件夹里的所有训练集导入')\n trainingFileList = listdir(dir_trainingSet)\n m = len(trainingFileList)\n trainingMat = zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)\n return hwLabels, trainingMat\n\n\ndef getResult(filename, trainingDigits):\n \"\"\"\n filename 测试集dir\n trainingDigits 训练集dir\n \"\"\"\n hwLabels, trainingMat = loadTrainingSet(trainingDigits)\n with open(filename, 'rb') as f:\n filePath = f.read()\n fileNameStr = changeImg2Text(filePath, filename)\n inputVect = img2vector(fileNameStr)\n classifierResult = classify(inputVect, trainingMat, hwLabels, 3)\n print('预测手写数字识别为:', classifierResult)\n return classifierResult\n\n\ndef changeImg2Text(filePath, filename):\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \"\"\"\n https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n \"\"\"\n im = Image.open(BytesIO(filePath))\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print(img.shape, Image.ANTIALIAS)\n m, n = img.shape[:2]\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n \"\"\"\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n \"\"\"\n if R < 185 and G < 100 and B < 100:\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n fr.close()\n return fileNameStr\n",
"step-5": "'''\n手写识别系统\n构建识别类\nRecognize\n调用getResult()函数即可\n'''\n\nimport operator\nfrom numpy import *\nfrom PIL import Image\nfrom os import listdir\nfrom io import BytesIO\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0] #训练数据集的行数\n # 计算距离\n diffMat = tile(inX, (dataSetSize,1)) - dataSet\n sqDiffMat = diffMat**2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances**0.5\n # 返还距离排序的索引\n sortedDistIndicies = distances.argsort() \n classCount={} \n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1\n sortedClassCount = sorted(classCount.items(), \n key=operator.itemgetter(1), reverse=True)\n \n return sortedClassCount[0][0]\n\n# 将图片转化为行向量\ndef img2vector(filename):\n returnVect = zeros((1,1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0,32*i+j] = int(lineStr[j])\n return returnVect\n\n'''\n如何让加载训练集值运行一次?\n'''\nhwLabels , trainingMat = [] , []\n\ndef loadTrainingSet(dir_trainingSet):\n \n print('把trainingDigits文件夹里的所有训练集导入')\n #把trainingDigits文件夹里的所有训练集导入\n trainingFileList = listdir(dir_trainingSet)\n #print(trainingFileList)\n m = len(trainingFileList)\n trainingMat = zeros((m,1024)) # 初始化训练矩阵\n for i in range(m):\n # 此三步,将所有训练集的名称分割只取出第一个\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n \n # 得到一个由训练集 名称首个number的矩阵\n hwLabels.append(classNumStr)\n \n # 每一个 训练集的 txt 都转成一个 1行1025列的向量\n trainingMat[i,:] = img2vector(dir_trainingSet+'/%s' % fileNameStr)\n\n return hwLabels , trainingMat\n\n\ndef getResult(filename,trainingDigits):\n '''\n filename 测试集dir\n trainingDigits 训练集dir\n '''\n hwLabels , trainingMat = loadTrainingSet(trainingDigits)\n \n # 为输入的数字图片分类,读取图片为\n with open(filename, 'rb') as f:\n filePath = f.read()\n # 此时 filePath 是十六进制字节 如: \\x7f\\x12\\xdf\n fileNameStr = changeImg2Text(filePath,filename)\n inputVect = img2vector(fileNameStr)\n \n classifierResult = classify(inputVect, trainingMat, hwLabels, 3)\n print( '预测手写数字识别为:',classifierResult)\n return classifierResult\n \n # 原demo里有这句话,可以这句话,会将预测的图片失效,暂注释 保留\n #with open(filename, 'w') as f:\n # f.write(str(classifierResult))\n\n# 处理初始图形\ndef changeImg2Text(filePath,filename):\n # 就是字符串 \\ 分割后(其中 \\\\ 是加了转译),取最后一个 2.jpg,再 以 . 分割取 名字\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \n #读图片转矩阵,Python 3 要加 BytesIO(filePath)\n '''\n https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n '''\n im = Image.open(BytesIO(filePath))\n #print(im) # <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=206x376 at 0x8D99C50>\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print( img.shape , Image.ANTIALIAS )\n \n m, n = img.shape[:2]\n\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n # 因为,图片首先要 处理成灰度图,所以根据,灰度进而识别\n '''\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n '''\n #if R < 40 and G < 40 and B < 40: # 这些参数时对于黑白色的区分\n #if R < 245 and G < 153 and B < 120: # 对 0 文件里,橙色图片的划分\n if R < 185 and G < 100 and B < 100: # 对 2 文件里,灰色图片的划分\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n\n fr.close()\n return fileNameStr\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from recensioni_site import settings
from django.contrib.auth.models import User
from forum.models import Sezione,Post,UserDataReccomandation
class testRegistrazione(TestCase):
def setUp(self):
self.credential = {'username': 'dummy', 'password': 'dummypassword', 'is_staff': 'False'}
self.credentialp = {'username':'dummyp', 'password':'dummypasswordp', 'is_staff':'True'}
self.proprietario1 = User.objects.create(username="Proprietario1",
email="proprietario1@gmail.com",
password="PasswordProprietario1",
is_staff="True")
self.proprietario2 = User.objects.create(username="Proprietario2",
email="proprietario2@gmail.com",
password="PasswordProprietario2",
is_staff="True")
#--------------------------------------------
self.user1 = User.objects.create(username="User1",
email="user1@gmail.com",
password="PasswordUser1",
is_staff="False")
self.user2 = User.objects.create(username="User2",
email="user2@gmail.com",
password="PasswordUser2",
is_staff="False")
# --------------------------------------------
self.sezione1 = Sezione.objects.create(user=self.proprietario1,
nome_sezione="hotel1",
descrizione="descrizione",
citta="città_test",
provincia="provincia_test",
indirizzo="indirizzo_test",
logo_sezione="null",
hotelB="True",
ristoranteB="False",
fastFoodB="False",
casaVacanzaB="False",
agriturismoB="False")
self.post1 = Post.objects.create(autore_post=self.user1,
contenuto="post_test",
rating=5,
data_creazione=timezone.now(),
sezione=self.sezione1)
self.post2 = Post.objects.create(autore_post=self.user1,
contenuto="post_test",
rating=3,
data_creazione=timezone.now(),
sezione=self.sezione1)
def tearDown(self):
self.proprietario1.delete()
self.proprietario2.delete()
# --------------------------------------------
self.user1.delete()
self.user2.delete()
# --------------------------------------------
self.sezione1.delete()
self.post1.delete()
def test_vsualizzaSezione(self):
self.client.login(**self.credential)
response= self.client.get('/forum/sezione/' + str(self.sezione1.id) + '/')
self.assertTemplateUsed(response, 'forum/singola_sezione.html')
self.assertEqual(response.status_code, 200)
def test_rating(self):
self.client.login(**self.credential)
response = self.client.get('/forum/sezione/' + str(self.sezione1.id) + '/')
self.assertEqual(response.context['sezione'], self.sezione1)
self.assertEqual(response.context['media_rating'], 4)
|
normal
|
{
"blob_id": "cf9339659f49b4093c07e3723a2ede1543be41b8",
"index": 4900,
"step-1": "<mask token>\n\n\nclass testRegistrazione(TestCase):\n <mask token>\n\n def tearDown(self):\n self.proprietario1.delete()\n self.proprietario2.delete()\n self.user1.delete()\n self.user2.delete()\n self.sezione1.delete()\n self.post1.delete()\n\n def test_vsualizzaSezione(self):\n self.client.login(**self.credential)\n response = self.client.get('/forum/sezione/' + str(self.sezione1.id\n ) + '/')\n self.assertTemplateUsed(response, 'forum/singola_sezione.html')\n self.assertEqual(response.status_code, 200)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass testRegistrazione(TestCase):\n\n def setUp(self):\n self.credential = {'username': 'dummy', 'password': 'dummypassword',\n 'is_staff': 'False'}\n self.credentialp = {'username': 'dummyp', 'password':\n 'dummypasswordp', 'is_staff': 'True'}\n self.proprietario1 = User.objects.create(username='Proprietario1',\n email='proprietario1@gmail.com', password=\n 'PasswordProprietario1', is_staff='True')\n self.proprietario2 = User.objects.create(username='Proprietario2',\n email='proprietario2@gmail.com', password=\n 'PasswordProprietario2', is_staff='True')\n self.user1 = User.objects.create(username='User1', email=\n 'user1@gmail.com', password='PasswordUser1', is_staff='False')\n self.user2 = User.objects.create(username='User2', email=\n 'user2@gmail.com', password='PasswordUser2', is_staff='False')\n self.sezione1 = Sezione.objects.create(user=self.proprietario1,\n nome_sezione='hotel1', descrizione='descrizione', citta=\n 'città_test', provincia='provincia_test', indirizzo=\n 'indirizzo_test', logo_sezione='null', hotelB='True',\n ristoranteB='False', fastFoodB='False', casaVacanzaB='False',\n agriturismoB='False')\n self.post1 = Post.objects.create(autore_post=self.user1, contenuto=\n 'post_test', rating=5, data_creazione=timezone.now(), sezione=\n self.sezione1)\n self.post2 = Post.objects.create(autore_post=self.user1, contenuto=\n 'post_test', rating=3, data_creazione=timezone.now(), sezione=\n self.sezione1)\n\n def tearDown(self):\n self.proprietario1.delete()\n self.proprietario2.delete()\n self.user1.delete()\n self.user2.delete()\n self.sezione1.delete()\n self.post1.delete()\n\n def test_vsualizzaSezione(self):\n self.client.login(**self.credential)\n response = self.client.get('/forum/sezione/' + str(self.sezione1.id\n ) + '/')\n self.assertTemplateUsed(response, 'forum/singola_sezione.html')\n self.assertEqual(response.status_code, 200)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass testRegistrazione(TestCase):\n\n def setUp(self):\n self.credential = {'username': 'dummy', 'password': 'dummypassword',\n 'is_staff': 'False'}\n self.credentialp = {'username': 'dummyp', 'password':\n 'dummypasswordp', 'is_staff': 'True'}\n self.proprietario1 = User.objects.create(username='Proprietario1',\n email='proprietario1@gmail.com', password=\n 'PasswordProprietario1', is_staff='True')\n self.proprietario2 = User.objects.create(username='Proprietario2',\n email='proprietario2@gmail.com', password=\n 'PasswordProprietario2', is_staff='True')\n self.user1 = User.objects.create(username='User1', email=\n 'user1@gmail.com', password='PasswordUser1', is_staff='False')\n self.user2 = User.objects.create(username='User2', email=\n 'user2@gmail.com', password='PasswordUser2', is_staff='False')\n self.sezione1 = Sezione.objects.create(user=self.proprietario1,\n nome_sezione='hotel1', descrizione='descrizione', citta=\n 'città_test', provincia='provincia_test', indirizzo=\n 'indirizzo_test', logo_sezione='null', hotelB='True',\n ristoranteB='False', fastFoodB='False', casaVacanzaB='False',\n agriturismoB='False')\n self.post1 = Post.objects.create(autore_post=self.user1, contenuto=\n 'post_test', rating=5, data_creazione=timezone.now(), sezione=\n self.sezione1)\n self.post2 = Post.objects.create(autore_post=self.user1, contenuto=\n 'post_test', rating=3, data_creazione=timezone.now(), sezione=\n self.sezione1)\n\n def tearDown(self):\n self.proprietario1.delete()\n self.proprietario2.delete()\n self.user1.delete()\n self.user2.delete()\n self.sezione1.delete()\n self.post1.delete()\n\n def test_vsualizzaSezione(self):\n self.client.login(**self.credential)\n response = self.client.get('/forum/sezione/' + str(self.sezione1.id\n ) + '/')\n self.assertTemplateUsed(response, 'forum/singola_sezione.html')\n self.assertEqual(response.status_code, 200)\n\n def test_rating(self):\n self.client.login(**self.credential)\n response = self.client.get('/forum/sezione/' + str(self.sezione1.id\n ) + '/')\n self.assertEqual(response.context['sezione'], self.sezione1)\n self.assertEqual(response.context['media_rating'], 4)\n",
"step-4": "from django.test import TestCase\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom recensioni_site import settings\nfrom django.contrib.auth.models import User\nfrom forum.models import Sezione, Post, UserDataReccomandation\n\n\nclass testRegistrazione(TestCase):\n\n def setUp(self):\n self.credential = {'username': 'dummy', 'password': 'dummypassword',\n 'is_staff': 'False'}\n self.credentialp = {'username': 'dummyp', 'password':\n 'dummypasswordp', 'is_staff': 'True'}\n self.proprietario1 = User.objects.create(username='Proprietario1',\n email='proprietario1@gmail.com', password=\n 'PasswordProprietario1', is_staff='True')\n self.proprietario2 = User.objects.create(username='Proprietario2',\n email='proprietario2@gmail.com', password=\n 'PasswordProprietario2', is_staff='True')\n self.user1 = User.objects.create(username='User1', email=\n 'user1@gmail.com', password='PasswordUser1', is_staff='False')\n self.user2 = User.objects.create(username='User2', email=\n 'user2@gmail.com', password='PasswordUser2', is_staff='False')\n self.sezione1 = Sezione.objects.create(user=self.proprietario1,\n nome_sezione='hotel1', descrizione='descrizione', citta=\n 'città_test', provincia='provincia_test', indirizzo=\n 'indirizzo_test', logo_sezione='null', hotelB='True',\n ristoranteB='False', fastFoodB='False', casaVacanzaB='False',\n agriturismoB='False')\n self.post1 = Post.objects.create(autore_post=self.user1, contenuto=\n 'post_test', rating=5, data_creazione=timezone.now(), sezione=\n self.sezione1)\n self.post2 = Post.objects.create(autore_post=self.user1, contenuto=\n 'post_test', rating=3, data_creazione=timezone.now(), sezione=\n self.sezione1)\n\n def tearDown(self):\n self.proprietario1.delete()\n self.proprietario2.delete()\n self.user1.delete()\n self.user2.delete()\n self.sezione1.delete()\n self.post1.delete()\n\n def test_vsualizzaSezione(self):\n self.client.login(**self.credential)\n response = self.client.get('/forum/sezione/' + str(self.sezione1.id\n ) + '/')\n self.assertTemplateUsed(response, 'forum/singola_sezione.html')\n self.assertEqual(response.status_code, 200)\n\n def test_rating(self):\n self.client.login(**self.credential)\n response = self.client.get('/forum/sezione/' + str(self.sezione1.id\n ) + '/')\n self.assertEqual(response.context['sezione'], self.sezione1)\n self.assertEqual(response.context['media_rating'], 4)\n",
"step-5": "from django.test import TestCase\nfrom django.urls import reverse\nfrom django.utils import timezone\n\nfrom recensioni_site import settings\nfrom django.contrib.auth.models import User\nfrom forum.models import Sezione,Post,UserDataReccomandation\n\nclass testRegistrazione(TestCase):\n\n def setUp(self):\n self.credential = {'username': 'dummy', 'password': 'dummypassword', 'is_staff': 'False'}\n\n self.credentialp = {'username':'dummyp', 'password':'dummypasswordp', 'is_staff':'True'}\n\n self.proprietario1 = User.objects.create(username=\"Proprietario1\",\n email=\"proprietario1@gmail.com\",\n password=\"PasswordProprietario1\",\n is_staff=\"True\")\n\n self.proprietario2 = User.objects.create(username=\"Proprietario2\",\n email=\"proprietario2@gmail.com\",\n password=\"PasswordProprietario2\",\n is_staff=\"True\")\n\n #--------------------------------------------\n\n self.user1 = User.objects.create(username=\"User1\",\n email=\"user1@gmail.com\",\n password=\"PasswordUser1\",\n is_staff=\"False\")\n\n self.user2 = User.objects.create(username=\"User2\",\n email=\"user2@gmail.com\",\n password=\"PasswordUser2\",\n is_staff=\"False\")\n\n # --------------------------------------------\n\n self.sezione1 = Sezione.objects.create(user=self.proprietario1,\n nome_sezione=\"hotel1\",\n descrizione=\"descrizione\",\n citta=\"città_test\",\n provincia=\"provincia_test\",\n indirizzo=\"indirizzo_test\",\n logo_sezione=\"null\",\n hotelB=\"True\",\n ristoranteB=\"False\",\n fastFoodB=\"False\",\n casaVacanzaB=\"False\",\n agriturismoB=\"False\")\n\n self.post1 = Post.objects.create(autore_post=self.user1,\n contenuto=\"post_test\",\n rating=5,\n data_creazione=timezone.now(),\n sezione=self.sezione1)\n\n self.post2 = Post.objects.create(autore_post=self.user1,\n contenuto=\"post_test\",\n rating=3,\n data_creazione=timezone.now(),\n sezione=self.sezione1)\n\n def tearDown(self):\n self.proprietario1.delete()\n self.proprietario2.delete()\n # --------------------------------------------\n self.user1.delete()\n self.user2.delete()\n # --------------------------------------------\n self.sezione1.delete()\n self.post1.delete()\n\n\n def test_vsualizzaSezione(self):\n self.client.login(**self.credential)\n response= self.client.get('/forum/sezione/' + str(self.sezione1.id) + '/')\n self.assertTemplateUsed(response, 'forum/singola_sezione.html')\n self.assertEqual(response.status_code, 200)\n\n def test_rating(self):\n self.client.login(**self.credential)\n response = self.client.get('/forum/sezione/' + str(self.sezione1.id) + '/')\n self.assertEqual(response.context['sezione'], self.sezione1)\n self.assertEqual(response.context['media_rating'], 4)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import requests
import time
import urllib
import argparse
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from fake_useragent import UserAgent
from multiprocessing import Pool
from lxml.html import fromstring
import os, sys
#text = 'chowchowbaby'
#url='https://www.google.co.kr/search?q=' + text + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'
def search(url):
#Create a browser
browser=webdriver.Chrome(executable_path='C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe')
#Open the link
browser.get(url)
time.sleep(1)
element=browser.find_element_by_tag_name("body")
#Scroll down
for i in range(30):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
browser.find_element_by_id("smb").click()
for i in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
time.sleep(1)
#Get page source and close the browser
source=browser.page_source
browser.close()
return source
def download_image(link):
# Use a random user agent header
headers = {"User-Agent": ua.random}
# Get the image link
try:
r = requests.get("https://www.google.com" + link.get("href"), headers=headers)
except:
print("Cannot get link.")
title = str(fromstring(r.content).findtext(".//title"))
link = title.split(" ")[-1]
# Download the image
print("At : " + os.getcwd() + ", Downloading from " + link)
try:
if link.split(".")[-1] == ('jpg' or 'png' or 'jpeg'):
urllib.request.urlretrieve(link, link.split("/")[-1])
except:
pass
if __name__ == "__main__":
# parse command line options
parser = argparse.ArgumentParser()
parser.add_argument("keyword", help="the keyword to search")
args = parser.parse_args()
# set stack limit
sys.setrecursionlimit(100000000)
# get user input and search on google
query = args.keyword
#query = input("Enter the name you want to search")
url = "https://www.google.com/search?as_st=y&tbm=isch&as_q=" + query + \
"&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg"
source = search(url)
# Parse the page source and download pics
soup = BeautifulSoup(str(source), "html.parser")
ua = UserAgent()
# check directory and create if necessary
if not os.path.isdir(args.keyword):
os.makedirs(args.keyword)
os.chdir(str(os.getcwd()) + "/" + str(args.keyword))
# get the links
links = soup.find_all("a", class_="rg_l")
# open some processes to download
with Pool() as pool:
pool.map(download_image, links)
# 검색어
#search = 'chowchowbaby'
#url='https://www.google.co.kr/search?q=' + search + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'
# url
#driver = webdriver.Chrome(executable_path="C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe")
#driver.get(url)
#driver.implicitly_wait(2)
#num_of_pagedowns = 50
#elem = driver.find_element_by_xpath('/html/body')
#i = 0
#count = 1
#img = driver.find_elements_by_tag_name("img")
#while i < num_of_pagedowns:
#for item in img:
# if(count>0 and count<502):
# elem.send_keys(Keys.DOWN)
# time.sleep(1)
# full_name = "C:\\Program Files\\Python35\\강아지크롤러\\chowchowbaby\\" + str(count) + "_chowchowbaby.jpg"
# try:
# urllib.request.urlretrieve(item.get_attribute('src'), full_name)
# tfp=open(full_name,url)
# print(item.get_attribute('src')[:30] + " : ")
# except:
# urllib.request.urlretrieve(item.get_attribute('data-src'), full_name)
# tfp=open(full_name,url)
# print(item.get_attribute('data-src')[:30] + " : ")
# count = count+1
# i =i+1
#driver.Quit()
#print("Done.")
|
normal
|
{
"blob_id": "142a2ba3ec2f6b35f4339ed9fffe7357c1a85fa0",
"index": 219,
"step-1": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('keyword', help='the keyword to search')\n args = parser.parse_args()\n sys.setrecursionlimit(100000000)\n query = args.keyword\n url = ('https://www.google.com/search?as_st=y&tbm=isch&as_q=' + query +\n '&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg'\n )\n source = search(url)\n soup = BeautifulSoup(str(source), 'html.parser')\n ua = UserAgent()\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n os.chdir(str(os.getcwd()) + '/' + str(args.keyword))\n links = soup.find_all('a', class_='rg_l')\n with Pool() as pool:\n pool.map(download_image, links)\n",
"step-4": "import requests\nimport time\nimport urllib\nimport argparse\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom fake_useragent import UserAgent\nfrom multiprocessing import Pool\nfrom lxml.html import fromstring\nimport os, sys\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('keyword', help='the keyword to search')\n args = parser.parse_args()\n sys.setrecursionlimit(100000000)\n query = args.keyword\n url = ('https://www.google.com/search?as_st=y&tbm=isch&as_q=' + query +\n '&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg'\n )\n source = search(url)\n soup = BeautifulSoup(str(source), 'html.parser')\n ua = UserAgent()\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n os.chdir(str(os.getcwd()) + '/' + str(args.keyword))\n links = soup.find_all('a', class_='rg_l')\n with Pool() as pool:\n pool.map(download_image, links)\n",
"step-5": "import requests\nimport time\nimport urllib\nimport argparse\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom fake_useragent import UserAgent\nfrom multiprocessing import Pool\nfrom lxml.html import fromstring\nimport os, sys\n\n#text = 'chowchowbaby'\n#url='https://www.google.co.kr/search?q=' + text + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'\n\ndef search(url):\n #Create a browser\n browser=webdriver.Chrome(executable_path='C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n\n #Open the link\n browser.get(url)\n time.sleep(1)\n\n element=browser.find_element_by_tag_name(\"body\")\n\n #Scroll down\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n\n browser.find_element_by_id(\"smb\").click()\n\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n\n time.sleep(1)\n\n #Get page source and close the browser\n source=browser.page_source\n browser.close()\n\n return source\n\n\ndef download_image(link):\n # Use a random user agent header\n headers = {\"User-Agent\": ua.random}\n\n # Get the image link\n try:\n r = requests.get(\"https://www.google.com\" + link.get(\"href\"), headers=headers)\n except:\n print(\"Cannot get link.\")\n title = str(fromstring(r.content).findtext(\".//title\"))\n link = title.split(\" \")[-1]\n\n # Download the image\n print(\"At : \" + os.getcwd() + \", Downloading from \" + link)\n try:\n if link.split(\".\")[-1] == ('jpg' or 'png' or 'jpeg'):\n\n urllib.request.urlretrieve(link, link.split(\"/\")[-1])\n except:\n pass\n\n\nif __name__ == \"__main__\":\n # parse command line options\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"keyword\", help=\"the keyword to search\")\n args = parser.parse_args()\n\n # set stack limit\n sys.setrecursionlimit(100000000)\n\n # get user input and search on google\n query = args.keyword\n\n\n #query = input(\"Enter the name you want to search\")\n\n\n\n url = \"https://www.google.com/search?as_st=y&tbm=isch&as_q=\" + query + \\\n \"&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg\"\n source = search(url)\n\n # Parse the page source and download pics\n soup = BeautifulSoup(str(source), \"html.parser\")\n ua = UserAgent()\n\n # check directory and create if necessary\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n\n os.chdir(str(os.getcwd()) + \"/\" + str(args.keyword))\n # get the links\n links = soup.find_all(\"a\", class_=\"rg_l\")\n\n # open some processes to download\n with Pool() as pool:\n pool.map(download_image, links)\n \n\n\n\n\n\n# 검색어\n#search = 'chowchowbaby'\n#url='https://www.google.co.kr/search?q=' + search + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'\n# url\n#driver = webdriver.Chrome(executable_path=\"C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe\")\n#driver.get(url)\n#driver.implicitly_wait(2)\n\n\n#num_of_pagedowns = 50\n#elem = driver.find_element_by_xpath('/html/body') \n\n#i = 0\n#count = 1\n#img = driver.find_elements_by_tag_name(\"img\")\n\n#while i < num_of_pagedowns:\n#for item in img:\n# if(count>0 and count<502):\n# elem.send_keys(Keys.DOWN)\n# time.sleep(1)\n# full_name = \"C:\\\\Program Files\\\\Python35\\\\강아지크롤러\\\\chowchowbaby\\\\\" + str(count) + \"_chowchowbaby.jpg\"\n# try:\n# 
urllib.request.urlretrieve(item.get_attribute('src'), full_name)\n# tfp=open(full_name,url)\n# print(item.get_attribute('src')[:30] + \" : \")\n# except:\n# urllib.request.urlretrieve(item.get_attribute('data-src'), full_name)\n# tfp=open(full_name,url)\n# print(item.get_attribute('data-src')[:30] + \" : \")\n# count = count+1\n# i =i+1\n\n \n#driver.Quit()\n#print(\"Done.\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BruteForce(Mono):
<|reserved_special_token_0|>
def _count_inconsistencies(self):
if self.num_cores == 1:
for ni in self.nonmatch_indices:
self.index2count[ni] = 0
for mi in self.match_indices:
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
self.index2count[ni] += 1
self.index2count[mi] = count
else:
nmatch = len(self.match_indices)
threads2incons_count = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons_count = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
if ni in local_index2incons_count:
local_index2incons_count[ni] += 1
else:
local_index2incons_count[ni] = 1
if count > 0:
local_index2incons_count[mi] = count
threads2incons_count[p.thread_num] = local_index2incons_count
for _, local_index2incons_count in threads2incons_count.items():
for index, count in local_index2incons_count.items():
if index in self.index2count:
self.index2count[index] += count
else:
self.index2count[index] = count
return self.index2count
def _get_inconsistency_indices(self):
if self.num_cores == 1:
for mi in self.match_indices:
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices) > 0:
self.index2incons[mi] = incons_indices
for ni in incons_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
else:
nmatch = len(self.match_indices)
threads2incons = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices) > 0:
local_index2incons[mi] = incons_indices
threads2incons[p.thread_num] = local_index2incons
for _, local_index2incons in threads2incons.items():
for mi, ni_indices in local_index2incons.items():
self.index2incons[mi] = ni_indices
for ni in ni_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
return self.index2incons
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BruteForce(Mono):
def __init__(self, features, labels, params):
super(BruteForce, self).__init__(features, labels, params)
def _count_inconsistencies(self):
if self.num_cores == 1:
for ni in self.nonmatch_indices:
self.index2count[ni] = 0
for mi in self.match_indices:
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
self.index2count[ni] += 1
self.index2count[mi] = count
else:
nmatch = len(self.match_indices)
threads2incons_count = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons_count = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
if ni in local_index2incons_count:
local_index2incons_count[ni] += 1
else:
local_index2incons_count[ni] = 1
if count > 0:
local_index2incons_count[mi] = count
threads2incons_count[p.thread_num] = local_index2incons_count
for _, local_index2incons_count in threads2incons_count.items():
for index, count in local_index2incons_count.items():
if index in self.index2count:
self.index2count[index] += count
else:
self.index2count[index] = count
return self.index2count
def _get_inconsistency_indices(self):
if self.num_cores == 1:
for mi in self.match_indices:
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices) > 0:
self.index2incons[mi] = incons_indices
for ni in incons_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
else:
nmatch = len(self.match_indices)
threads2incons = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices) > 0:
local_index2incons[mi] = incons_indices
threads2incons[p.thread_num] = local_index2incons
for _, local_index2incons in threads2incons.items():
for mi, ni_indices in local_index2incons.items():
self.index2incons[mi] = ni_indices
for ni in ni_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
return self.index2incons
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pymp
from v6.mono import Mono
class BruteForce(Mono):
def __init__(self, features, labels, params):
super(BruteForce, self).__init__(features, labels, params)
def _count_inconsistencies(self):
if self.num_cores == 1:
for ni in self.nonmatch_indices:
self.index2count[ni] = 0
for mi in self.match_indices:
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
self.index2count[ni] += 1
self.index2count[mi] = count
else:
nmatch = len(self.match_indices)
threads2incons_count = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons_count = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
if ni in local_index2incons_count:
local_index2incons_count[ni] += 1
else:
local_index2incons_count[ni] = 1
if count > 0:
local_index2incons_count[mi] = count
threads2incons_count[p.thread_num] = local_index2incons_count
for _, local_index2incons_count in threads2incons_count.items():
for index, count in local_index2incons_count.items():
if index in self.index2count:
self.index2count[index] += count
else:
self.index2count[index] = count
return self.index2count
def _get_inconsistency_indices(self):
if self.num_cores == 1:
for mi in self.match_indices:
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices) > 0:
self.index2incons[mi] = incons_indices
for ni in incons_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
else:
nmatch = len(self.match_indices)
threads2incons = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features,
self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices) > 0:
local_index2incons[mi] = incons_indices
threads2incons[p.thread_num] = local_index2incons
for _, local_index2incons in threads2incons.items():
for mi, ni_indices in local_index2incons.items():
self.index2incons[mi] = ni_indices
for ni in ni_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
return self.index2incons
<|reserved_special_token_1|>
'''
Created on Mar 7, 2019
@author: hzhang0418
'''
import pymp
from v6.mono import Mono
class BruteForce(Mono):
def __init__(self, features, labels, params):
super(BruteForce, self).__init__(features, labels, params)
def _count_inconsistencies(self):
if self.num_cores==1:
for ni in self.nonmatch_indices:
self.index2count[ni] = 0
for mi in self.match_indices:
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
self.index2count[ni] += 1
self.index2count[mi] = count
else:
nmatch = len(self.match_indices)
threads2incons_count = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons_count = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
if ni in local_index2incons_count:
local_index2incons_count[ni] += 1
else:
local_index2incons_count[ni] = 1
if count>0:
local_index2incons_count[mi] = count
threads2incons_count[p.thread_num] = local_index2incons_count
for _, local_index2incons_count in threads2incons_count.items():
for index, count in local_index2incons_count.items():
if index in self.index2count:
self.index2count[index] += count
else:
self.index2count[index] = count
return self.index2count
def _get_inconsistency_indices(self):
if self.num_cores==1:
for mi in self.match_indices:
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices)>0:
self.index2incons[mi] = incons_indices
for ni in incons_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
else:
nmatch = len(self.match_indices)
threads2incons = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices)>0:
local_index2incons[mi] = incons_indices
threads2incons[p.thread_num] = local_index2incons
for _, local_index2incons in threads2incons.items():
for mi, ni_indices in local_index2incons.items():
self.index2incons[mi] = ni_indices
for ni in ni_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
return self.index2incons
|
flexible
|
{
"blob_id": "32c18bd578bbf91c76604f063421a65a4f7a8b63",
"index": 2204,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BruteForce(Mono):\n <mask token>\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-3": "<mask token>\n\n\nclass BruteForce(Mono):\n\n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-4": "<mask token>\nimport pymp\nfrom v6.mono import Mono\n\n\nclass BruteForce(Mono):\n\n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-5": "'''\nCreated on Mar 7, 2019\n\n@author: hzhang0418\n'''\n\nimport pymp\n\nfrom v6.mono import Mono\n\nclass BruteForce(Mono):\n \n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n \n \n def _count_inconsistencies(self):\n if self.num_cores==1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n \n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n \n else:\n nmatch = len(self.match_indices)\n \n threads2incons_count = pymp.shared.dict()\n \n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n \n count = 0\n \n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n \n if count>0:\n local_index2incons_count[mi] = count\n \n threads2incons_count[p.thread_num] = local_index2incons_count\n \n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count \n \n return self.index2count\n \n \n def _get_inconsistency_indices(self):\n \n if self.num_cores==1:\n \n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n \n if len(incons_indices)>0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n \n else:\n \n nmatch = len(self.match_indices)\n \n threads2incons = pymp.shared.dict()\n \n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n \n incons_indices = []\n \n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n \n if len(incons_indices)>0:\n local_index2incons[mi] = incons_indices\n \n threads2incons[p.thread_num] = local_index2incons\n \n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n \n return self.index2incons",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
# boj, 9237 : 이장님 초대, python3
# 그리디 알고리즘
import sys
def tree(l):
return max([i+j+2 for i,j in enumerate(l)])
N = int(sys.stdin.readline())
t = sorted(list(map(int, sys.stdin.readline().split())), reverse = True)
print(tree(t))
|
normal
|
{
"blob_id": "e79cdd32977eb357c3f6709887b671c50eb1fa45",
"index": 7071,
"step-1": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\n<mask token>\nprint(tree(t))\n",
"step-3": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse=True)\nprint(tree(t))\n",
"step-4": "import sys\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse=True)\nprint(tree(t))\n",
"step-5": "# boj, 9237 : 이장님 초대, python3\n# 그리디 알고리즘\nimport sys\n\ndef tree(l):\n return max([i+j+2 for i,j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse = True)\n\nprint(tree(t))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('.')
BROKER_CONF = {'uid': '{{ mq_user }}', 'pass': '{{ mq_password }}', 'host':
'{{ mq_host }}', 'port': '5672', 'vhost': '{{ mq_vhost }}'}
BROKER_URL = 'amqp://' + BROKER_CONF['uid'] + ':' + BROKER_CONF['pass'
] + '@' + BROKER_CONF['host'] + ':' + BROKER_CONF['port'
] + '/' + BROKER_CONF['vhost']
BROKER_HEARTBEAT = True
CELERY_IMPORTS = 'scanworker.tasks',
<|reserved_special_token_0|>
VALID_SCANNERS = vs()
CELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()
CELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()
<|reserved_special_token_1|>
from scanworker.commonconfig import *
import sys
sys.path.append('.')
BROKER_CONF = {'uid': '{{ mq_user }}', 'pass': '{{ mq_password }}', 'host':
'{{ mq_host }}', 'port': '5672', 'vhost': '{{ mq_vhost }}'}
BROKER_URL = 'amqp://' + BROKER_CONF['uid'] + ':' + BROKER_CONF['pass'
] + '@' + BROKER_CONF['host'] + ':' + BROKER_CONF['port'
] + '/' + BROKER_CONF['vhost']
BROKER_HEARTBEAT = True
CELERY_IMPORTS = 'scanworker.tasks',
from scanworker.tasks import VALID_SCANNERS as vs
VALID_SCANNERS = vs()
CELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()
CELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()
<|reserved_special_token_1|>
## This file is the celeryconfig for the Task Worker (scanworker).
from scanworker.commonconfig import *
import sys
sys.path.append('.')
BROKER_CONF = {
'uid' : '{{ mq_user }}',
'pass' : '{{ mq_password }}',
'host' : '{{ mq_host }}',
'port' : '5672',
'vhost' : '{{ mq_vhost }}',
}
BROKER_URL = 'amqp://'+BROKER_CONF['uid']+':'+BROKER_CONF['pass']+'@'+BROKER_CONF['host']+':'+BROKER_CONF['port']+'/'+BROKER_CONF['vhost']
BROKER_HEARTBEAT=True
CELERY_IMPORTS = ('scanworker.tasks',)
from scanworker.tasks import VALID_SCANNERS as vs
VALID_SCANNERS=vs()
CELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()
CELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()
|
flexible
|
{
"blob_id": "1a569b88c350124968212cb910bef7b09b166152",
"index": 8990,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('.')\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('.')\nBROKER_CONF = {'uid': '{{ mq_user }}', 'pass': '{{ mq_password }}', 'host':\n '{{ mq_host }}', 'port': '5672', 'vhost': '{{ mq_vhost }}'}\nBROKER_URL = 'amqp://' + BROKER_CONF['uid'] + ':' + BROKER_CONF['pass'\n ] + '@' + BROKER_CONF['host'] + ':' + BROKER_CONF['port'\n ] + '/' + BROKER_CONF['vhost']\nBROKER_HEARTBEAT = True\nCELERY_IMPORTS = 'scanworker.tasks',\n<mask token>\nVALID_SCANNERS = vs()\nCELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()\nCELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()\n",
"step-4": "from scanworker.commonconfig import *\nimport sys\nsys.path.append('.')\nBROKER_CONF = {'uid': '{{ mq_user }}', 'pass': '{{ mq_password }}', 'host':\n '{{ mq_host }}', 'port': '5672', 'vhost': '{{ mq_vhost }}'}\nBROKER_URL = 'amqp://' + BROKER_CONF['uid'] + ':' + BROKER_CONF['pass'\n ] + '@' + BROKER_CONF['host'] + ':' + BROKER_CONF['port'\n ] + '/' + BROKER_CONF['vhost']\nBROKER_HEARTBEAT = True\nCELERY_IMPORTS = 'scanworker.tasks',\nfrom scanworker.tasks import VALID_SCANNERS as vs\nVALID_SCANNERS = vs()\nCELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()\nCELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()\n",
"step-5": "\n## This file is the celeryconfig for the Task Worker (scanworker).\nfrom scanworker.commonconfig import *\nimport sys\nsys.path.append('.')\n\n\nBROKER_CONF = {\n 'uid' \t: '{{ mq_user }}',\n 'pass' \t: '{{ mq_password }}',\n 'host' \t: '{{ mq_host }}',\n 'port' \t: '5672',\n 'vhost' \t: '{{ mq_vhost }}',\n}\nBROKER_URL = 'amqp://'+BROKER_CONF['uid']+':'+BROKER_CONF['pass']+'@'+BROKER_CONF['host']+':'+BROKER_CONF['port']+'/'+BROKER_CONF['vhost']\n\nBROKER_HEARTBEAT=True\nCELERY_IMPORTS = ('scanworker.tasks',)\nfrom scanworker.tasks import VALID_SCANNERS as vs\nVALID_SCANNERS=vs()\nCELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()\nCELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Author: Kenneth Lui <hkkenneth@gmail.com>
# Last Updated on: 01-11-2012
## Usage: python ~/code/python/001_Fastq_Trimming.py <FIRST BASE> <LAST BASE> <FASTQ FILES....>
## Bases are inclusive and 1-based
#from Bio.SeqIO.QualityIO import FastqGeneralIterator
#handle = open(sys.argv[2], 'w')
#for title, seq, qual in FastqGeneralIterator(open(sys.argv[1])):
#    handle.write("@%s\n%s\n+\n%s\n" % (title, seq[...:...], qual[...:...]))
#handle.close()
from Bio import SeqIO
import sys
RECORD_BUFFER_SIZE = 100000
start = int(sys.argv[1]) - 1
end = int(sys.argv[2])
for s in sys.argv[3:]:
file = open(s + "." + sys.argv[1] + "-" + sys.argv[2] + ".trimmed", 'w')
r_list = []
size = 0
for r in SeqIO.parse(s, "fastq"):
r_list.append(r[start:end])
size += 1
if size == RECORD_BUFFER_SIZE:
SeqIO.write(r_list, file, "fastq")
r_list = []
size = 0
if size > 0:
SeqIO.write(r_list, file, "fastq")
file.close()
|
normal
|
{
"blob_id": "4a8663531f303da29371078e34dc7224fc4580e3",
"index": 6283,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor s in sys.argv[3:]:\n file = open(s + '.' + sys.argv[1] + '-' + sys.argv[2] + '.trimmed', 'w')\n r_list = []\n size = 0\n for r in SeqIO.parse(s, 'fastq'):\n r_list.append(r[start:end])\n size += 1\n if size == RECORD_BUFFER_SIZE:\n SeqIO.write(r_list, file, 'fastq')\n r_list = []\n size = 0\n if size > 0:\n SeqIO.write(r_list, file, 'fastq')\n file.close()\n",
"step-3": "<mask token>\nRECORD_BUFFER_SIZE = 100000\nstart = int(sys.argv[1]) - 1\nend = int(sys.argv[2])\nfor s in sys.argv[3:]:\n file = open(s + '.' + sys.argv[1] + '-' + sys.argv[2] + '.trimmed', 'w')\n r_list = []\n size = 0\n for r in SeqIO.parse(s, 'fastq'):\n r_list.append(r[start:end])\n size += 1\n if size == RECORD_BUFFER_SIZE:\n SeqIO.write(r_list, file, 'fastq')\n r_list = []\n size = 0\n if size > 0:\n SeqIO.write(r_list, file, 'fastq')\n file.close()\n",
"step-4": "from Bio import SeqIO\nimport sys\nRECORD_BUFFER_SIZE = 100000\nstart = int(sys.argv[1]) - 1\nend = int(sys.argv[2])\nfor s in sys.argv[3:]:\n file = open(s + '.' + sys.argv[1] + '-' + sys.argv[2] + '.trimmed', 'w')\n r_list = []\n size = 0\n for r in SeqIO.parse(s, 'fastq'):\n r_list.append(r[start:end])\n size += 1\n if size == RECORD_BUFFER_SIZE:\n SeqIO.write(r_list, file, 'fastq')\n r_list = []\n size = 0\n if size > 0:\n SeqIO.write(r_list, file, 'fastq')\n file.close()\n",
"step-5": "# Author: Kenneth Lui <hkkenneth@gmail.com>\n# Last Updated on: 01-11-2012\n## Usage: python ~/code/python/001_Fastq_Trimming.py <FIRST BASE> <LAST BASE> <FASTQ FILES....>\n## Bases are inclusive and 1-based\n\n#from Bio.SeqIO.QualityIO import FastqGeneralIterator\n#handle = open(sys.argv[2], 'w')\n#for title, seq, qual in FastqGeneralIterator(open(sys.argv[1])):\n# handle.write(\"@%s\\n%s\\n+\\n%\\n\" % (title, seq[...:...], qual[...:...]))\n#handle.close()\n\nfrom Bio import SeqIO\nimport sys\n\nRECORD_BUFFER_SIZE = 100000\n\nstart = int(sys.argv[1]) - 1\nend = int(sys.argv[2])\n\nfor s in sys.argv[3:]:\n\tfile = open(s + \".\" + sys.argv[1] + \"-\" + sys.argv[2] + \".trimmed\", 'w')\n\tr_list = []\n\tsize = 0\n\tfor r in SeqIO.parse(s, \"fastq\"):\n\t\tr_list.append(r[start:end])\n\t\tsize += 1\n\t\tif size == RECORD_BUFFER_SIZE: \n\t\t\tSeqIO.write(r_list, file, \"fastq\")\n\t\t\tr_list = []\n\t\t\tsize = 0\n\tif size > 0: \n\t\tSeqIO.write(r_list, file, \"fastq\")\n\tfile.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Reader(Thread):
<|reserved_special_token_0|>
def __del__(self):
self._frame = None
self._stream.release()
<|reserved_special_token_0|>
def read(self):
return self._frame
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Reader(Thread):
<|reserved_special_token_0|>
def __del__(self):
self._frame = None
self._stream.release()
def run(self):
while True:
ret, frame = self._stream.read()
if not ret:
self._frame = None
break
self._frame = frame
def read(self):
return self._frame
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Reader(Thread):
def __init__(self, width, height, device=0):
super().__init__(daemon=True)
self._stream = cv.VideoCapture(device)
self._stream.set(cv.CAP_PROP_FRAME_WIDTH, width)
self._stream.set(cv.CAP_PROP_FRAME_HEIGHT, height)
self._frame = None
self.start()
def __del__(self):
self._frame = None
self._stream.release()
def run(self):
while True:
ret, frame = self._stream.read()
if not ret:
self._frame = None
break
self._frame = frame
def read(self):
return self._frame
<|reserved_special_token_1|>
import cv2 as cv
from threading import Thread
class Reader(Thread):
def __init__(self, width, height, device=0):
super().__init__(daemon=True)
self._stream = cv.VideoCapture(device)
self._stream.set(cv.CAP_PROP_FRAME_WIDTH, width)
self._stream.set(cv.CAP_PROP_FRAME_HEIGHT, height)
self._frame = None
self.start()
def __del__(self):
self._frame = None
self._stream.release()
def run(self):
while True:
ret, frame = self._stream.read()
if not ret:
self._frame = None
break
self._frame = frame
def read(self):
return self._frame
|
flexible
|
{
"blob_id": "73bf31e43394c3f922b00b2cfcd5d88cc0e01094",
"index": 2339,
"step-1": "<mask token>\n\n\nclass Reader(Thread):\n <mask token>\n\n def __del__(self):\n self._frame = None\n self._stream.release()\n <mask token>\n\n def read(self):\n return self._frame\n",
"step-2": "<mask token>\n\n\nclass Reader(Thread):\n <mask token>\n\n def __del__(self):\n self._frame = None\n self._stream.release()\n\n def run(self):\n while True:\n ret, frame = self._stream.read()\n if not ret:\n self._frame = None\n break\n self._frame = frame\n\n def read(self):\n return self._frame\n",
"step-3": "<mask token>\n\n\nclass Reader(Thread):\n\n def __init__(self, width, height, device=0):\n super().__init__(daemon=True)\n self._stream = cv.VideoCapture(device)\n self._stream.set(cv.CAP_PROP_FRAME_WIDTH, width)\n self._stream.set(cv.CAP_PROP_FRAME_HEIGHT, height)\n self._frame = None\n self.start()\n\n def __del__(self):\n self._frame = None\n self._stream.release()\n\n def run(self):\n while True:\n ret, frame = self._stream.read()\n if not ret:\n self._frame = None\n break\n self._frame = frame\n\n def read(self):\n return self._frame\n",
"step-4": "import cv2 as cv\nfrom threading import Thread\n\n\nclass Reader(Thread):\n\n def __init__(self, width, height, device=0):\n super().__init__(daemon=True)\n self._stream = cv.VideoCapture(device)\n self._stream.set(cv.CAP_PROP_FRAME_WIDTH, width)\n self._stream.set(cv.CAP_PROP_FRAME_HEIGHT, height)\n self._frame = None\n self.start()\n\n def __del__(self):\n self._frame = None\n self._stream.release()\n\n def run(self):\n while True:\n ret, frame = self._stream.read()\n if not ret:\n self._frame = None\n break\n self._frame = frame\n\n def read(self):\n return self._frame\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
import torch as th
from tpp.processes.hawkes.r_terms_recursive_v import get_r_terms
from tpp.utils.test import get_test_events_query
def run_test():
marks = 3
events, query = get_test_events_query(marks=marks)
beta = th.rand([marks, marks])
get_r_terms(events=events, beta=beta)
if __name__ == '__main__':
run_test()
|
normal
|
{
"blob_id": "2681bd9fe93a4d61214b7c45e5d73097ab73dc07",
"index": 5486,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_test():\n marks = 3\n events, query = get_test_events_query(marks=marks)\n beta = th.rand([marks, marks])\n get_r_terms(events=events, beta=beta)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_test():\n marks = 3\n events, query = get_test_events_query(marks=marks)\n beta = th.rand([marks, marks])\n get_r_terms(events=events, beta=beta)\n\n\nif __name__ == '__main__':\n run_test()\n",
"step-4": "import torch as th\nfrom tpp.processes.hawkes.r_terms_recursive_v import get_r_terms\nfrom tpp.utils.test import get_test_events_query\n\n\ndef run_test():\n marks = 3\n events, query = get_test_events_query(marks=marks)\n beta = th.rand([marks, marks])\n get_r_terms(events=events, beta=beta)\n\n\nif __name__ == '__main__':\n run_test()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import unittest2 as unittest
from zope.component import getUtility
from plone.registry.interfaces import IRegistry
from plone.testing.z2 import Browser
from plone.app.testing import SITE_OWNER_NAME, SITE_OWNER_PASSWORD
from openmultimedia.imagewatchdog.configlet import IImageWatchDogSettings
from openmultimedia.imagewatchdog.testing import \
OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING
class TestConfiglet(unittest.TestCase):
layer = OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING
def setUp(self):
self.app = self.layer['app']
self.portal = self.layer['portal']
def test_default_config(self):
""" Validate the default values
"""
registry = getUtility(IRegistry)
settings = registry.forInterface(IImageWatchDogSettings)
self.assertEqual(settings.source_formats, ['JPEG', 'GIF'])
self.assertFalse(settings.optimize)
self.assertFalse(settings.enabled)
def test_change_config(self):
""" Validate the default values
"""
browser = Browser(self.app)
portalURL = self.portal.absolute_url()
browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))
browser.open(portalURL + '/@@overview-controlpanel')
browser.getLink('Image WatchDog settings').click()
browser.getControl('Optimize PNG').selected = True
browser.getControl('Enabled').selected = True
browser.getControl('Save').click()
registry = getUtility(IRegistry)
settings = registry.forInterface(IImageWatchDogSettings)
self.assertTrue(settings.optimize)
self.assertTrue(settings.enabled)
def test_cancel_config(self):
""" Validate the default values
"""
browser = Browser(self.app)
portalURL = self.portal.absolute_url()
browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))
browser.open(portalURL + '/@@overview-controlpanel')
browser.getLink('Image WatchDog settings').click()
browser.getControl('Optimize PNG').selected = True
browser.getControl('Enabled').selected = True
browser.getControl('Cancel').click()
registry = getUtility(IRegistry)
settings = registry.forInterface(IImageWatchDogSettings)
self.assertFalse(settings.optimize)
self.assertFalse(settings.enabled)
def test_migrate_button(self):
""" Check for the migrate button
"""
browser = Browser(self.app)
portalURL = self.portal.absolute_url()
browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))
browser.open(portalURL + '/@@overview-controlpanel')
browser.getLink('Image WatchDog settings').click()
browser.getControl('Enabled').selected = True
browser.getControl('Save').click()
# Now there is a migrate button
browser.open(portalURL + '/@@overview-controlpanel')
browser.getLink('Image WatchDog settings').click()
browser.getControl('Optimize PNG').selected = True
browser.getControl('Migrate').click()
registry = getUtility(IRegistry)
settings = registry.forInterface(IImageWatchDogSettings)
self.assertTrue(settings.optimize)
self.assertTrue(settings.enabled)
|
normal
|
{
"blob_id": "ce5f91aa04065aac4d4bc7bdbaab3b74c5a85a93",
"index": 8752,
"step-1": "<mask token>\n\n\nclass TestConfiglet(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_default_config(self):\n \"\"\" Validate the default values\n \"\"\"\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertEqual(settings.source_formats, ['JPEG', 'GIF'])\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_change_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n\n def test_cancel_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Cancel').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestConfiglet(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.app = self.layer['app']\n self.portal = self.layer['portal']\n\n def test_default_config(self):\n \"\"\" Validate the default values\n \"\"\"\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertEqual(settings.source_formats, ['JPEG', 'GIF'])\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_change_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n\n def test_cancel_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Cancel').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_migrate_button(self):\n \"\"\" Check for the migrate button\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Migrate').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n",
"step-3": "<mask token>\n\n\nclass TestConfiglet(unittest.TestCase):\n layer = OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING\n\n def setUp(self):\n self.app = self.layer['app']\n self.portal = self.layer['portal']\n\n def test_default_config(self):\n \"\"\" Validate the default values\n \"\"\"\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertEqual(settings.source_formats, ['JPEG', 'GIF'])\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_change_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n\n def test_cancel_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Cancel').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_migrate_button(self):\n \"\"\" Check for the migrate button\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Migrate').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n",
"step-4": "import unittest2 as unittest\nfrom zope.component import getUtility\nfrom plone.registry.interfaces import IRegistry\nfrom plone.testing.z2 import Browser\nfrom plone.app.testing import SITE_OWNER_NAME, SITE_OWNER_PASSWORD\nfrom openmultimedia.imagewatchdog.configlet import IImageWatchDogSettings\nfrom openmultimedia.imagewatchdog.testing import OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING\n\n\nclass TestConfiglet(unittest.TestCase):\n layer = OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING\n\n def setUp(self):\n self.app = self.layer['app']\n self.portal = self.layer['portal']\n\n def test_default_config(self):\n \"\"\" Validate the default values\n \"\"\"\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertEqual(settings.source_formats, ['JPEG', 'GIF'])\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_change_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n\n def test_cancel_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Cancel').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_migrate_button(self):\n \"\"\" Check for the migrate button\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME,\n SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Migrate').click()\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n",
"step-5": "import unittest2 as unittest\n\nfrom zope.component import getUtility\nfrom plone.registry.interfaces import IRegistry\nfrom plone.testing.z2 import Browser\nfrom plone.app.testing import SITE_OWNER_NAME, SITE_OWNER_PASSWORD\n\nfrom openmultimedia.imagewatchdog.configlet import IImageWatchDogSettings\nfrom openmultimedia.imagewatchdog.testing import \\\n OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING\n\n\nclass TestConfiglet(unittest.TestCase):\n\n layer = OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING\n\n def setUp(self):\n self.app = self.layer['app']\n self.portal = self.layer['portal']\n\n def test_default_config(self):\n \"\"\" Validate the default values\n \"\"\"\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertEqual(settings.source_formats, ['JPEG', 'GIF'])\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_change_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n\n def test_cancel_config(self):\n \"\"\" Validate the default values\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Cancel').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)\n\n def test_migrate_button(self):\n \"\"\" Check for the migrate button\n \"\"\"\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n # Now there is a migrate button\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Migrate').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
# -*- coding: utf-8 -*-
# __author__ = 'XingHuan'
# 3/27/2018
import os
import imageio
import time
os.environ['IMAGEIO_FFMPEG_EXE'] = 'D:/Program Files/ffmpeg-3.4/bin/ffmpeg.exe'
reader = imageio.get_reader('test1080.mov')
print reader
fps = reader.get_meta_data()['fps']
print fps
# for i, im in enumerate(reader):
# print i
nums = [10, 200]
for num in nums:
a = time.time()
image = reader.get_data(num)
b = time.time()
print b - a
# print image
|
normal
|
{
"blob_id": "1e292872c0c3c7f4ec0115f0769f9145ef595ead",
"index": 8325,
"step-1": "# -*- coding: utf-8 -*-\n# __author__ = 'XingHuan'\n# 3/27/2018\n\nimport os\nimport imageio\nimport time\n\nos.environ['IMAGEIO_FFMPEG_EXE'] = 'D:/Program Files/ffmpeg-3.4/bin/ffmpeg.exe'\n\n\nreader = imageio.get_reader('test1080.mov')\nprint reader\nfps = reader.get_meta_data()['fps']\nprint fps\n\n\n# for i, im in enumerate(reader):\n# print i\n\nnums = [10, 200]\nfor num in nums:\n a = time.time()\n image = reader.get_data(num)\n b = time.time()\n print b - a\n # print image",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Custom shopping-cart item class
class CartItem():
def __init__(self, book, amount):
self.book = book
self.amount = int(amount)
# Custom shopping cart
class Cart():
def __init__(self):
self.book_list = []
self.total = 0
self.save = 0
def total_price(self):
ele = 0
for i in self.book_list:
ele += i.book.book_dprice*i.amount
self.total = round(ele,2)
return self
def save_money(self):
befor_save = 0
for i in self.book_list:
befor_save += i.book.book_price*i.amount
self.save = round(befor_save - self.total,2)
print("节省",self.save)
return self
    # Define add-to-cart
def add_books(self, book, amount):
        # Check whether the book is already in the cart item list
print("加入中")
for i in self.book_list:
if i.book == book:
i.amount += int(amount)
return self
self.book_list.append(CartItem(book, int(amount)))
print("加完了",self.book_list)
return self
def del_books(self, book):
print("删除中")
for i in self.book_list:
if i.book == book:
self.book_list.remove(i)
print("删完了", self.book_list)
return self
|
normal
|
{
"blob_id": "58efaad41d02bb5dffbf71c478c7fad12af68e5b",
"index": 9900,
"step-1": "<mask token>\n\n\nclass Cart:\n\n def __init__(self):\n self.book_list = []\n self.total = 0\n self.save = 0\n\n def total_price(self):\n ele = 0\n for i in self.book_list:\n ele += i.book.book_dprice * i.amount\n self.total = round(ele, 2)\n return self\n <mask token>\n\n def add_books(self, book, amount):\n print('加入中')\n for i in self.book_list:\n if i.book == book:\n i.amount += int(amount)\n return self\n self.book_list.append(CartItem(book, int(amount)))\n print('加完了', self.book_list)\n return self\n\n def del_books(self, book):\n print('删除中')\n for i in self.book_list:\n if i.book == book:\n self.book_list.remove(i)\n print('删完了', self.book_list)\n return self\n",
"step-2": "<mask token>\n\n\nclass Cart:\n\n def __init__(self):\n self.book_list = []\n self.total = 0\n self.save = 0\n\n def total_price(self):\n ele = 0\n for i in self.book_list:\n ele += i.book.book_dprice * i.amount\n self.total = round(ele, 2)\n return self\n\n def save_money(self):\n befor_save = 0\n for i in self.book_list:\n befor_save += i.book.book_price * i.amount\n self.save = round(befor_save - self.total, 2)\n print('节省', self.save)\n return self\n\n def add_books(self, book, amount):\n print('加入中')\n for i in self.book_list:\n if i.book == book:\n i.amount += int(amount)\n return self\n self.book_list.append(CartItem(book, int(amount)))\n print('加完了', self.book_list)\n return self\n\n def del_books(self, book):\n print('删除中')\n for i in self.book_list:\n if i.book == book:\n self.book_list.remove(i)\n print('删完了', self.book_list)\n return self\n",
"step-3": "class CartItem:\n <mask token>\n\n\nclass Cart:\n\n def __init__(self):\n self.book_list = []\n self.total = 0\n self.save = 0\n\n def total_price(self):\n ele = 0\n for i in self.book_list:\n ele += i.book.book_dprice * i.amount\n self.total = round(ele, 2)\n return self\n\n def save_money(self):\n befor_save = 0\n for i in self.book_list:\n befor_save += i.book.book_price * i.amount\n self.save = round(befor_save - self.total, 2)\n print('节省', self.save)\n return self\n\n def add_books(self, book, amount):\n print('加入中')\n for i in self.book_list:\n if i.book == book:\n i.amount += int(amount)\n return self\n self.book_list.append(CartItem(book, int(amount)))\n print('加完了', self.book_list)\n return self\n\n def del_books(self, book):\n print('删除中')\n for i in self.book_list:\n if i.book == book:\n self.book_list.remove(i)\n print('删完了', self.book_list)\n return self\n",
"step-4": "class CartItem:\n\n def __init__(self, book, amount):\n self.book = book\n self.amount = int(amount)\n\n\nclass Cart:\n\n def __init__(self):\n self.book_list = []\n self.total = 0\n self.save = 0\n\n def total_price(self):\n ele = 0\n for i in self.book_list:\n ele += i.book.book_dprice * i.amount\n self.total = round(ele, 2)\n return self\n\n def save_money(self):\n befor_save = 0\n for i in self.book_list:\n befor_save += i.book.book_price * i.amount\n self.save = round(befor_save - self.total, 2)\n print('节省', self.save)\n return self\n\n def add_books(self, book, amount):\n print('加入中')\n for i in self.book_list:\n if i.book == book:\n i.amount += int(amount)\n return self\n self.book_list.append(CartItem(book, int(amount)))\n print('加完了', self.book_list)\n return self\n\n def del_books(self, book):\n print('删除中')\n for i in self.book_list:\n if i.book == book:\n self.book_list.remove(i)\n print('删完了', self.book_list)\n return self\n",
"step-5": "# 自定义购物车项类\nclass CartItem():\n def __init__(self, book, amount):\n self.book = book\n self.amount = int(amount)\n\n# 自定义购物车\nclass Cart():\n def __init__(self):\n self.book_list = []\n self.total = 0\n self.save = 0\n\n def total_price(self):\n ele = 0\n for i in self.book_list:\n ele += i.book.book_dprice*i.amount\n self.total = round(ele,2)\n return self\n\n def save_money(self):\n befor_save = 0\n for i in self.book_list:\n befor_save += i.book.book_price*i.amount\n self.save = round(befor_save - self.total,2)\n print(\"节省\",self.save)\n return self\n # 定义添加购物车\n def add_books(self, book, amount):\n # 判断图书已经在购物车项列表中\n print(\"加入中\")\n for i in self.book_list:\n if i.book == book:\n i.amount += int(amount)\n return self\n self.book_list.append(CartItem(book, int(amount)))\n print(\"加完了\",self.book_list)\n return self\n\n def del_books(self, book):\n print(\"删除中\")\n for i in self.book_list:\n if i.book == book:\n self.book_list.remove(i)\n print(\"删完了\", self.book_list)\n return self",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
"""APP Cloud Connect errors"""
class CCEError(Exception):
pass
class ConfigException(CCEError):
"""Config exception"""
pass
class FuncException(CCEError):
"""Ext function call exception"""
pass
class HTTPError(CCEError):
""" HTTPError raised when HTTP request returned a error."""
def __init__(self, reason=None):
"""
Initialize HTTPError with `response` object and `status`.
"""
self.reason = reason
super(HTTPError, self).__init__(reason)
class StopCCEIteration(CCEError):
"""Exception to exit from the engine iteration."""
pass
class CCESplitError(CCEError):
"""Exception to exit the job in Split Task"""
pass
class QuitJobError(CCEError):
pass
|
normal
|
{
"blob_id": "e2840eb1b0d731d6b0356835ba371d05ba351ff6",
"index": 5323,
"step-1": "<mask token>\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-2": "<mask token>\n\n\nclass FuncException(CCEError):\n <mask token>\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-3": "<mask token>\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-4": "<mask token>\n\n\nclass ConfigException(CCEError):\n \"\"\"Config exception\"\"\"\n pass\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-5": "\"\"\"APP Cloud Connect errors\"\"\"\n\n\nclass CCEError(Exception):\n pass\n\n\nclass ConfigException(CCEError):\n \"\"\"Config exception\"\"\"\n pass\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-ids": [
8,
9,
10,
12,
14
]
}
|
[
8,
9,
10,
12,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while i >= 100:
print(i)
i -= 1
print(i)
<|reserved_special_token_1|>
i = 100
while i >= 100:
print(i)
i -= 1
print(i)
|
flexible
|
{
"blob_id": "9527743802a0bb680ab3dcf325c0f7749a51afc6",
"index": 5949,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i >= 100:\n print(i)\ni -= 1\nprint(i)\n",
"step-3": "i = 100\nwhile i >= 100:\n print(i)\ni -= 1\nprint(i)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class RegexCompiles:
re_compile_product_id = re.compile('Product-Id=[0-9]*')
re_compile_id = re.compile('[0-9]+')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RegexCompiles:
re_compile_product_id = re.compile('Product-Id=[0-9]*')
re_compile_id = re.compile('[0-9]+')
<|reserved_special_token_0|>
def verify_card_title(title, given_title) ->bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
def get_product_id(link_to_product) ->int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product
).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RegexCompiles:
re_compile_product_id = re.compile('Product-Id=[0-9]*')
re_compile_id = re.compile('[0-9]+')
def find_whole_word(text, word) ->bool:
return re.compile('\\b({0})\\b'.format(word), flags=re.IGNORECASE).search(
text)
def verify_card_title(title, given_title) ->bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
def get_product_id(link_to_product) ->int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product
).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
<|reserved_special_token_1|>
import re
class RegexCompiles:
re_compile_product_id = re.compile('Product-Id=[0-9]*')
re_compile_id = re.compile('[0-9]+')
def find_whole_word(text, word) ->bool:
return re.compile('\\b({0})\\b'.format(word), flags=re.IGNORECASE).search(
text)
def verify_card_title(title, given_title) ->bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
def get_product_id(link_to_product) ->int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product
).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
<|reserved_special_token_1|>
import re
# Class with static regex compilations
class RegexCompiles:
# regex for finding product-id in an EMAG link
re_compile_product_id = re.compile('Product-Id=[0-9]*')
# regex for finding the first number
re_compile_id = re.compile('[0-9]+')
# Verifies if a word exists in a text
def find_whole_word(text, word) -> bool:
return re.compile(r'\b({0})\b'.format(word), flags=re.IGNORECASE).search(text)
# Verifies if all the words in a given title (given_title) exist in another title (title)
def verify_card_title(title, given_title) -> bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
# Returns the product id from an emag link
def get_product_id(link_to_product) -> int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
|
flexible
|
{
"blob_id": "b1c06e9c5516a378c0bbce2ce9e17afaeae01928",
"index": 668,
"step-1": "<mask token>\n\n\nclass RegexCompiles:\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n re_compile_id = re.compile('[0-9]+')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RegexCompiles:\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n re_compile_id = re.compile('[0-9]+')\n\n\n<mask token>\n\n\ndef verify_card_title(title, given_title) ->bool:\n title = title.lower()\n given_title = given_title.lower()\n for token in given_title.strip().split():\n if find_whole_word(title, token) is None:\n return False\n return True\n\n\ndef get_product_id(link_to_product) ->int:\n s_matched = RegexCompiles.re_compile_product_id.search(link_to_product\n ).group()\n id_matched = RegexCompiles.re_compile_id.search(s_matched).group()\n return int(id_matched)\n",
"step-3": "<mask token>\n\n\nclass RegexCompiles:\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n re_compile_id = re.compile('[0-9]+')\n\n\ndef find_whole_word(text, word) ->bool:\n return re.compile('\\\\b({0})\\\\b'.format(word), flags=re.IGNORECASE).search(\n text)\n\n\ndef verify_card_title(title, given_title) ->bool:\n title = title.lower()\n given_title = given_title.lower()\n for token in given_title.strip().split():\n if find_whole_word(title, token) is None:\n return False\n return True\n\n\ndef get_product_id(link_to_product) ->int:\n s_matched = RegexCompiles.re_compile_product_id.search(link_to_product\n ).group()\n id_matched = RegexCompiles.re_compile_id.search(s_matched).group()\n return int(id_matched)\n",
"step-4": "import re\n\n\nclass RegexCompiles:\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n re_compile_id = re.compile('[0-9]+')\n\n\ndef find_whole_word(text, word) ->bool:\n return re.compile('\\\\b({0})\\\\b'.format(word), flags=re.IGNORECASE).search(\n text)\n\n\ndef verify_card_title(title, given_title) ->bool:\n title = title.lower()\n given_title = given_title.lower()\n for token in given_title.strip().split():\n if find_whole_word(title, token) is None:\n return False\n return True\n\n\ndef get_product_id(link_to_product) ->int:\n s_matched = RegexCompiles.re_compile_product_id.search(link_to_product\n ).group()\n id_matched = RegexCompiles.re_compile_id.search(s_matched).group()\n return int(id_matched)\n",
"step-5": "import re\n\n\n# Class with static regex compilations\nclass RegexCompiles:\n # regex for finding product-id in an EMAG link\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n # regex for finding the first number\n re_compile_id = re.compile('[0-9]+')\n\n\n# Verifies if a word exists in a text\ndef find_whole_word(text, word) -> bool:\n return re.compile(r'\\b({0})\\b'.format(word), flags=re.IGNORECASE).search(text)\n\n\n# Verifies if all the words in a given title (given_title) exist in another title (title)\ndef verify_card_title(title, given_title) -> bool:\n title = title.lower()\n given_title = given_title.lower()\n for token in given_title.strip().split():\n if find_whole_word(title, token) is None:\n return False\n return True\n\n\n# Returns the product id from an emag link\ndef get_product_id(link_to_product) -> int:\n s_matched = RegexCompiles.re_compile_product_id.search(link_to_product).group()\n id_matched = RegexCompiles.re_compile_id.search(s_matched).group()\n return int(id_matched)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
@require_superuser
def index(request):
template_name = 'users/index.html'
msg = ''
try:
users = User.objects.exclude(id=request.user.id)
except:
msg = _('Unable to list users.')
LOG.error(msg)
users = []
paginator = Paginator(users, PAGE_SIZE)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return render(request, template_name, {'users': users, 'message': msg})
@require_superuser
def create(request):
template_name = 'users/create_user.html'
msg = ''
user_form = UserCreateForm()
if request.method == 'POST':
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
try:
new_user = User.objects.create_user(request.POST['username'
], request.POST['email'], request.POST['password'])
new_user.save()
msg = _('Success create user "%s"') % user_form.cleaned_data[
'username'].encode('utf-8')
LOG.info(msg)
except IntegrityError:
msg = _('User already exist, please try another username.')
LOG.error(msg)
except:
msg = _('Unable to create user "%s"') % user_form.cleaned_data[
'username'].encode('utf-8')
LOG.error(msg)
return render(request, template_name, {'user_form': user_form,
'message': msg})
@require_superuser
def delete(request, user_id):
try:
User.objects.get(id=user_id).delete()
except Exception:
msg = _('Unable to delete user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def deactivate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = False
user.save()
except:
msg = _('Unable to deactivate user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def activate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = True
user.save()
except:
msg = _('Unable to activate user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser_or_self
def edit(request, user_id):
template_name = 'users/update_user.html'
msg = ''
user = User.objects.get(id=user_id)
user_form = UserEditForm(initial={'username': user.username, 'email':
user.email})
if request.method == 'POST':
user_form = UserEditForm(request.POST)
if user_form.is_valid():
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
if username:
user.username = username
if email:
user.email = email
if password:
user.set_password(password)
user.save()
msg = _('Success updated user "%s"') % username.encode('utf-8')
LOG.info(msg)
return render(request, template_name, {'user_id': user_id, 'user_form':
user_form, 'message': msg})
def login(request):
template_name = 'auth/login.html'
msg = ''
if request.user.is_authenticated():
return redirect(reverse('keys:index'))
form = LoginForm
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
django_login(request, user)
msg = _('%s logged in successfully.') % username.encode(
'utf-8')
LOG.info(msg)
return redirect(reverse('keys:index'))
msg = _('Invalid username or password.')
LOG.error(msg)
return render(request, template_name, {'user_form': form, 'message': msg})
def logout(request):
django_logout(request)
return redirect(reverse('index'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def require_superuser_or_self(func):
def check(request, user_id):
if request.user.is_superuser or user_id.encode('utf-8') == str(request
.user.id):
return func(request, user_id)
return render(request, '403.html')
return check
@require_superuser
def index(request):
template_name = 'users/index.html'
msg = ''
try:
users = User.objects.exclude(id=request.user.id)
except:
msg = _('Unable to list users.')
LOG.error(msg)
users = []
paginator = Paginator(users, PAGE_SIZE)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return render(request, template_name, {'users': users, 'message': msg})
@require_superuser
def create(request):
template_name = 'users/create_user.html'
msg = ''
user_form = UserCreateForm()
if request.method == 'POST':
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
try:
new_user = User.objects.create_user(request.POST['username'
], request.POST['email'], request.POST['password'])
new_user.save()
msg = _('Success create user "%s"') % user_form.cleaned_data[
'username'].encode('utf-8')
LOG.info(msg)
except IntegrityError:
msg = _('User already exist, please try another username.')
LOG.error(msg)
except:
msg = _('Unable to create user "%s"') % user_form.cleaned_data[
'username'].encode('utf-8')
LOG.error(msg)
return render(request, template_name, {'user_form': user_form,
'message': msg})
@require_superuser
def delete(request, user_id):
try:
User.objects.get(id=user_id).delete()
except Exception:
msg = _('Unable to delete user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def deactivate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = False
user.save()
except:
msg = _('Unable to deactivate user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def activate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = True
user.save()
except:
msg = _('Unable to activate user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser_or_self
def edit(request, user_id):
template_name = 'users/update_user.html'
msg = ''
user = User.objects.get(id=user_id)
user_form = UserEditForm(initial={'username': user.username, 'email':
user.email})
if request.method == 'POST':
user_form = UserEditForm(request.POST)
if user_form.is_valid():
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
if username:
user.username = username
if email:
user.email = email
if password:
user.set_password(password)
user.save()
msg = _('Success updated user "%s"') % username.encode('utf-8')
LOG.info(msg)
return render(request, template_name, {'user_id': user_id, 'user_form':
user_form, 'message': msg})
def login(request):
template_name = 'auth/login.html'
msg = ''
if request.user.is_authenticated():
return redirect(reverse('keys:index'))
form = LoginForm
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
django_login(request, user)
msg = _('%s logged in successfully.') % username.encode(
'utf-8')
LOG.info(msg)
return redirect(reverse('keys:index'))
msg = _('Invalid username or password.')
LOG.error(msg)
return render(request, template_name, {'user_form': form, 'message': msg})
def logout(request):
django_logout(request)
return redirect(reverse('index'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
LOG = logging.getLogger(__name__)
def require_superuser_or_self(func):
def check(request, user_id):
if request.user.is_superuser or user_id.encode('utf-8') == str(request
.user.id):
return func(request, user_id)
return render(request, '403.html')
return check
@require_superuser
def index(request):
template_name = 'users/index.html'
msg = ''
try:
users = User.objects.exclude(id=request.user.id)
except:
msg = _('Unable to list users.')
LOG.error(msg)
users = []
paginator = Paginator(users, PAGE_SIZE)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return render(request, template_name, {'users': users, 'message': msg})
@require_superuser
def create(request):
template_name = 'users/create_user.html'
msg = ''
user_form = UserCreateForm()
if request.method == 'POST':
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
try:
new_user = User.objects.create_user(request.POST['username'
], request.POST['email'], request.POST['password'])
new_user.save()
msg = _('Success create user "%s"') % user_form.cleaned_data[
'username'].encode('utf-8')
LOG.info(msg)
except IntegrityError:
msg = _('User already exist, please try another username.')
LOG.error(msg)
except:
msg = _('Unable to create user "%s"') % user_form.cleaned_data[
'username'].encode('utf-8')
LOG.error(msg)
return render(request, template_name, {'user_form': user_form,
'message': msg})
@require_superuser
def delete(request, user_id):
try:
User.objects.get(id=user_id).delete()
except Exception:
msg = _('Unable to delete user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def deactivate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = False
user.save()
except:
msg = _('Unable to deactivate user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def activate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = True
user.save()
except:
msg = _('Unable to activate user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser_or_self
def edit(request, user_id):
template_name = 'users/update_user.html'
msg = ''
user = User.objects.get(id=user_id)
user_form = UserEditForm(initial={'username': user.username, 'email':
user.email})
if request.method == 'POST':
user_form = UserEditForm(request.POST)
if user_form.is_valid():
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
if username:
user.username = username
if email:
user.email = email
if password:
user.set_password(password)
user.save()
msg = _('Success updated user "%s"') % username.encode('utf-8')
LOG.info(msg)
return render(request, template_name, {'user_id': user_id, 'user_form':
user_form, 'message': msg})
def login(request):
template_name = 'auth/login.html'
msg = ''
if request.user.is_authenticated():
return redirect(reverse('keys:index'))
form = LoginForm
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
django_login(request, user)
msg = _('%s logged in successfully.') % username.encode(
'utf-8')
LOG.info(msg)
return redirect(reverse('keys:index'))
msg = _('Invalid username or password.')
LOG.error(msg)
return render(request, template_name, {'user_form': form, 'message': msg})
def logout(request):
django_logout(request)
return redirect(reverse('index'))
<|reserved_special_token_1|>
import logging
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.utils import IntegrityError
from django.shortcuts import redirect, render
from django.utils.translation import gettext_lazy as _
from keymanager.settings import PAGE_SIZE
from .forms import LoginForm
from .forms import UserCreateForm, UserEditForm
from utils.filters import require_superuser
LOG = logging.getLogger(__name__)
def require_superuser_or_self(func):
def check(request, user_id):
if request.user.is_superuser or user_id.encode('utf-8') == str(request
.user.id):
return func(request, user_id)
return render(request, '403.html')
return check
@require_superuser
def index(request):
template_name = 'users/index.html'
msg = ''
try:
users = User.objects.exclude(id=request.user.id)
except:
msg = _('Unable to list users.')
LOG.error(msg)
users = []
paginator = Paginator(users, PAGE_SIZE)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return render(request, template_name, {'users': users, 'message': msg})
@require_superuser
def create(request):
template_name = 'users/create_user.html'
msg = ''
user_form = UserCreateForm()
if request.method == 'POST':
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
try:
new_user = User.objects.create_user(request.POST['username'
], request.POST['email'], request.POST['password'])
new_user.save()
msg = _('Success create user "%s"') % user_form.cleaned_data[
'username'].encode('utf-8')
LOG.info(msg)
except IntegrityError:
msg = _('User already exist, please try another username.')
LOG.error(msg)
except:
msg = _('Unable to create user "%s"') % user_form.cleaned_data[
'username'].encode('utf-8')
LOG.error(msg)
return render(request, template_name, {'user_form': user_form,
'message': msg})
@require_superuser
def delete(request, user_id):
try:
User.objects.get(id=user_id).delete()
except Exception:
msg = _('Unable to delete user(%s)') % user_id
LOG.error(msg)
if user_id == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def deactivate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = False
user.save()
except:
msg = _('Unable to deactivate user(%s)') % user_id
LOG.error(msg)
    if int(user_id) == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def activate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = True
user.save()
except:
msg = _('Unable to activate user(%s)') % user_id
LOG.error(msg)
    if int(user_id) == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser_or_self
def edit(request, user_id):
template_name = 'users/update_user.html'
msg = ''
user = User.objects.get(id=user_id)
user_form = UserEditForm(initial={'username': user.username, 'email':
user.email})
if request.method == 'POST':
user_form = UserEditForm(request.POST)
if user_form.is_valid():
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
if username:
user.username = username
if email:
user.email = email
if password:
user.set_password(password)
user.save()
            msg = _('Successfully updated user "%s"') % username.encode('utf-8')
LOG.info(msg)
return render(request, template_name, {'user_id': user_id, 'user_form':
user_form, 'message': msg})
def login(request):
template_name = 'auth/login.html'
msg = ''
if request.user.is_authenticated():
return redirect(reverse('keys:index'))
    form = LoginForm()
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
django_login(request, user)
msg = _('%s logged in successfully.') % username.encode(
'utf-8')
LOG.info(msg)
return redirect(reverse('keys:index'))
msg = _('Invalid username or password.')
LOG.error(msg)
return render(request, template_name, {'user_form': form, 'message': msg})
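# --- Illustrative sketch, not part of the original module ---
# The authenticate/django_login pair used in login(), isolated:
# authenticate() returns a User for valid credentials (None otherwise);
# django_login() then binds that user to the current session. This helper
# is hypothetical and exists only to show the protocol.
def _auth_demo(request, username, password):
    user = authenticate(username=username, password=password)
    if user is not None and user.is_active:
        django_login(request, user)
    return user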
def logout(request):
django_logout(request)
return redirect(reverse('index'))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.utils import IntegrityError
from django.shortcuts import redirect, render
from django.utils.translation import gettext_lazy as _
from keymanager.settings import PAGE_SIZE
from .forms import LoginForm
from .forms import UserCreateForm, UserEditForm
from utils.filters import require_superuser
LOG = logging.getLogger(__name__)
def require_superuser_or_self(func):
def check(request, user_id):
if request.user.is_superuser or \
user_id.encode("utf-8") == str(request.user.id):
return func(request, user_id)
return render(request, "403.html")
return check
@require_superuser
def index(request):
template_name = "users/index.html"
msg = ""
try:
users = User.objects.exclude(id=request.user.id)
except:
msg = _("Unable to list users.")
LOG.error(msg)
users = []
paginator = Paginator(users, PAGE_SIZE)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return render(request, template_name, {"users": users, "message": msg})
@require_superuser
def create(request):
template_name = "users/create_user.html"
msg = ""
user_form = UserCreateForm()
if request.method == "POST":
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
try:
new_user = User.objects.create_user(
request.POST['username'],
request.POST['email'],
request.POST['password'])
new_user.save()
                msg = _('Successfully created user "%s"') % \
                    user_form.cleaned_data['username'].encode("utf-8")
LOG.info(msg)
except IntegrityError:
msg = _("User already exist, please try another username.")
LOG.error(msg)
except:
msg = _('Unable to create user "%s"') % \
user_form.cleaned_data['username'].encode("utf-8")
LOG.error(msg)
return render(request, template_name, {"user_form": user_form,
"message": msg})
@require_superuser
def delete(request, user_id):
try:
User.objects.get(id=user_id).delete()
except Exception:
msg = _("Unable to delete user(%s)") % user_id
LOG.error(msg)
    if int(user_id) == request.user.id:  # user_id from the URL is a string
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def deactivate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = False
user.save()
except:
msg = _("Unable to deactivate user(%s)") % user_id
LOG.error(msg)
    if int(user_id) == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def activate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = True
user.save()
except:
msg = _("Unable to activate user(%s)") % user_id
LOG.error(msg)
    if int(user_id) == request.user.id:
logout(request)
return redirect(reverse('users:index'))
@require_superuser_or_self
def edit(request, user_id):
template_name = "users/update_user.html"
msg = ""
user = User.objects.get(id=user_id)
user_form = UserEditForm(initial={"username": user.username,
"email": user.email})
if request.method == "POST":
user_form = UserEditForm(request.POST)
if user_form.is_valid():
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
if username:
user.username = username
if email:
user.email = email
if password:
user.set_password(password)
user.save()
            msg = _('Successfully updated user "%s"') % username.encode("utf-8")
LOG.info(msg)
return render(request, template_name, {"user_id": user_id,
"user_form": user_form,
"message": msg})
def login(request):
template_name = 'auth/login.html'
msg = ""
if request.user.is_authenticated():
return redirect(reverse("keys:index"))
    form = LoginForm()
if request.method == "POST":
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data["password"]
user = authenticate(username=username, password=password)
if user:
if user.is_active:
django_login(request, user)
msg = _("%s logged in successfully.") % \
username.encode('utf-8')
LOG.info(msg)
return redirect(reverse('keys:index'))
msg = _("Invalid username or password.")
LOG.error(msg)
return render(request, template_name, {"user_form": form,
"message": msg})
def logout(request):
django_logout(request)
return redirect(reverse("index"))
|
flexible
|
{
"blob_id": "b739a5d359b4d1c0323c7cd8234e4fe5eb9f3fcb",
"index": 6286,
"step-1": "<mask token>\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in 
successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-2": "<mask token>\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n 
username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-3": "<mask token>\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n 
if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-4": "import logging\nfrom django.contrib.auth import authenticate, login as django_login, logout as django_logout\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.db.utils import IntegrityError\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext_lazy as _\nfrom keymanager.settings import PAGE_SIZE\nfrom .forms import LoginForm\nfrom .forms import UserCreateForm, UserEditForm\nfrom utils.filters import require_superuser\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = 
request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom django.contrib.auth import authenticate, login as django_login, logout as django_logout\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.db.utils import IntegrityError\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext_lazy as _\n\nfrom keymanager.settings import PAGE_SIZE\n\nfrom .forms import LoginForm\nfrom .forms import UserCreateForm, UserEditForm\nfrom utils.filters import require_superuser\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n def check(request, user_id):\n if request.user.is_superuser or \\\n user_id.encode(\"utf-8\") == str(request.user.id):\n return func(request, user_id)\n\n return render(request, \"403.html\")\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = \"users/index.html\"\n msg = \"\"\n\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _(\"Unable to list users.\")\n LOG.error(msg)\n users = []\n\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n\n return render(request, template_name, {\"users\": users, \"message\": msg})\n\n\n@require_superuser\ndef create(request):\n template_name = \"users/create_user.html\"\n msg = \"\"\n user_form = UserCreateForm()\n\n if request.method == \"POST\":\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(\n request.POST['username'],\n request.POST['email'],\n request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % \\\n user_form.cleaned_data['username'].encode(\"utf-8\")\n LOG.info(msg)\n except IntegrityError:\n msg = _(\"User already exist, please try another username.\")\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % \\\n user_form.cleaned_data['username'].encode(\"utf-8\")\n LOG.error(msg)\n\n return render(request, template_name, {\"user_form\": user_form,\n \"message\": msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _(\"Unable to delete user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _(\"Unable to deactivate user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _(\"Unable to activate user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = \"users/update_user.html\"\n msg = \"\"\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={\"username\": user.username,\n \"email\": user.email})\n\n if request.method == \"POST\":\n user_form = UserEditForm(request.POST)\n if 
user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode(\"utf-8\")\n LOG.info(msg)\n return render(request, template_name, {\"user_id\": user_id,\n \"user_form\": user_form,\n \"message\": msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = \"\"\n if request.user.is_authenticated():\n return redirect(reverse(\"keys:index\"))\n\n form = LoginForm\n\n if request.method == \"POST\":\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data[\"password\"]\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _(\"%s logged in successfully.\") % \\\n username.encode('utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _(\"Invalid username or password.\")\n LOG.error(msg)\n\n return render(request, template_name, {\"user_form\": form,\n \"message\": msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse(\"index\"))",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
def appendix_and_index_pages():
"""
Prompt user to input appendix pages (if one exists) and index pages
:return: start and end pages of the appendix and index
"""
def index_pages():
"""
Prompt user to input index pages
:return: start and end pages of index
"""
index_start = int(input('Enter the start page of your index: '))
index_end = int(input('Enter the end page of your index: '))
return index_start, index_end
is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')
if is_appendix == 'y':
appendix_start = int(input('Enter the start page of your appendix: '))
appendix_end = int(input('Enter the end page of your appendix: '))
index_start, index_end = index_pages()
else:
index_start, index_end = index_pages()
appendix_start = index_start
appendix_end = index_start - 1
return appendix_start, appendix_end, index_start, index_end
<|reserved_special_token_0|>
def main():
while True:
print('------')
filenames = get_filenames()
if filenames:
print('Unordered PDF files in the current directory: ')
for index, filename in enumerate(filenames):
print('{}: {}'.format(index + 1, filename))
chosen_index = input(
"""
Enter the number of the file you want to reorder (type q to quit): """
)
if chosen_index == 'q':
break
insert_page = int(input(
'Enter the page you want your appendix and index to come after: '
))
appendix_start, appendix_end, index_start, index_end = (
appendix_and_index_pages())
try:
filename = filenames[int(chosen_index) - 1]
reorder(filename, insert_page, appendix_start, appendix_end,
index_start, index_end)
print('\n{} reordered.'.format(filename))
except Exception as error:
print(error)
print('Restarting program\n')
continue
else:
print('No unordered PDF found in current directory')
is_continue = yes_or_no('\nDo you want to reorder another PDF (y/n)? ')
if is_continue == 'n':
break
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def appendix_and_index_pages():
"""
Prompt user to input appendix pages (if one exists) and index pages
:return: start and end pages of the appendix and index
"""
def index_pages():
"""
Prompt user to input index pages
:return: start and end pages of index
"""
index_start = int(input('Enter the start page of your index: '))
index_end = int(input('Enter the end page of your index: '))
return index_start, index_end
is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')
if is_appendix == 'y':
appendix_start = int(input('Enter the start page of your appendix: '))
appendix_end = int(input('Enter the end page of your appendix: '))
index_start, index_end = index_pages()
else:
index_start, index_end = index_pages()
appendix_start = index_start
appendix_end = index_start - 1
return appendix_start, appendix_end, index_start, index_end
def yes_or_no(prompt):
"""
    Prompt user to answer a yes/no question, and keep asking until a valid 'y' or 'n' response is given
:param prompt: str prompting user to input their response
:return: yes or no response once user has correctly input their response
"""
response = input(prompt)
while response not in ['y', 'n']:
print('Invalid input')
response = input(prompt)
return response
def write_pages(page_range, pdf_read_object, pdf_write_object):
"""
Read pages within certain page range from the PDF read object and write those pages to the PDF write object
:param page_range: iterable containing pages to be read and written
:param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from
:param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to
:return: None, write object is modified in place.
"""
for page_num in page_range:
page = pdf_read_object.getPage(page_num)
pdf_write_object.addPage(page)
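# --- Illustrative sketch, not part of the original script ---
# The read -> write copy loop that write_pages() wraps, shown end to end:
# copy the first two pages of a PDF into a new file. 'sample.pdf' is a
# hypothetical input, and PyPDF2 is assumed to be imported in the elided
# header (reorder() below already relies on it).
def _copy_demo():
    with open('sample.pdf', 'rb') as src, open('copy.pdf', 'wb') as dst:
        reader = PyPDF2.PdfFileReader(src)
        writer = PyPDF2.PdfFileWriter()
        write_pages(range(2), reader, writer)
        writer.write(dst)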
def reorder(filename, insert_page, appendix_start, appendix_end,
index_start, index_end):
"""
Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name
:param filename: name of the PDF file to be reordered
:param insert_page: page in the original PDF after which the appendix and index are to be inserted
:param appendix_start: appendix start page in the original PDF
:param appendix_end: appendix end page in the original PDF
:param index_start: index start page in the original PDF
:param index_end: index end page in the original PDF
:return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF
"""
with filename.open('rb') as read_object, open(filename.stem +
'_reordered.pdf', 'wb') as write_object:
pdf_read_object = PyPDF2.PdfFileReader(read_object)
pdf_write_object = PyPDF2.PdfFileWriter()
pdf_length = pdf_read_object.numPages
if insert_page < 1 or insert_page >= appendix_start:
raise ValueError('Invalid insert page')
if appendix_start != index_start and appendix_start > appendix_end:
raise ValueError('Invalid appendix start page')
if appendix_start != index_start and appendix_end >= index_start:
raise ValueError('Invalid appendix end page')
if index_start > index_end:
raise ValueError('Invalid index start page')
if index_end > pdf_length:
raise ValueError('Invalid index end page')
pre_insert = range(insert_page)
post_insert = range(insert_page, appendix_start - 1)
appendix = range(appendix_start - 1, appendix_end)
post_appendix = range(appendix_end, index_start - 1)
index = range(index_start - 1, index_end)
post_index = range(index_end, pdf_length)
for page_range in [pre_insert, index, appendix, post_insert,
post_appendix, post_index]:
write_pages(page_range, pdf_read_object, pdf_write_object)
pdf_write_object.write(write_object)
def main():
while True:
print('------')
filenames = get_filenames()
if filenames:
print('Unordered PDF files in the current directory: ')
for index, filename in enumerate(filenames):
print('{}: {}'.format(index + 1, filename))
chosen_index = input(
"""
Enter the number of the file you want to reorder (type q to quit): """
)
if chosen_index == 'q':
break
insert_page = int(input(
'Enter the page you want your appendix and index to come after: '
))
appendix_start, appendix_end, index_start, index_end = (
appendix_and_index_pages())
try:
filename = filenames[int(chosen_index) - 1]
reorder(filename, insert_page, appendix_start, appendix_end,
index_start, index_end)
print('\n{} reordered.'.format(filename))
except Exception as error:
print(error)
print('Restarting program\n')
continue
else:
print('No unordered PDF found in current directory')
is_continue = yes_or_no('\nDo you want to reorder another PDF (y/n)? ')
if is_continue == 'n':
break
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_filenames():
"""
Get PDF files not yet reordered in the current directory
:return: list of PDF file names
"""
filenames = []
for filename in Path('.').glob('*.pdf'):
if 'reordered' not in filename.stem:
filenames.append(filename)
return filenames
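# --- Illustrative sketch, not part of the original script ---
# What the stem check in get_filenames() excludes: output files produced by
# a previous run. The file names here are hypothetical.
def _filter_demo():
    names = [Path('book.pdf'), Path('book_reordered.pdf')]
    return [n for n in names if 'reordered' not in n.stem]  # [Path('book.pdf')]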
def appendix_and_index_pages():
"""
Prompt user to input appendix pages (if one exists) and index pages
:return: start and end pages of the appendix and index
"""
def index_pages():
"""
Prompt user to input index pages
:return: start and end pages of index
"""
index_start = int(input('Enter the start page of your index: '))
index_end = int(input('Enter the end page of your index: '))
return index_start, index_end
is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')
if is_appendix == 'y':
appendix_start = int(input('Enter the start page of your appendix: '))
appendix_end = int(input('Enter the end page of your appendix: '))
index_start, index_end = index_pages()
else:
index_start, index_end = index_pages()
appendix_start = index_start
appendix_end = index_start - 1
return appendix_start, appendix_end, index_start, index_end
def yes_or_no(prompt):
"""
    Prompt user to answer a yes/no question, and keep asking until a valid 'y' or 'n' response is given
:param prompt: str prompting user to input their response
:return: yes or no response once user has correctly input their response
"""
response = input(prompt)
while response not in ['y', 'n']:
print('Invalid input')
response = input(prompt)
return response
def write_pages(page_range, pdf_read_object, pdf_write_object):
"""
Read pages within certain page range from the PDF read object and write those pages to the PDF write object
:param page_range: iterable containing pages to be read and written
:param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from
:param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to
:return: None, write object is modified in place.
"""
for page_num in page_range:
page = pdf_read_object.getPage(page_num)
pdf_write_object.addPage(page)
def reorder(filename, insert_page, appendix_start, appendix_end,
index_start, index_end):
"""
Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name
:param filename: name of the PDF file to be reordered
:param insert_page: page in the original PDF after which the appendix and index are to be inserted
:param appendix_start: appendix start page in the original PDF
:param appendix_end: appendix end page in the original PDF
:param index_start: index start page in the original PDF
:param index_end: index end page in the original PDF
:return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF
"""
with filename.open('rb') as read_object, open(filename.stem +
'_reordered.pdf', 'wb') as write_object:
pdf_read_object = PyPDF2.PdfFileReader(read_object)
pdf_write_object = PyPDF2.PdfFileWriter()
pdf_length = pdf_read_object.numPages
if insert_page < 1 or insert_page >= appendix_start:
raise ValueError('Invalid insert page')
if appendix_start != index_start and appendix_start > appendix_end:
raise ValueError('Invalid appendix start page')
if appendix_start != index_start and appendix_end >= index_start:
raise ValueError('Invalid appendix end page')
if index_start > index_end:
raise ValueError('Invalid index start page')
if index_end > pdf_length:
raise ValueError('Invalid index end page')
pre_insert = range(insert_page)
post_insert = range(insert_page, appendix_start - 1)
appendix = range(appendix_start - 1, appendix_end)
post_appendix = range(appendix_end, index_start - 1)
index = range(index_start - 1, index_end)
post_index = range(index_end, pdf_length)
for page_range in [pre_insert, index, appendix, post_insert,
post_appendix, post_index]:
write_pages(page_range, pdf_read_object, pdf_write_object)
pdf_write_object.write(write_object)
def main():
while True:
print('------')
filenames = get_filenames()
if filenames:
print('Unordered PDF files in the current directory: ')
for index, filename in enumerate(filenames):
print('{}: {}'.format(index + 1, filename))
chosen_index = input(
"""
Enter the number of the file you want to reorder (type q to quit): """
)
if chosen_index == 'q':
break
insert_page = int(input(
'Enter the page you want your appendix and index to come after: '
))
appendix_start, appendix_end, index_start, index_end = (
appendix_and_index_pages())
try:
filename = filenames[int(chosen_index) - 1]
reorder(filename, insert_page, appendix_start, appendix_end,
index_start, index_end)
print('\n{} reordered.'.format(filename))
except Exception as error:
print(error)
print('Restarting program\n')
continue
else:
print('No unordered PDF found in current directory')
is_continue = yes_or_no('\nDo you want to reorder another PDF (y/n)? ')
if is_continue == 'n':
break
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import PyPDF2
from pathlib import Path
def get_filenames():
"""
Get PDF files not yet reordered in the current directory
:return: list of PDF file names
"""
filenames = []
for filename in Path('.').glob('*.pdf'):
if 'reordered' not in filename.stem:
filenames.append(filename)
return filenames
def appendix_and_index_pages():
"""
Prompt user to input appendix pages (if one exists) and index pages
:return: start and end pages of the appendix and index
"""
def index_pages():
"""
Prompt user to input index pages
:return: start and end pages of index
"""
index_start = int(input('Enter the start page of your index: '))
index_end = int(input('Enter the end page of your index: '))
return index_start, index_end
is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')
if is_appendix == 'y':
appendix_start = int(input('Enter the start page of your appendix: '))
appendix_end = int(input('Enter the end page of your appendix: '))
index_start, index_end = index_pages()
else:
index_start, index_end = index_pages()
appendix_start = index_start
appendix_end = index_start - 1
return appendix_start, appendix_end, index_start, index_end
def yes_or_no(prompt):
"""
    Prompt user to answer a yes/no question, and keep asking until a valid 'y' or 'n' response is given
:param prompt: str prompting user to input their response
:return: yes or no response once user has correctly input their response
"""
response = input(prompt)
while response not in ['y', 'n']:
print('Invalid input')
response = input(prompt)
return response
def write_pages(page_range, pdf_read_object, pdf_write_object):
"""
Read pages within certain page range from the PDF read object and write those pages to the PDF write object
:param page_range: iterable containing pages to be read and written
:param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from
:param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to
:return: None, write object is modified in place.
"""
for page_num in page_range:
page = pdf_read_object.getPage(page_num)
pdf_write_object.addPage(page)
def reorder(filename, insert_page, appendix_start, appendix_end,
index_start, index_end):
"""
Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name
:param filename: name of the PDF file to be reordered
:param insert_page: page in the original PDF after which the appendix and index are to be inserted
:param appendix_start: appendix start page in the original PDF
:param appendix_end: appendix end page in the original PDF
:param index_start: index start page in the original PDF
:param index_end: index end page in the original PDF
:return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF
"""
with filename.open('rb') as read_object, open(filename.stem +
'_reordered.pdf', 'wb') as write_object:
pdf_read_object = PyPDF2.PdfFileReader(read_object)
pdf_write_object = PyPDF2.PdfFileWriter()
pdf_length = pdf_read_object.numPages
if insert_page < 1 or insert_page >= appendix_start:
raise ValueError('Invalid insert page')
if appendix_start != index_start and appendix_start > appendix_end:
raise ValueError('Invalid appendix start page')
if appendix_start != index_start and appendix_end >= index_start:
raise ValueError('Invalid appendix end page')
if index_start > index_end:
raise ValueError('Invalid index start page')
if index_end > pdf_length:
raise ValueError('Invalid index end page')
pre_insert = range(insert_page)
post_insert = range(insert_page, appendix_start - 1)
appendix = range(appendix_start - 1, appendix_end)
post_appendix = range(appendix_end, index_start - 1)
index = range(index_start - 1, index_end)
post_index = range(index_end, pdf_length)
for page_range in [pre_insert, index, appendix, post_insert,
post_appendix, post_index]:
write_pages(page_range, pdf_read_object, pdf_write_object)
pdf_write_object.write(write_object)
def main():
while True:
print('------')
filenames = get_filenames()
if filenames:
print('Unordered PDF files in the current directory: ')
for index, filename in enumerate(filenames):
print('{}: {}'.format(index + 1, filename))
chosen_index = input(
"""
Enter the number of the file you want to reorder (type q to quit): """
)
if chosen_index == 'q':
break
insert_page = int(input(
'Enter the page you want your appendix and index to come after: '
))
appendix_start, appendix_end, index_start, index_end = (
appendix_and_index_pages())
try:
filename = filenames[int(chosen_index) - 1]
reorder(filename, insert_page, appendix_start, appendix_end,
index_start, index_end)
print('\n{} reordered.'.format(filename))
except Exception as error:
print(error)
print('Restarting program\n')
continue
else:
print('No unordered PDF found in current directory')
is_continue = yes_or_no('\nDo you want to reorder another PDF (y/n)? ')
if is_continue == 'n':
break
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import PyPDF2
from pathlib import Path
def get_filenames():
"""
Get PDF files not yet reordered in the current directory
:return: list of PDF file names
"""
filenames = []
for filename in Path('.').glob('*.pdf'):
if 'reordered' not in filename.stem:
filenames.append(filename)
return filenames
def appendix_and_index_pages():
"""
Prompt user to input appendix pages (if one exists) and index pages
:return: start and end pages of the appendix and index
"""
def index_pages():
"""
Prompt user to input index pages
:return: start and end pages of index
"""
index_start = int(input('Enter the start page of your index: '))
index_end = int(input('Enter the end page of your index: '))
return index_start, index_end
is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')
if is_appendix == 'y':
appendix_start = int(input('Enter the start page of your appendix: '))
appendix_end = int(input('Enter the end page of your appendix: '))
index_start, index_end = index_pages()
else:
        # When there is no appendix, set appendix start and end pages such that the page ranges of the
        # appendix and the post-appendix (pre-index) will be blank, and the page range of the post-insert
        # will be from the insert point to the start of the index. See def reorder for more details.
index_start, index_end = index_pages()
appendix_start = index_start
appendix_end = index_start - 1
return appendix_start, appendix_end, index_start, index_end
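# --- Illustrative sketch, not part of the original script ---
# Worked example of the no-appendix sentinel chosen above: with an index on
# pages 10-12 and no appendix, appendix_start=10 and appendix_end=9 make the
# appendix and post-appendix ranges in reorder() empty, while post_insert
# still covers everything from the insert point up to the index. All page
# numbers are hypothetical.
def _sentinel_demo(insert_page=5, index_start=10):
    appendix_start, appendix_end = index_start, index_start - 1
    appendix = range(appendix_start - 1, appendix_end)    # empty: range(9, 9)
    post_appendix = range(appendix_end, index_start - 1)  # empty: range(9, 9)
    post_insert = range(insert_page, appendix_start - 1)  # 0-indexed pages 5-8
    return list(appendix), list(post_appendix), list(post_insert)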
def yes_or_no(prompt):
"""
    Prompt user to answer a yes/no question, and keep asking until a valid 'y' or 'n' response is given
:param prompt: str prompting user to input their response
:return: yes or no response once user has correctly input their response
"""
response = input(prompt)
while response not in ['y', 'n']:
print('Invalid input')
response = input(prompt)
return response
def write_pages(page_range, pdf_read_object, pdf_write_object):
"""
Read pages within certain page range from the PDF read object and write those pages to the PDF write object
:param page_range: iterable containing pages to be read and written
:param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from
:param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to
:return: None, write object is modified in place.
"""
for page_num in page_range:
page = pdf_read_object.getPage(page_num)
pdf_write_object.addPage(page)
def reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end):
"""
Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name
:param filename: name of the PDF file to be reordered
:param insert_page: page in the original PDF after which the appendix and index are to be inserted
:param appendix_start: appendix start page in the original PDF
:param appendix_end: appendix end page in the original PDF
:param index_start: index start page in the original PDF
:param index_end: index end page in the original PDF
:return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF
"""
with filename.open('rb') as read_object, open(filename.stem + '_reordered.pdf', 'wb') as write_object:
pdf_read_object = PyPDF2.PdfFileReader(read_object)
pdf_write_object = PyPDF2.PdfFileWriter()
pdf_length = pdf_read_object.numPages
# Check for invalid page numbers
if insert_page < 1 or insert_page >= appendix_start:
raise ValueError('Invalid insert page')
if appendix_start != index_start and appendix_start > appendix_end:
raise ValueError('Invalid appendix start page')
if appendix_start != index_start and appendix_end >= index_start:
raise ValueError('Invalid appendix end page')
if index_start > index_end:
raise ValueError('Invalid index start page')
if index_end > pdf_length:
raise ValueError('Invalid index end page')
# Prepare page ranges to be ordered
pre_insert = range(insert_page)
post_insert = range(insert_page, appendix_start - 1)
appendix = range(appendix_start - 1, appendix_end)
post_appendix = range(appendix_end, index_start - 1)
index = range(index_start - 1, index_end)
post_index = range(index_end, pdf_length)
# Copy pages from original PDF object to new PDF object with the new ordered page ranges
for page_range in [pre_insert, index, appendix, post_insert, post_appendix, post_index]:
write_pages(page_range, pdf_read_object, pdf_write_object)
# Write ordered PDF object to PDF file
pdf_write_object.write(write_object)
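# --- Illustrative sketch, not part of the original script ---
# Hypothetical call: a 100-page 'book.pdf' with an appendix on pages 90-95
# and an index on pages 96-100, both moved to come right after page 20.
# The resulting page order is 1-20, index, appendix, 21-89 (1-indexed here;
# reorder() converts to 0-indexed ranges internally).
def _reorder_demo():
    reorder(Path('book.pdf'), 20, 90, 95, 96, 100)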
def main():
while True:
print('------')
filenames = get_filenames()
if filenames:
print('Unordered PDF files in the current directory: ')
for index, filename in enumerate(filenames):
print('{}: {}'.format(index + 1, filename))
chosen_index = input('\nEnter the number of the file you want to reorder (type q to quit): ')
if chosen_index == 'q':
break
insert_page = int(input('Enter the page you want your appendix and index to come after: '))
appendix_start, appendix_end, index_start, index_end = appendix_and_index_pages()
try:
filename = filenames[int(chosen_index) - 1]
reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end)
print('\n{} reordered.'.format(filename))
except Exception as error:
print(error)
print('Restarting program\n')
continue
else:
print('No unordered PDF found in current directory')
# Ask user to reorder additional PDFs
is_continue = yes_or_no('\nDo you want to reorder another PDF (y/n)? ')
if is_continue == 'n':
break
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "2b3a42fed98b43cdd78edd751b306ba25328061a",
"index": 8652,
"step-1": "<mask token>\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n return appendix_start, appendix_end, index_start, index_end\n\n\n<mask token>\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input(\n \"\"\"\nEnter the number of the file you want to reorder (type q to quit): \"\"\"\n )\n if chosen_index == 'q':\n break\n insert_page = int(input(\n 'Enter the page you want your appendix and index to come after: '\n ))\n appendix_start, appendix_end, index_start, index_end = (\n appendix_and_index_pages())\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n return appendix_start, appendix_end, index_start, index_end\n\n\ndef yes_or_no(prompt):\n \"\"\"\n Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input\n :param prompt: str prompting user to input their response\n :return: yes or no response once user has correctly input their response\n \"\"\"\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n return response\n\n\ndef write_pages(page_range, pdf_read_object, pdf_write_object):\n \"\"\"\n Read pages within certain page range from the PDF read object and write those pages to the PDF write object\n :param page_range: iterable containing pages to be read and written\n :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from\n :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to\n :return: None, write object is modified in place.\n \"\"\"\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)\n\n\ndef reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end):\n \"\"\"\n Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name\n :param filename: name of the PDF file to be reordered\n :param insert_page: page in the original PDF after which the appendix and index are to be inserted\n :param appendix_start: appendix start page in the original PDF\n :param appendix_end: appendix end page in the original PDF\n :param index_start: index start page in the original PDF\n :param index_end: index end page in the original PDF\n :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF\n \"\"\"\n with filename.open('rb') as read_object, open(filename.stem +\n '_reordered.pdf', 'wb') as write_object:\n pdf_read_object = PyPDF2.PdfFileReader(read_object)\n pdf_write_object = PyPDF2.PdfFileWriter()\n pdf_length = pdf_read_object.numPages\n if insert_page < 1 or insert_page >= appendix_start:\n raise ValueError('Invalid insert page')\n if appendix_start != index_start and appendix_start > appendix_end:\n raise ValueError('Invalid appendix start page')\n if appendix_start != index_start and appendix_end >= index_start:\n raise ValueError('Invalid appendix end page')\n if index_start > index_end:\n raise ValueError('Invalid index start page')\n if index_end > pdf_length:\n raise ValueError('Invalid index end page')\n pre_insert = range(insert_page)\n post_insert = range(insert_page, appendix_start - 1)\n appendix = range(appendix_start - 1, appendix_end)\n post_appendix = 
range(appendix_end, index_start - 1)\n index = range(index_start - 1, index_end)\n post_index = range(index_end, pdf_length)\n for page_range in [pre_insert, index, appendix, post_insert,\n post_appendix, post_index]:\n write_pages(page_range, pdf_read_object, pdf_write_object)\n pdf_write_object.write(write_object)\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input(\n \"\"\"\nEnter the number of the file you want to reorder (type q to quit): \"\"\"\n )\n if chosen_index == 'q':\n break\n insert_page = int(input(\n 'Enter the page you want your appendix and index to come after: '\n ))\n appendix_start, appendix_end, index_start, index_end = (\n appendix_and_index_pages())\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_filenames():\n \"\"\"\n Get PDF files not yet reordered in the current directory\n :return: list of PDF file names\n \"\"\"\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n return filenames\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n return appendix_start, appendix_end, index_start, index_end\n\n\ndef yes_or_no(prompt):\n \"\"\"\n Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input\n :param prompt: str prompting user to input their response\n :return: yes or no response once user has correctly input their response\n \"\"\"\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n return response\n\n\ndef write_pages(page_range, pdf_read_object, pdf_write_object):\n \"\"\"\n Read pages within certain page range from the PDF read object and write those pages to the PDF write object\n :param page_range: iterable containing pages to be read and written\n :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from\n :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to\n :return: None, write object is modified in place.\n \"\"\"\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)\n\n\ndef reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end):\n \"\"\"\n Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name\n :param filename: name of the PDF file to be reordered\n :param insert_page: page in the original PDF after which the appendix and index are to be inserted\n :param appendix_start: appendix start page in the original PDF\n :param appendix_end: appendix end page in the original PDF\n :param index_start: index start page in the original PDF\n :param index_end: index end page in the original PDF\n :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF\n \"\"\"\n with filename.open('rb') as read_object, open(filename.stem +\n '_reordered.pdf', 'wb') as write_object:\n pdf_read_object = PyPDF2.PdfFileReader(read_object)\n pdf_write_object = PyPDF2.PdfFileWriter()\n pdf_length = pdf_read_object.numPages\n if insert_page < 1 or insert_page >= appendix_start:\n raise ValueError('Invalid insert page')\n if appendix_start != index_start and appendix_start > appendix_end:\n raise ValueError('Invalid appendix start page')\n if appendix_start != index_start and appendix_end >= index_start:\n raise ValueError('Invalid appendix end page')\n if index_start > 
index_end:\n raise ValueError('Invalid index start page')\n if index_end > pdf_length:\n raise ValueError('Invalid index end page')\n pre_insert = range(insert_page)\n post_insert = range(insert_page, appendix_start - 1)\n appendix = range(appendix_start - 1, appendix_end)\n post_appendix = range(appendix_end, index_start - 1)\n index = range(index_start - 1, index_end)\n post_index = range(index_end, pdf_length)\n for page_range in [pre_insert, index, appendix, post_insert,\n post_appendix, post_index]:\n write_pages(page_range, pdf_read_object, pdf_write_object)\n pdf_write_object.write(write_object)\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input(\n \"\"\"\nEnter the number of the file you want to reorder (type q to quit): \"\"\"\n )\n if chosen_index == 'q':\n break\n insert_page = int(input(\n 'Enter the page you want your appendix and index to come after: '\n ))\n appendix_start, appendix_end, index_start, index_end = (\n appendix_and_index_pages())\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import PyPDF2\nfrom pathlib import Path\n\n\ndef get_filenames():\n \"\"\"\n Get PDF files not yet reordered in the current directory\n :return: list of PDF file names\n \"\"\"\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n return filenames\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n return appendix_start, appendix_end, index_start, index_end\n\n\ndef yes_or_no(prompt):\n \"\"\"\n Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input\n :param prompt: str prompting user to input their response\n :return: yes or no response once user has correctly input their response\n \"\"\"\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n return response\n\n\ndef write_pages(page_range, pdf_read_object, pdf_write_object):\n \"\"\"\n Read pages within certain page range from the PDF read object and write those pages to the PDF write object\n :param page_range: iterable containing pages to be read and written\n :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from\n :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to\n :return: None, write object is modified in place.\n \"\"\"\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)\n\n\ndef reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end):\n \"\"\"\n Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name\n :param filename: name of the PDF file to be reordered\n :param insert_page: page in the original PDF after which the appendix and index are to be inserted\n :param appendix_start: appendix start page in the original PDF\n :param appendix_end: appendix end page in the original PDF\n :param index_start: index start page in the original PDF\n :param index_end: index end page in the original PDF\n :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF\n \"\"\"\n with filename.open('rb') as read_object, open(filename.stem +\n '_reordered.pdf', 'wb') as write_object:\n pdf_read_object = PyPDF2.PdfFileReader(read_object)\n pdf_write_object = PyPDF2.PdfFileWriter()\n pdf_length = pdf_read_object.numPages\n if insert_page < 1 or insert_page >= appendix_start:\n raise ValueError('Invalid insert page')\n if appendix_start != index_start and appendix_start > appendix_end:\n raise ValueError('Invalid appendix start page')\n if appendix_start != index_start and appendix_end >= index_start:\n raise ValueError('Invalid appendix end page')\n 
if index_start > index_end:\n raise ValueError('Invalid index start page')\n if index_end > pdf_length:\n raise ValueError('Invalid index end page')\n pre_insert = range(insert_page)\n post_insert = range(insert_page, appendix_start - 1)\n appendix = range(appendix_start - 1, appendix_end)\n post_appendix = range(appendix_end, index_start - 1)\n index = range(index_start - 1, index_end)\n post_index = range(index_end, pdf_length)\n for page_range in [pre_insert, index, appendix, post_insert,\n post_appendix, post_index]:\n write_pages(page_range, pdf_read_object, pdf_write_object)\n pdf_write_object.write(write_object)\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input(\n \"\"\"\nEnter the number of the file you want to reorder (type q to quit): \"\"\"\n )\n if chosen_index == 'q':\n break\n insert_page = int(input(\n 'Enter the page you want your appendix and index to come after: '\n ))\n appendix_start, appendix_end, index_start, index_end = (\n appendix_and_index_pages())\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import PyPDF2\nfrom pathlib import Path\n\n\ndef get_filenames():\n \"\"\"\n Get PDF files not yet reordered in the current directory\n :return: list of PDF file names\n \"\"\"\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n\n return filenames\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n # When there is no appendix, set appendix start and end pages such as the page ranges of the\n # appendix and the post-appendix (pre-index) will be blank, and the page range of the post-insert\n # will be from the insert point to the start of the index. See def reorder for more details.\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n\n return appendix_start, appendix_end, index_start, index_end\n\n\ndef yes_or_no(prompt):\n \"\"\"\n Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input\n :param prompt: str prompting user to input their response\n :return: yes or no response once user has correctly input their response\n \"\"\"\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n\n return response\n\n\ndef write_pages(page_range, pdf_read_object, pdf_write_object):\n \"\"\"\n Read pages within certain page range from the PDF read object and write those pages to the PDF write object\n :param page_range: iterable containing pages to be read and written\n :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from\n :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to\n :return: None, write object is modified in place.\n \"\"\"\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)\n\n\ndef reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end):\n \"\"\"\n Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name\n :param filename: name of the PDF file to be reordered\n :param insert_page: page in the original PDF after which the appendix and index are to be inserted\n :param appendix_start: appendix start page in the original PDF\n :param appendix_end: appendix end page in the original PDF\n :param index_start: index start page in the original PDF\n :param index_end: index end page in the original PDF\n :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF\n \"\"\"\n with filename.open('rb') as read_object, open(filename.stem + '_reordered.pdf', 'wb') as write_object:\n pdf_read_object = PyPDF2.PdfFileReader(read_object)\n pdf_write_object = PyPDF2.PdfFileWriter()\n pdf_length = pdf_read_object.numPages\n\n # Check for invalid page numbers\n 
if insert_page < 1 or insert_page >= appendix_start:\n raise ValueError('Invalid insert page')\n if appendix_start != index_start and appendix_start > appendix_end:\n raise ValueError('Invalid appendix start page')\n if appendix_start != index_start and appendix_end >= index_start:\n raise ValueError('Invalid appendix end page')\n if index_start > index_end:\n raise ValueError('Invalid index start page')\n if index_end > pdf_length:\n raise ValueError('Invalid index end page')\n\n # Prepare page ranges to be ordered\n pre_insert = range(insert_page)\n post_insert = range(insert_page, appendix_start - 1)\n appendix = range(appendix_start - 1, appendix_end)\n post_appendix = range(appendix_end, index_start - 1)\n index = range(index_start - 1, index_end)\n post_index = range(index_end, pdf_length)\n\n # Copy pages from original PDF object to new PDF object with the new ordered page ranges\n for page_range in [pre_insert, index, appendix, post_insert, post_appendix, post_index]:\n write_pages(page_range, pdf_read_object, pdf_write_object)\n\n # Write ordered PDF object to PDF file\n pdf_write_object.write(write_object)\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input('\\nEnter the number of the file you want to reorder (type q to quit): ')\n if chosen_index == 'q':\n break\n insert_page = int(input('Enter the page you want your appendix and index to come after: '))\n appendix_start, appendix_end, index_start, index_end = appendix_and_index_pages()\n\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n\n # Ask user to reorder additional PDFs\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
5,
7,
8,
9
]
}
|
[
2,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
eval(compile(base64.b64decode(code), '<string>', 'exec'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
code = (
b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvYmFsIGRiCgogICAgcHJpbnQoIldlbGNvbWUgd
G8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'
)
eval(compile(base64.b64decode(code), '<string>', 'exec'))
<|reserved_special_token_1|>
import base64
code = (
b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvYmFsIGRiCgogICAgcHJpbnQoIldlbGNvbWUgd
G8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'
)
eval(compile(base64.b64decode(code), '<string>', 'exec'))
<|reserved_special_token_1|>
import base64
code=b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvYmFsIGRiCgogICAgcHJpbnQoIldlbGNv
bWUgdG8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'
eval(compile(base64.b64decode(code), "<string>", 'exec'))
|
flexible
|
{
"blob_id": "c7f26978333c7e6cccf7451ea5d10511a66b62c2",
"index": 1908,
"step-1": "<mask token>\n",
"step-2": "<mask token>\neval(compile(base64.b64decode(code), '<string>', 'exec'))\n",
"step-3": "<mask token>\ncode = (\n b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvY
mFsIGRiCgogICAgcHJpbnQoIldlbGNvbWUgdG8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'\n )\neval(compile(base64.b64decode(code), '<string>', 'exec'))\n",
"step-4": "import base64\ncode = (\n b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xv
YmFsIGRiCgogICAgcHJpbnQoIldlbGNvbWUgdG8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'\n )\neval(compile(base64.b64decode(code), '<string>', 'exec'))\n",
"step-5": "import base64\ncode=b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvYmFsIG
RiCgogICAgcHJpbnQoIldlbGNvbWUgdG8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'\neval(compile(base64.b64decode(code), \"<string>\", 'exec'))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Minimum steps to reach a target point -- LeetCode 754 https://leetcode.com/problems/reach-a-number/solution/
# Case analysis: reaching target is equivalent to reaching abs(target).
# 1. Let total = 1+2+...+k; find the smallest k with total >= target. At least k steps are needed, and the overshoot is d = total - target.
# 2. If d is even, flipping moves that sum to d/2 cancels it, so k steps reach target.
# 3. If d is odd, k steps can never work, since flipping any move changes the sum by an even amount and cannot cancel d; take one more step and check whether d + (k+1) is even.
# 4. If d + (k+1) is even, k+1 steps suffice.
# 5. If d + (k+1) is odd (d is odd, so k+1 must be even), k+1 steps cannot work either; one more step makes d + (k+1) + (k+2) even, so k+2 steps suffice.
class Solution(object):
def reachNumber(self, target):
target = abs(target)
k = 0
while target > 0:
k += 1
target -= k
return k if target % 2 == 0 else k + 1 + k%2
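
# Hand-checked examples of the case analysis above:
#   reachNumber(3) == 2   # 1+2 lands exactly (d = 0)
#   reachNumber(2) == 3   # d = 1 odd, k = 2 even, so k+1 steps: +1-2+3
#   reachNumber(5) == 5   # d = 1 odd, k = 3 odd, so k+2 steps are needed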
if __name__ == '__main__':
    s = int(input())
    s1 = Solution()
    print(s1.reachNumber(s))
|
normal
|
{
"blob_id": "4b255b648f67e6bcc30eecc7975bbb1a356b2499",
"index": 2656,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution(object):\n\n def reachNumber(self, target):\n target = abs(target)\n k = 0\n while target > 0:\n k += 1\n target -= k\n return k if target % 2 == 0 else k + 1 + k % 2\n\n\n<mask token>\n",
"step-4": "class Solution(object):\n\n def reachNumber(self, target):\n target = abs(target)\n k = 0\n while target > 0:\n k += 1\n target -= k\n return k if target % 2 == 0 else k + 1 + k % 2\n\n\nif __name__ == '__main__':\n s = input()\n s1 = Solution()\n print(s1.solution(s))\n",
"step-5": "#到达终点的最小步数 leetcode原题 754 https://leetcode.com/problems/reach-a-number/solution/\n# 分情况讨论:到target与到abs(target)的情况是一样的\n# 1. total = 1+2+...+k,求total刚好大于等于n的k,可知到达target至少要用k步,此时超出d=total-k\n# 2. 如果d为偶数,则只需将d/2步反向即可,k步即可到达target\n# 3. 如果d为奇数,则k步不可能到达,因为任何反转都会改变偶数距离,不可能消去d,则再走一步判断d+k+1是否为偶数\n# 4. 如果为偶数,说明k+1步可到\n# 5. 如果d+k+1为奇数,且已知d为奇数,说明k+1为偶数,不可能在k+1步走到,再走一步,d+k+1+k+2必为偶数,k+2步可到\n\nclass Solution(object):\n def reachNumber(self, target):\n target = abs(target)\n k = 0\n while target > 0:\n k += 1\n target -= k\n\n return k if target % 2 == 0 else k + 1 + k%2\n\nif __name__ == '__main__':\n s = input()\n s1 = Solution()\n print(s1.solution(s))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class BFCell:
<|reserved_special_token_0|>
def __init__(self, r, c, id, occupied):
"""BFCell(row, col)"""
self.r = r
self.c = c
self.id = id
self.occupied = occupied
def __repr__(self):
return str(self)
def __str__(self):
return '(%d)' % self.id
class BFCounter:
def __init__(self):
self.count = 0
def i(self):
orig = self.count
self.count += 1
return orig
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def black_neighbours(img, pos):
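    # B(p): the number of black (foreground, value 0) pixels among the 8
    # neighbours of pos.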
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):
r, c = neighbour
count += img[r][c] == 0
return count
def hilditch(img):
"""
Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html
:param img:
:return: thinned image
"""
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
temp = np.copy(img)
changed = True
iteration = 0
file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
while changed:
changed = False
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
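                # Hilditch removes a black pixel p1 only if all four tests pass:
                #   (1) 2 <= B(p1) <= 6, (2) A(p1) == 1,
                #   (3) not (p2, p4, p8 all black and A(p2) == 1),
                #   (4) not (p2, p4, p6 all black and A(p4) == 1).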
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1
] == 0 and neighbour_transitions_to_white(img, (r - 1, c)
) == 1:
continue
                if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][
                    c] == 0 and neighbour_transitions_to_white(img, (r,
                    c + 1)) == 1:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
return img
def zhangsuen(img):
"""
Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm
:param img:
:return: thinned image
"""
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
temp = np.copy(img)
changed = True
iteration = 0
file_prefix = './images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
while changed:
changed = False
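        # Sub-iteration 1: remove south-east boundary and north-west corner
        # pixels (the two structural tests below use p2,p4,p6 and p4,p6,p8).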
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][c
] == 0:
continue
if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1
] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
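        # Sub-iteration 2: remove north-west boundary and south-east corner
        # pixels (the two structural tests below use p2,p4,p8 and p2,p6,p8).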
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1
] == 0:
continue
if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1
] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
return img
class BFCell:
"""Brushfire Cell"""
def __init__(self, r, c, id, occupied):
"""BFCell(row, col)"""
self.r = r
self.c = c
self.id = id
self.occupied = occupied
def __repr__(self):
return str(self)
def __str__(self):
return '(%d)' % self.id
class BFCounter:
def __init__(self):
self.count = 0
def i(self):
orig = self.count
self.count += 1
return orig
def brushfire(img):
"""
:param img:
:return: Output Image
"""
WALL = 255
SPACE = 255 - WALL
colours = BFCounter()
VORONOI = colours.i()
LEFT = colours.i()
RIGHT = colours.i()
UP = colours.i()
DOWN = colours.i()
CV = BFCell(-1, -1, -1, False)
CL = BFCell(-1, -1, -2, True)
CR = BFCell(-1, -1, -3, True)
CU = BFCell(-1, -1, -4, True)
CD = BFCell(-1, -1, -5, True)
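    # Sentinel cells for the union-find: CV collects Voronoi (equidistant)
    # pixels, and CL/CR/CU/CD seed the four image borders as distinct occupied
    # regions so wavefronts arriving from different walls can be told apart.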
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
regions = UnionFind()
    # BFCell requires an occupancy flag; it is taken here from the thresholded
    # image, with the assumed convention that WALL-valued pixels are occupied.
    cells = [[BFCell(r, c, r * cols + c, img[r][c] == WALL) for c in
        range(cols)] for r in range(rows)]
cellsf = [cell for row in cells for cell in row]
regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))
visited = set()
for r in range(rows):
pass
return img
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def wtf(img):
"""
Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html
:param img:
:return: thinned image
"""
thinned = np.zeros(img.shape, np.uint8)
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
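    # Morphological skeletonization: each round erodes the image, and the
    # pixels that an opening (erode-then-dilate) would discard are OR-ed into
    # the skeleton; the loop ends once erosion empties the image.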
iteration = 0
file_prefix = './images/' + time.strftime('wtf_%Y-%m-%d_%H-%M-%S_')
joined = np.zeros((img.shape[0], img.shape[1] * 2), np.uint8)
joined[:img.shape[0], 0:img.shape[1]] = img
joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned
cv2.imwrite(file_prefix + str(iteration) + '.png', joined)
while True:
eroded = cv2.erode(img, element)
temp = cv2.dilate(eroded, element)
temp = cv2.subtract(img, temp)
thinned = cv2.bitwise_or(thinned, temp)
img = eroded.copy()
iteration += 1
joined[:img.shape[0], 0:img.shape[1]] = img
joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned
cv2.imwrite(file_prefix + str(iteration) + '.png', joined)
if cv2.countNonZero(img) == 0:
break
return thinned
<|reserved_special_token_0|>
def neighbour_transitions_to_white(img, pos):
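    # A(p): the number of black-to-white transitions in the clockwise cycle of
    # neighbours p2, p3, ..., p9, p2 (equal, on a cycle, to the number of
    # white-to-black transitions); A(p) == 1 means the black neighbours form a
    # single connected arc around pos.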
last_value = None
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):
r, c = neighbour
if last_value is None:
last_value = img[r][c]
continue
count += last_value == 0 and img[r][c] != 0
last_value = img[r][c]
return count
def black_neighbours(img, pos):
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):
r, c = neighbour
count += img[r][c] == 0
return count
def hilditch(img):
"""
Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html
:param img:
:return: thinned image
"""
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
temp = np.copy(img)
changed = True
iteration = 0
file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
while changed:
changed = False
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1
] == 0 and neighbour_transitions_to_white(img, (r - 1, c)
) == 1:
continue
                if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][
                    c] == 0 and neighbour_transitions_to_white(img, (r,
                    c + 1)) == 1:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
return img
def zhangsuen(img):
"""
Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm
:param img:
:return: thinned image
"""
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
temp = np.copy(img)
changed = True
iteration = 0
file_prefix = './images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
while changed:
changed = False
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][c
] == 0:
continue
if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1
] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1
] == 0:
continue
if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1
] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
return img
class BFCell:
"""Brushfire Cell"""
def __init__(self, r, c, id, occupied):
"""BFCell(row, col)"""
self.r = r
self.c = c
self.id = id
self.occupied = occupied
def __repr__(self):
return str(self)
def __str__(self):
return '(%d)' % self.id
class BFCounter:
def __init__(self):
self.count = 0
def i(self):
orig = self.count
self.count += 1
return orig
def brushfire(img):
"""
:param img:
:return: Output Image
"""
WALL = 255
SPACE = 255 - WALL
colours = BFCounter()
VORONOI = colours.i()
LEFT = colours.i()
RIGHT = colours.i()
UP = colours.i()
DOWN = colours.i()
CV = BFCell(-1, -1, -1, False)
CL = BFCell(-1, -1, -2, True)
CR = BFCell(-1, -1, -3, True)
CU = BFCell(-1, -1, -4, True)
CD = BFCell(-1, -1, -5, True)
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
regions = UnionFind()
    cells = [[BFCell(r, c, r * cols + c, img[r][c] == WALL) for c in range(cols)] for r in range(rows)]
cellsf = [cell for row in cells for cell in row]
regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))
visited = set()
for r in range(rows):
pass
return img
<|reserved_special_token_0|>
def mouse_callback(event, x, y, flags, param):
global img_i, down, last_pos, last_time, process
if event == cv2.EVENT_RBUTTONDOWN:
process = True
elif event == cv2.EVENT_LBUTTONDOWN:
down = True
last_pos = x, y
elif event == cv2.EVENT_LBUTTONUP:
down = False
last_pos = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if down:
cv2.line(img_i, last_pos, (x, y), 255, 5)
last_pos = x, y
last_time = time.time()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def wtf(img):
"""
Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html
:param img:
:return: thinned image
"""
thinned = np.zeros(img.shape, np.uint8)
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
iteration = 0
file_prefix = './images/' + time.strftime('wtf_%Y-%m-%d_%H-%M-%S_')
joined = np.zeros((img.shape[0], img.shape[1] * 2), np.uint8)
joined[:img.shape[0], 0:img.shape[1]] = img
joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned
cv2.imwrite(file_prefix + str(iteration) + '.png', joined)
while True:
eroded = cv2.erode(img, element)
temp = cv2.dilate(eroded, element)
temp = cv2.subtract(img, temp)
thinned = cv2.bitwise_or(thinned, temp)
img = eroded.copy()
iteration += 1
joined[:img.shape[0], 0:img.shape[1]] = img
joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned
cv2.imwrite(file_prefix + str(iteration) + '.png', joined)
if cv2.countNonZero(img) == 0:
break
return thinned
def neighbours8(bounds, pos, repeat_first_last=False):
rows, cols = bounds
r, c = pos
cup = r > 0
crh = c < cols - 1
cdn = r < rows - 1
clf = c > 0
if cup:
yield r - 1, c
    if cup and crh:
        yield r - 1, c + 1
    if crh:
        yield r, c + 1
    if cdn and crh:
        yield r + 1, c + 1
    if cdn:
        yield r + 1, c
    if cdn and clf:
        yield r + 1, c - 1
    if clf:
        yield r, c - 1
    if cup and clf:
        yield r - 1, c - 1
if repeat_first_last and cup:
yield r - 1, c
def neighbour_transitions_to_white(img, pos):
last_value = None
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):
r, c = neighbour
if last_value is None:
last_value = img[r][c]
continue
count += last_value == 0 and img[r][c] != 0
last_value = img[r][c]
return count
def black_neighbours(img, pos):
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):
r, c = neighbour
count += img[r][c] == 0
return count
def hilditch(img):
"""
Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html
:param img:
:return: thinned image
"""
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
temp = np.copy(img)
changed = True
iteration = 0
file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
while changed:
changed = False
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1
] == 0 and neighbour_transitions_to_white(img, (r - 1, c)
) == 1:
continue
                if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][c] == 0 and neighbour_transitions_to_white(img, (r, c + 1)) == 1:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
return img
def zhangsuen(img):
"""
Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm
:param img:
:return: thinned image
"""
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
temp = np.copy(img)
changed = True
iteration = 0
file_prefix = './images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
while changed:
changed = False
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][c
] == 0:
continue
if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1
] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
for r in range(1, rows - 1):
for c in range(1, cols - 1):
if img[r][c] != 0:
continue
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1
] == 0:
continue
if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1
] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + '.png', img)
return img
class BFCell:
"""Brushfire Cell"""
def __init__(self, r, c, id, occupied):
"""BFCell(row, col)"""
self.r = r
self.c = c
self.id = id
self.occupied = occupied
def __repr__(self):
return str(self)
def __str__(self):
return '(%d)' % self.id
class BFCounter:
def __init__(self):
self.count = 0
def i(self):
orig = self.count
self.count += 1
return orig
def brushfire(img):
"""
:param img:
:return: Output Image
"""
WALL = 255
SPACE = 255 - WALL
colours = BFCounter()
VORONOI = colours.i()
LEFT = colours.i()
RIGHT = colours.i()
UP = colours.i()
DOWN = colours.i()
CV = BFCell(-1, -1, -1, False)
CL = BFCell(-1, -1, -2, True)
CR = BFCell(-1, -1, -3, True)
CU = BFCell(-1, -1, -4, True)
CD = BFCell(-1, -1, -5, True)
rows, cols = img.shape[0], img.shape[1]
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
regions = UnionFind()
    cells = [[BFCell(r, c, r * cols + c, img[r][c] == WALL) for c in range(cols)] for r in range(rows)]
cellsf = [cell for row in cells for cell in row]
regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))
visited = set()
for r in range(rows):
pass
return img
<|reserved_special_token_0|>
def mouse_callback(event, x, y, flags, param):
global img_i, down, last_pos, last_time, process
if event == cv2.EVENT_RBUTTONDOWN:
process = True
elif event == cv2.EVENT_LBUTTONDOWN:
down = True
last_pos = x, y
elif event == cv2.EVENT_LBUTTONUP:
down = False
last_pos = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if down:
cv2.line(img_i, last_pos, (x, y), 255, 5)
last_pos = x, y
last_time = time.time()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import cv2
import numpy as np
import time
import itertools
from unionfind import UnionFind
R = 512
C = 512
# Setup window
cv2.namedWindow('main')
#img_i = np.zeros((R, C), np.uint8)
img_i = cv2.imread("window1.png", cv2.IMREAD_GRAYSCALE)
#img_i = cv2.threshold(img_i, 127, 255, cv2.THRESH_BINARY)[1]
down = False
last_pos = (0,0)
last_time = time.time()
def wtf(img):
"""
Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html
:param img:
:return: thinned image
"""
thinned = np.zeros(img.shape, np.uint8)
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
iteration = 0
file_prefix = "./images/" + time.strftime("wtf_%Y-%m-%d_%H-%M-%S_")
joined = np.zeros((img.shape[0], img.shape[1]*2), np.uint8)
joined[:img.shape[0], 0:img.shape[1]] = img
joined[:img.shape[0], img.shape[1]:img.shape[1]*2] = thinned
cv2.imwrite(file_prefix + str(iteration) + ".png", joined)
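    # Morphological skeleton: each pass OR-s into `thinned` the pixels that an
    # opening (erode then dilate) removes from the current image, then erodes
    # the image one step; the loop stops once the image is fully eroded away.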
while True:
eroded = cv2.erode(img, element)
temp = cv2.dilate(eroded, element)
temp = cv2.subtract(img, temp)
thinned = cv2.bitwise_or(thinned, temp)
img = eroded.copy()
iteration += 1
joined[:img.shape[0], 0:img.shape[1]] = img
joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned
cv2.imwrite(file_prefix + str(iteration) + ".png", joined)
if cv2.countNonZero(img) == 0:
break
return thinned
def neighbours8(bounds, pos, repeat_first_last=False):
# nhood8 = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]
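    # Yields the in-bounds 8-neighbours clockwise starting from north;
    # repeat_first_last re-yields the first neighbour so that callers can
    # close the circular walk (see neighbour_transitions_to_white).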
rows, cols = bounds
r, c = pos
cup = r > 0
crh = c < cols - 1
cdn = r < rows - 1
clf = c > 0
if cup:
yield (r - 1, c)
    if cup and crh:
        yield (r - 1, c + 1)
    if crh:
        yield (r, c + 1)
    if cdn and crh:
        yield (r + 1, c + 1)
    if cdn:
        yield (r + 1, c)
    if cdn and clf:
        yield (r + 1, c - 1)
    if clf:
        yield (r, c - 1)
    if cup and clf:
        yield (r - 1, c - 1)
if repeat_first_last and cup:
yield (r - 1, c)
def neighbour_transitions_to_white(img, pos):
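    # A(P): number of black-to-white (0 -> nonzero) transitions met while
    # walking the 8-neighbourhood once; both thinning algorithms below use it.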
last_value = None
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):
r, c = neighbour
if last_value is None:
last_value = img[r][c]
continue
count += last_value == 0 and img[r][c] != 0
last_value = img[r][c]
return count
def black_neighbours(img, pos):
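    # B(P): number of black (0) pixels among the 8 neighbours.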
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):
r, c = neighbour
count += img[r][c] == 0
return count
def hilditch(img):
"""
Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html
:param img:
:return: thinned image
"""
rows, cols = (img.shape[0], img.shape[1])
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
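    # After THRESH_BINARY_INV the drawn strokes (255) become 0, so black (0)
    # is the foreground being thinned. Neighbour notation in the comments
    # below: P2=north (r-1, c), P4=east (r, c+1), P6=south (r+1, c),
    # P8=west (r, c-1).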
temp = np.copy(img)
# Repeat these two steps till no changes
changed = True
iteration = 0
file_prefix = "./images/" + time.strftime("hilditch_%Y-%m-%d_%H-%M-%S_")
cv2.imwrite(file_prefix + str(iteration) + ".png", img)
while changed:
changed = False
# Step 1
# for each pixel that has 8 neighbours
for r in range(1, rows - 1):
for c in range(1, cols - 1):
# and is black
if img[r][c] != 0:
continue
# and 2 <= B(Pixel) <= 6
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
# and A(Pixel) = 1
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
# and P2||P4||P8||A(P2)!=1
if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r][c-1] == 0 and neighbour_transitions_to_white(img, (r - 1, c)) == 1:
continue
# and P2||P4||P6||A(P4)!=1
                if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r+1][c] == 0 and neighbour_transitions_to_white(img, (r, c+1)) == 1:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + ".png", img)
return img
def zhangsuen(img):
"""
Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm
:param img:
:return: thinned image
"""
rows, cols = (img.shape[0], img.shape[1])
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
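    # Same conventions as hilditch(): foreground strokes are 0 after the
    # inverted threshold, and P2..P8 name the compass neighbours as above.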
temp = np.copy(img)
# Repeat these two steps till no changes
changed = True
iteration = 0
file_prefix = "./images/" + time.strftime("zhangsuen_%Y-%m-%d_%H-%M-%S_")
cv2.imwrite(file_prefix + str(iteration) + ".png", img)
while changed:
changed = False
# Step 1
# for each pixel that has 8 neighbours
for r in range(1, rows - 1):
for c in range(1, cols - 1):
# and is black
if img[r][c] != 0:
continue
# and 2 <= B(Pixel) <= 6
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
# and A(Pixel) = 1
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
# and P2||P4||P6
if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r+1][c] == 0:
continue
# and P4||P6||P8
if img[r][c+1] == 0 and img[r+1][c] == 0 and img[r][c-1] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
# Step 2
# for each pixel that has 8 neighbours
for r in range(1, rows - 1):
for c in range(1, cols - 1):
# and is black
if img[r][c] != 0:
continue
# and 2 <= B(Pixel) <= 6
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
# and A(Pixel) = 1
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
                # and P2||P4||P8
if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r][c-1] == 0:
continue
                # and P2||P6||P8
if img[r-1][c] == 0 and img[r+1][c] == 0 and img[r][c-1] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + ".png", img)
return img
class BFCell:
"""Brushfire Cell"""
def __init__(self, r, c, id, occupied):
"""BFCell(row, col)"""
self.r = r
self.c = c
self.id = id
self.occupied = occupied
def __repr__(self):
return str(self)
def __str__(self):
#return "(%d, %d)" % (self.r, self.c)
return "(%d)" % (self.id)
class BFCounter:
def __init__(self):
self.count = 0
def i(self):
orig = self.count
self.count += 1
return orig
def brushfire(img):
"""
:param img:
:return: Output Image
"""
WALL = 255
SPACE = 255 - WALL
colours = BFCounter()
VORONOI = colours.i()
LEFT = colours.i()
RIGHT = colours.i()
UP = colours.i()
DOWN = colours.i()
CV = BFCell(-1, -1, -1, False) # Voronoi
CL = BFCell(-1, -1, -2, True) # Left
CR = BFCell(-1, -1, -3, True) # Right
CU = BFCell(-1, -1, -4, True) # Up
CD = BFCell(-1, -1, -5, True) # Down
rows, cols = (img.shape[0], img.shape[1])
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
regions = UnionFind()
    # occupied flag (assumption): a pixel equal to WALL after thresholding
    # counts as an obstacle
    cells = [[BFCell(r, c, r * cols + c, img[r][c] == WALL) for c in range(cols)] for r in range(rows)]
cellsf = [cell for row in cells for cell in row]
regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))
visited = set()
# Add the border cells to a set
for r in range(rows):
pass
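    # NOTE: the brushfire wavefront / Voronoi extraction is unfinished here;
    # as written the function just returns the inverted input.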
return img
process = False
def mouse_callback(event, x, y, flags, param):
global img_i, down, last_pos, last_time, process
if event == cv2.EVENT_RBUTTONDOWN:
#img_i = np.zeros((R, C), np.uint8)
process = True
elif event == cv2.EVENT_LBUTTONDOWN:
down = True
last_pos = (x, y)
elif event == cv2.EVENT_LBUTTONUP:
down = False
last_pos = (x, y)
elif event == cv2.EVENT_MOUSEMOVE:
if down:
cv2.line(img_i, last_pos, (x, y), 255, 5)
last_pos = (x, y)
last_time = time.time()
cv2.setMouseCallback("main", mouse_callback)
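# Usage: draw with the left mouse button; a right-click runs the thinning
# pass (Zhang-Suen as currently wired below); press 'q' to quit.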
edges = []
img_o = np.copy(img_i)
# iterr = None
while True:
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
# if (time.time() - last_time) > 1:
# last_time = time.time()
# del edges[:]
if process:
process = False
#img_o = hilditch(img_i)
img_o = zhangsuen(img_i)
#img_o = brushfire(img_i)
# iterr = zhangsuen(img_i)
# for edge in edges:
# cv2.line(img_o, edge[0], edge[1], 127, 1)
# if iterr is not None:
# try:
# img_o = iterr.next()
# except:
# iterr = None
combined = np.zeros((img_i.shape[0], img_i.shape[1]*2), np.uint8)
combined[:img_i.shape[0], :img_i.shape[1]] = img_i
combined[:img_i.shape[0], img_i.shape[1]:img_i.shape[1]*2] = img_o
cv2.imshow("main", combined)
|
flexible
|
{
"blob_id": "86d3e90493ed04bbe23792716f46a68948911dc3",
"index": 6861,
"step-1": "<mask token>\n\n\nclass BFCell:\n <mask token>\n\n def __init__(self, r, c, id, occupied):\n \"\"\"BFCell(row, col)\"\"\"\n self.r = r\n self.c = c\n self.id = id\n self.occupied = occupied\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return '(%d)' % self.id\n\n\nclass BFCounter:\n\n def __init__(self):\n self.count = 0\n\n def i(self):\n orig = self.count\n self.count += 1\n return orig\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef black_neighbours(img, pos):\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):\n r, c = neighbour\n count += img[r][c] == 0\n return count\n\n\ndef hilditch(img):\n \"\"\"\n Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0 and neighbour_transitions_to_white(img, (r - 1, c)\n ) == 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][\n c - 1] == 0 and neighbour_transitions_to_white(img, (r,\n c + 1)) == 1:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\ndef zhangsuen(img):\n \"\"\"\n Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][c\n ] == 0:\n continue\n if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0:\n continue\n if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\nclass BFCell:\n \"\"\"Brushfire Cell\"\"\"\n\n def __init__(self, r, c, id, occupied):\n \"\"\"BFCell(row, col)\"\"\"\n self.r = r\n self.c = c\n self.id = id\n self.occupied = occupied\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return '(%d)' % self.id\n\n\nclass BFCounter:\n\n def __init__(self):\n self.count = 0\n\n def i(self):\n orig = self.count\n self.count += 1\n return orig\n\n\ndef brushfire(img):\n \"\"\"\n :param img:\n :return: Output Image\n \"\"\"\n WALL = 255\n SPACE = 255 - WALL\n colours = BFCounter()\n VORONOI = colours.i()\n LEFT = colours.i()\n RIGHT = colours.i()\n UP = colours.i()\n DOWN = colours.i()\n CV = BFCell(-1, -1, -1, 
False)\n CL = BFCell(-1, -1, -2, True)\n CR = BFCell(-1, -1, -3, True)\n CU = BFCell(-1, -1, -4, True)\n CD = BFCell(-1, -1, -5, True)\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n regions = UnionFind()\n cells = [[BFCell(r, c, r * cols + c) for c in range(cols)] for r in\n range(rows)]\n cellsf = [cell for row in cells for cell in row]\n regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))\n visited = set()\n for r in range(rows):\n pass\n return img\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef wtf(img):\n \"\"\"\n Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html\n :param img:\n :return: thinned image\n \"\"\"\n thinned = np.zeros(img.shape, np.uint8)\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n iteration = 0\n file_prefix = './images/' + time.strftime('wtf_%Y-%m-%d_%H-%M-%S_')\n joined = np.zeros((img.shape[0], img.shape[1] * 2), np.uint8)\n joined[:img.shape[0], 0:img.shape[1]] = img\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\n cv2.imwrite(file_prefix + str(iteration) + '.png', joined)\n while True:\n eroded = cv2.erode(img, element)\n temp = cv2.dilate(eroded, element)\n temp = cv2.subtract(img, temp)\n thinned = cv2.bitwise_or(thinned, temp)\n img = eroded.copy()\n iteration += 1\n joined[:img.shape[0], 0:img.shape[1]] = img\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\n cv2.imwrite(file_prefix + str(iteration) + '.png', joined)\n if cv2.countNonZero(img) == 0:\n break\n return thinned\n\n\n<mask token>\n\n\ndef neighbour_transitions_to_white(img, pos):\n last_value = None\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):\n r, c = neighbour\n if last_value is None:\n last_value = img[r][c]\n continue\n count += last_value == 0 and img[r][c] != 0\n last_value = img[r][c]\n return count\n\n\ndef black_neighbours(img, pos):\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):\n r, c = neighbour\n count += img[r][c] == 0\n return count\n\n\ndef hilditch(img):\n \"\"\"\n Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0 and neighbour_transitions_to_white(img, (r - 1, c)\n ) == 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][\n c - 1] == 0 and neighbour_transitions_to_white(img, (r,\n c + 1)) == 1:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\ndef zhangsuen(img):\n \"\"\"\n Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 
1] == 0 and img[r + 1][c\n ] == 0:\n continue\n if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0:\n continue\n if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\nclass BFCell:\n \"\"\"Brushfire Cell\"\"\"\n\n def __init__(self, r, c, id, occupied):\n \"\"\"BFCell(row, col)\"\"\"\n self.r = r\n self.c = c\n self.id = id\n self.occupied = occupied\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return '(%d)' % self.id\n\n\nclass BFCounter:\n\n def __init__(self):\n self.count = 0\n\n def i(self):\n orig = self.count\n self.count += 1\n return orig\n\n\ndef brushfire(img):\n \"\"\"\n :param img:\n :return: Output Image\n \"\"\"\n WALL = 255\n SPACE = 255 - WALL\n colours = BFCounter()\n VORONOI = colours.i()\n LEFT = colours.i()\n RIGHT = colours.i()\n UP = colours.i()\n DOWN = colours.i()\n CV = BFCell(-1, -1, -1, False)\n CL = BFCell(-1, -1, -2, True)\n CR = BFCell(-1, -1, -3, True)\n CU = BFCell(-1, -1, -4, True)\n CD = BFCell(-1, -1, -5, True)\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n regions = UnionFind()\n cells = [[BFCell(r, c, r * cols + c) for c in range(cols)] for r in\n range(rows)]\n cellsf = [cell for row in cells for cell in row]\n regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))\n visited = set()\n for r in range(rows):\n pass\n return img\n\n\n<mask token>\n\n\ndef mouse_callback(event, x, y, flags, param):\n global img_i, down, last_pos, last_time, process\n if event == cv2.EVENT_RBUTTONDOWN:\n process = True\n elif event == cv2.EVENT_LBUTTONDOWN:\n down = True\n last_pos = x, y\n elif event == cv2.EVENT_LBUTTONUP:\n down = False\n last_pos = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if down:\n cv2.line(img_i, last_pos, (x, y), 255, 5)\n last_pos = x, y\n last_time = time.time()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef wtf(img):\n \"\"\"\n Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html\n :param img:\n :return: thinned image\n \"\"\"\n thinned = np.zeros(img.shape, np.uint8)\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n iteration = 0\n file_prefix = './images/' + time.strftime('wtf_%Y-%m-%d_%H-%M-%S_')\n joined = np.zeros((img.shape[0], img.shape[1] * 2), np.uint8)\n joined[:img.shape[0], 0:img.shape[1]] = img\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\n cv2.imwrite(file_prefix + str(iteration) + '.png', joined)\n while True:\n eroded = cv2.erode(img, element)\n temp = cv2.dilate(eroded, element)\n temp = cv2.subtract(img, temp)\n thinned = cv2.bitwise_or(thinned, temp)\n img = eroded.copy()\n iteration += 1\n joined[:img.shape[0], 0:img.shape[1]] = img\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\n cv2.imwrite(file_prefix + str(iteration) + '.png', joined)\n if cv2.countNonZero(img) == 0:\n break\n return thinned\n\n\ndef neighbours8(bounds, pos, repeat_first_last=False):\n rows, cols = bounds\n r, c = pos\n cup = r > 0\n crh = c < cols - 1\n cdn = r < rows - 1\n clf = c > 0\n if cup:\n yield r - 1, c\n if crh:\n yield r - 1, c + 1\n if crh:\n yield r, c + 1\n if cdn:\n yield r + 1, c + 1\n if cdn:\n yield r + 1, c\n if clf:\n yield r + 1, c - 1\n if clf:\n yield r, c - 1\n if cup:\n yield r - 1, c - 1\n if repeat_first_last and cup:\n yield r - 1, c\n\n\ndef neighbour_transitions_to_white(img, pos):\n last_value = None\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):\n r, c = neighbour\n if last_value is None:\n last_value = img[r][c]\n continue\n count += last_value == 0 and img[r][c] != 0\n last_value = img[r][c]\n return count\n\n\ndef black_neighbours(img, pos):\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):\n r, c = neighbour\n count += img[r][c] == 0\n return count\n\n\ndef hilditch(img):\n \"\"\"\n Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0 and neighbour_transitions_to_white(img, (r - 1, c)\n ) == 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][\n c - 1] == 0 and neighbour_transitions_to_white(img, (r,\n c + 1)) == 1:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\ndef zhangsuen(img):\n \"\"\"\n Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = 
'./images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][c\n ] == 0:\n continue\n if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0:\n continue\n if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\nclass BFCell:\n \"\"\"Brushfire Cell\"\"\"\n\n def __init__(self, r, c, id, occupied):\n \"\"\"BFCell(row, col)\"\"\"\n self.r = r\n self.c = c\n self.id = id\n self.occupied = occupied\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return '(%d)' % self.id\n\n\nclass BFCounter:\n\n def __init__(self):\n self.count = 0\n\n def i(self):\n orig = self.count\n self.count += 1\n return orig\n\n\ndef brushfire(img):\n \"\"\"\n :param img:\n :return: Output Image\n \"\"\"\n WALL = 255\n SPACE = 255 - WALL\n colours = BFCounter()\n VORONOI = colours.i()\n LEFT = colours.i()\n RIGHT = colours.i()\n UP = colours.i()\n DOWN = colours.i()\n CV = BFCell(-1, -1, -1, False)\n CL = BFCell(-1, -1, -2, True)\n CR = BFCell(-1, -1, -3, True)\n CU = BFCell(-1, -1, -4, True)\n CD = BFCell(-1, -1, -5, True)\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n regions = UnionFind()\n cells = [[BFCell(r, c, r * cols + c) for c in range(cols)] for r in\n range(rows)]\n cellsf = [cell for row in cells for cell in row]\n regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))\n visited = set()\n for r in range(rows):\n pass\n return img\n\n\n<mask token>\n\n\ndef mouse_callback(event, x, y, flags, param):\n global img_i, down, last_pos, last_time, process\n if event == cv2.EVENT_RBUTTONDOWN:\n process = True\n elif event == cv2.EVENT_LBUTTONDOWN:\n down = True\n last_pos = x, y\n elif event == cv2.EVENT_LBUTTONUP:\n down = False\n last_pos = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if down:\n cv2.line(img_i, last_pos, (x, y), 255, 5)\n last_pos = x, y\n last_time = time.time()\n\n\n<mask token>\n",
"step-5": "import cv2\r\nimport numpy as np\r\nimport time\r\nimport itertools\r\nfrom unionfind import UnionFind\r\n\r\nR = 512\r\nC = 512\r\n\r\n# Setup window\r\ncv2.namedWindow('main')\r\n#img_i = np.zeros((R, C), np.uint8)\r\nimg_i = cv2.imread(\"window1.png\", cv2.IMREAD_GRAYSCALE)\r\n#img_i = cv2.threshold(img_i, 127, 255, cv2.THRESH_BINARY)[1]\r\n\r\ndown = False\r\nlast_pos = (0,0)\r\nlast_time = time.time()\r\n\r\ndef wtf(img):\r\n \"\"\"\r\n Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html\r\n :param img:\r\n :return: thinned image\r\n \"\"\"\r\n thinned = np.zeros(img.shape, np.uint8)\r\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\r\n element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\r\n\r\n iteration = 0\r\n file_prefix = \"./images/\" + time.strftime(\"wtf_%Y-%m-%d_%H-%M-%S_\")\r\n joined = np.zeros((img.shape[0], img.shape[1]*2), np.uint8)\r\n joined[:img.shape[0], 0:img.shape[1]] = img\r\n joined[:img.shape[0], img.shape[1]:img.shape[1]*2] = thinned\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", joined)\r\n while True:\r\n eroded = cv2.erode(img, element)\r\n temp = cv2.dilate(eroded, element)\r\n temp = cv2.subtract(img, temp)\r\n thinned = cv2.bitwise_or(thinned, temp)\r\n img = eroded.copy()\r\n iteration += 1\r\n joined[:img.shape[0], 0:img.shape[1]] = img\r\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", joined)\r\n if cv2.countNonZero(img) == 0:\r\n break\r\n\r\n return thinned\r\n\r\ndef neighbours8(bounds, pos, repeat_first_last=False):\r\n # nhood8 = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]\r\n rows, cols = bounds\r\n r, c = pos\r\n cup = r > 0\r\n crh = c < cols - 1\r\n cdn = r < rows - 1\r\n clf = c > 0\r\n\r\n if cup:\r\n yield (r - 1, c)\r\n if crh:\r\n yield (r - 1, c + 1)\r\n if crh:\r\n yield (r, c + 1)\r\n if cdn:\r\n yield (r + 1, c + 1)\r\n if cdn:\r\n yield (r + 1, c)\r\n if clf:\r\n yield (r + 1, c - 1)\r\n if clf:\r\n yield (r, c - 1)\r\n if cup:\r\n yield (r - 1, c - 1)\r\n if repeat_first_last and cup:\r\n yield (r - 1, c)\r\n\r\ndef neighbour_transitions_to_white(img, pos):\r\n last_value = None\r\n count = 0\r\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):\r\n r, c = neighbour\r\n if last_value is None:\r\n last_value = img[r][c]\r\n continue\r\n count += last_value == 0 and img[r][c] != 0\r\n last_value = img[r][c]\r\n return count\r\n\r\ndef black_neighbours(img, pos):\r\n count = 0\r\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):\r\n r, c = neighbour\r\n count += img[r][c] == 0\r\n return count\r\n\r\ndef hilditch(img):\r\n \"\"\"\r\n Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html\r\n :param img:\r\n :return: thinned image\r\n \"\"\"\r\n rows, cols = (img.shape[0], img.shape[1])\r\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\r\n temp = np.copy(img)\r\n\r\n # Repeat these two steps till no changes\r\n changed = True\r\n iteration = 0\r\n file_prefix = \"./images/\" + time.strftime(\"hilditch_%Y-%m-%d_%H-%M-%S_\")\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", img)\r\n while changed:\r\n changed = False\r\n # Step 1\r\n # for each pixel that has 8 neighbours\r\n for r in range(1, rows - 1):\r\n for c in range(1, cols - 1):\r\n # and is black\r\n if img[r][c] != 0:\r\n continue\r\n\r\n # and 2 <= B(Pixel) <= 6\r\n B = black_neighbours(img, (r, c))\r\n if B < 
2 or B > 6:\r\n continue\r\n\r\n # and A(Pixel) = 1\r\n A = neighbour_transitions_to_white(img, (r, c))\r\n if A != 1:\r\n continue\r\n\r\n # and P2||P4||P8||A(P2)!=1\r\n if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r][c-1] == 0 and neighbour_transitions_to_white(img, (r - 1, c)) == 1:\r\n continue\r\n\r\n # and P2||P4||P6||A(P4)!=1\r\n if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r+1][c-1] == 0 and neighbour_transitions_to_white(img, (r, c+1)) == 1:\r\n continue\r\n\r\n changed = True\r\n temp[r][c] = 255\r\n img = np.copy(temp)\r\n iteration += 1\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", img)\r\n\r\n return img\r\n\r\ndef zhangsuen(img):\r\n \"\"\"\r\n Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm\r\n :param img:\r\n :return: thinned image\r\n \"\"\"\r\n rows, cols = (img.shape[0], img.shape[1])\r\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\r\n temp = np.copy(img)\r\n\r\n # Repeat these two steps till no changes\r\n changed = True\r\n iteration = 0\r\n file_prefix = \"./images/\" + time.strftime(\"zhangsuen_%Y-%m-%d_%H-%M-%S_\")\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", img)\r\n while changed:\r\n changed = False\r\n # Step 1\r\n # for each pixel that has 8 neighbours\r\n for r in range(1, rows - 1):\r\n for c in range(1, cols - 1):\r\n # and is black\r\n if img[r][c] != 0:\r\n continue\r\n\r\n # and 2 <= B(Pixel) <= 6\r\n B = black_neighbours(img, (r, c))\r\n if B < 2 or B > 6:\r\n continue\r\n\r\n # and A(Pixel) = 1\r\n A = neighbour_transitions_to_white(img, (r, c))\r\n if A != 1:\r\n continue\r\n\r\n # and P2||P4||P6\r\n if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r+1][c] == 0:\r\n continue\r\n\r\n # and P4||P6||P8\r\n if img[r][c+1] == 0 and img[r+1][c] == 0 and img[r][c-1] == 0:\r\n continue\r\n\r\n changed = True\r\n temp[r][c] = 255\r\n img = np.copy(temp)\r\n # Step 2\r\n # for each pixel that has 8 neighbours\r\n for r in range(1, rows - 1):\r\n for c in range(1, cols - 1):\r\n # and is black\r\n if img[r][c] != 0:\r\n continue\r\n\r\n # and 2 <= B(Pixel) <= 6\r\n B = black_neighbours(img, (r, c))\r\n if B < 2 or B > 6:\r\n continue\r\n\r\n # and A(Pixel) = 1\r\n A = neighbour_transitions_to_white(img, (r, c))\r\n if A != 1:\r\n continue\r\n\r\n # and P2||P4||P8 <===\r\n if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r][c-1] == 0:\r\n continue\r\n\r\n # and ===>P2||P6||P8\r\n if img[r-1][c] == 0 and img[r+1][c] == 0 and img[r][c-1] == 0:\r\n continue\r\n\r\n changed = True\r\n temp[r][c] = 255\r\n img = np.copy(temp)\r\n iteration += 1\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", img)\r\n\r\n return img\r\n\r\nclass BFCell:\r\n \"\"\"Brushfire Cell\"\"\"\r\n def __init__(self, r, c, id, occupied):\r\n \"\"\"BFCell(row, col)\"\"\"\r\n self.r = r\r\n self.c = c\r\n self.id = id\r\n self.occupied = occupied\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n def __str__(self):\r\n #return \"(%d, %d)\" % (self.r, self.c)\r\n return \"(%d)\" % (self.id)\r\n\r\nclass BFCounter:\r\n def __init__(self):\r\n self.count = 0\r\n\r\n def i(self):\r\n orig = self.count\r\n self.count += 1\r\n return orig\r\n\r\ndef brushfire(img):\r\n \"\"\"\r\n :param img:\r\n :return: Output Image\r\n \"\"\"\r\n WALL = 255\r\n SPACE = 255 - WALL\r\n\r\n colours = BFCounter()\r\n\r\n VORONOI = colours.i()\r\n LEFT = colours.i()\r\n RIGHT = colours.i()\r\n UP = colours.i()\r\n DOWN = colours.i()\r\n\r\n CV = BFCell(-1, -1, -1, False) # Voronoi\r\n CL = BFCell(-1, -1, -2, True) # Left\r\n CR = BFCell(-1, 
-1, -3, True) # Right\r\n CU = BFCell(-1, -1, -4, True) # Up\r\n CD = BFCell(-1, -1, -5, True) # Down\r\n\r\n rows, cols = (img.shape[0], img.shape[1])\r\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\r\n regions = UnionFind()\r\n cells = [[BFCell(r, c, r * cols + c) for c in range(cols)] for r in range(rows)]\r\n cellsf = [cell for row in cells for cell in row]\r\n regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))\r\n\r\n visited = set()\r\n\r\n # Add the border cells to a set\r\n for r in range(rows):\r\n pass\r\n\r\n return img\r\n\r\nprocess = False\r\n\r\ndef mouse_callback(event, x, y, flags, param):\r\n global img_i, down, last_pos, last_time, process\r\n if event == cv2.EVENT_RBUTTONDOWN:\r\n #img_i = np.zeros((R, C), np.uint8)\r\n process = True\r\n elif event == cv2.EVENT_LBUTTONDOWN:\r\n down = True\r\n last_pos = (x, y)\r\n elif event == cv2.EVENT_LBUTTONUP:\r\n down = False\r\n last_pos = (x, y)\r\n elif event == cv2.EVENT_MOUSEMOVE:\r\n if down:\r\n cv2.line(img_i, last_pos, (x, y), 255, 5)\r\n last_pos = (x, y)\r\n last_time = time.time()\r\n\r\ncv2.setMouseCallback(\"main\", mouse_callback)\r\n\r\nedges = []\r\n\r\nimg_o = np.copy(img_i)\r\n\r\n# iterr = None\r\n\r\nwhile True:\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord('q'):\r\n break\r\n # if (time.time() - last_time) > 1:\r\n # last_time = time.time()\r\n # del edges[:]\r\n if process:\r\n process = False\r\n #img_o = hilditch(img_i)\r\n img_o = zhangsuen(img_i)\r\n #img_o = brushfire(img_i)\r\n # iterr = zhangsuen(img_i)\r\n # for edge in edges:\r\n # cv2.line(img_o, edge[0], edge[1], 127, 1)\r\n # if iterr is not None:\r\n # try:\r\n # img_o = iterr.next()\r\n # except:\r\n # iterr = None\r\n\r\n combined = np.zeros((img_i.shape[0], img_i.shape[1]*2), np.uint8)\r\n combined[:img_i.shape[0], :img_i.shape[1]] = img_i\r\n combined[:img_i.shape[0], img_i.shape[1]:img_i.shape[1]*2] = img_o\r\n cv2.imshow(\"main\", combined)\r\n",
"step-ids": [
7,
12,
15,
16,
20
]
}
|
[
7,
12,
15,
16,
20
] |
# python examples/mnist_rnn.py --bsz 128 --bsz-eval 256
import sys
from argparse import ArgumentParser
import pytorch_lightning as pl
import torch.nn as nn
import torch.optim as optim
from loguru import logger
from slp.config.config_parser import make_cli_parser, parse_config
from slp.data.collators import SequenceClassificationCollator
from slp.modules.rnn import RNN
from slp.plbind import (
FromLogits,
PLDataModuleFromDatasets,
RnnPLModule,
make_trainer,
watch_model,
)
from slp.util.log import configure_logging
from torchvision.datasets import MNIST # type: ignore
from torchvision.transforms import Compose, Normalize, ToTensor # type: ignore
collate_fn = SequenceClassificationCollator()
class Net(nn.Module):
def __init__(self, input_size, hidden_size=40, num_classes=10, bidirectional=False):
super().__init__()
self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional)
out_size = hidden_size if not bidirectional else 2 * hidden_size
self.clf = nn.Linear(out_size, num_classes)
def forward(self, x, lengths):
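        # the encoder returns a tuple; its middle element is taken here as
        # the fixed-size sequence summary fed to the linear classifier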
_, x, _ = self.encoder(x, lengths)
out = self.clf(x)
return out
def get_parser():
parser = ArgumentParser("MNIST classification example")
parser.add_argument(
"--hidden",
dest="model.hidden_size",
type=int,
        help="Hidden size of the RNN encoder",
)
parser.add_argument(
"--bi",
dest="model.bidirectional",
action="store_true",
help="Use BiLSTM",
)
return parser
def get_data():
# Fix: https://stackoverflow.com/a/66820249
MNIST.resources = [
(
"https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz",
"f68b3c2dcbeaaa9fbdd348bbdeb94873",
),
(
"https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz",
"d53e105ee54ea40749a09fcbcd1e9432",
),
(
"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz",
"9fb629c4189551a2d022fa330f9573f3",
),
(
"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz",
"ec29112dd5afa0611ce80d1b7f02629c",
),
]
def squeeze(x):
return x.squeeze()
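    # squeezing drops the channel dim, so each 28x28 image becomes a
    # length-28 sequence of 28-dim rows, matching Net(28, ...) in __main__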
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)), squeeze])
train = MNIST(download=True, root=".", transform=data_transform, train=True)
val = MNIST(download=False, root=".", transform=data_transform, train=False)
return train, val
if __name__ == "__main__":
# SETUP ##################################################
parser = get_parser()
parser = make_cli_parser(parser, PLDataModuleFromDatasets)
config = parse_config(parser, parser.parse_args().config)
if config.trainer.experiment_name == "experiment":
config.trainer.experiment_name = "mnist-rnn-classification"
configure_logging(f"logs/{config.trainer.experiment_name}")
if config.seed is not None:
        logger.info(f"Seeding everything with seed={config.seed}")
pl.utilities.seed.seed_everything(seed=config.seed)
train, test = get_data()
# Get data and make datamodule ##########################
ldm = PLDataModuleFromDatasets(
train, test=test, seed=config.seed, collate_fn=collate_fn, **config.data
)
# Create model, optimizer, criterion, scheduler ###########
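    # input_size=28: each MNIST image row is one RNN timestep with 28 features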
model = Net(28, **config.model)
optimizer = getattr(optim, config.optimizer)(model.parameters(), **config.optim)
criterion = nn.CrossEntropyLoss()
lr_scheduler = None
if config.lr_scheduler:
lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, **config.lr_schedule
)
# Wrap in PLModule, & configure metrics ####################
lm = RnnPLModule(
model,
optimizer,
criterion,
lr_scheduler=lr_scheduler,
metrics={"acc": FromLogits(pl.metrics.classification.Accuracy())},
hparams=config,
)
# Run debugging session or fit & test the model ############
if config.debug:
logger.info("Running in debug mode: Fast run on 5 batches")
trainer = make_trainer(fast_dev_run=5)
trainer.fit(lm, datamodule=ldm)
logger.info("Running in debug mode: Overfitting 5 batches")
trainer = make_trainer(overfit_batches=5)
trainer.fit(lm, datamodule=ldm)
else:
trainer = make_trainer(**config.trainer)
watch_model(trainer, model)
trainer.fit(lm, datamodule=ldm)
trainer.test(ckpt_path="best", test_dataloaders=ldm.test_dataloader())
logger.info("Run finished. Uploading files to wandb...")
|
normal
|
{
"blob_id": "d8a09f9952856da69120fae6221636dd5bd8c93e",
"index": 3567,
"step-1": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size=40, num_classes=10,\n bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional\n )\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n return out\n\n\ndef get_parser():\n parser = ArgumentParser('MNIST classification example')\n parser.add_argument('--hidden', dest='model.hidden_size', type=int,\n help='Intermediate hidden layers for linear module')\n parser.add_argument('--bi', dest='model.bidirectional', action=\n 'store_true', help='Use BiLSTM')\n return parser\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size=40, num_classes=10,\n bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional\n )\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n return out\n\n\ndef get_parser():\n parser = ArgumentParser('MNIST classification example')\n parser.add_argument('--hidden', dest='model.hidden_size', type=int,\n help='Intermediate hidden layers for linear module')\n parser.add_argument('--bi', dest='model.bidirectional', action=\n 'store_true', help='Use BiLSTM')\n return parser\n\n\ndef get_data():\n MNIST.resources = [(\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz'\n , 'f68b3c2dcbeaaa9fbdd348bbdeb94873'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz'\n , 'd53e105ee54ea40749a09fcbcd1e9432'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz'\n , '9fb629c4189551a2d022fa330f9573f3'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'\n , 'ec29112dd5afa0611ce80d1b7f02629c')]\n\n def squeeze(x):\n return x.squeeze()\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)),\n squeeze])\n train = MNIST(download=True, root='.', transform=data_transform, train=True\n )\n val = MNIST(download=False, root='.', transform=data_transform, train=False\n )\n return train, val\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size=40, num_classes=10,\n bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional\n )\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n return out\n\n\ndef get_parser():\n parser = ArgumentParser('MNIST classification example')\n parser.add_argument('--hidden', dest='model.hidden_size', type=int,\n help='Intermediate hidden layers for linear module')\n parser.add_argument('--bi', dest='model.bidirectional', action=\n 'store_true', help='Use BiLSTM')\n return parser\n\n\ndef get_data():\n MNIST.resources = [(\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz'\n , 'f68b3c2dcbeaaa9fbdd348bbdeb94873'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz'\n , 'd53e105ee54ea40749a09fcbcd1e9432'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz'\n , '9fb629c4189551a2d022fa330f9573f3'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'\n , 'ec29112dd5afa0611ce80d1b7f02629c')]\n\n def squeeze(x):\n return x.squeeze()\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)),\n squeeze])\n train = MNIST(download=True, root='.', transform=data_transform, train=True\n )\n val = MNIST(download=False, root='.', transform=data_transform, train=False\n )\n return train, val\n\n\nif __name__ == '__main__':\n parser = get_parser()\n parser = make_cli_parser(parser, PLDataModuleFromDatasets)\n config = parse_config(parser, parser.parse_args().config)\n if config.trainer.experiment_name == 'experiment':\n config.trainer.experiment_name = 'mnist-rnn-classification'\n configure_logging(f'logs/{config.trainer.experiment_name}')\n if config.seed is not None:\n logger.info('Seeding everything with seed={seed}')\n pl.utilities.seed.seed_everything(seed=config.seed)\n train, test = get_data()\n ldm = PLDataModuleFromDatasets(train, test=test, seed=config.seed,\n collate_fn=collate_fn, **config.data)\n model = Net(28, **config.model)\n optimizer = getattr(optim, config.optimizer)(model.parameters(), **\n config.optim)\n criterion = nn.CrossEntropyLoss()\n lr_scheduler = None\n if config.lr_scheduler:\n lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **\n config.lr_schedule)\n lm = RnnPLModule(model, optimizer, criterion, lr_scheduler=lr_scheduler,\n metrics={'acc': FromLogits(pl.metrics.classification.Accuracy())},\n hparams=config)\n if config.debug:\n logger.info('Running in debug mode: Fast run on 5 batches')\n trainer = make_trainer(fast_dev_run=5)\n trainer.fit(lm, datamodule=ldm)\n logger.info('Running in debug mode: Overfitting 5 batches')\n trainer = make_trainer(overfit_batches=5)\n trainer.fit(lm, datamodule=ldm)\n else:\n trainer = make_trainer(**config.trainer)\n watch_model(trainer, model)\n trainer.fit(lm, datamodule=ldm)\n trainer.test(ckpt_path='best', test_dataloaders=ldm.test_dataloader())\n logger.info('Run finished. Uploading files to wandb...')\n",
"step-4": "<mask token>\ncollate_fn = SequenceClassificationCollator()\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size=40, num_classes=10,\n bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional\n )\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n return out\n\n\ndef get_parser():\n parser = ArgumentParser('MNIST classification example')\n parser.add_argument('--hidden', dest='model.hidden_size', type=int,\n help='Intermediate hidden layers for linear module')\n parser.add_argument('--bi', dest='model.bidirectional', action=\n 'store_true', help='Use BiLSTM')\n return parser\n\n\ndef get_data():\n MNIST.resources = [(\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz'\n , 'f68b3c2dcbeaaa9fbdd348bbdeb94873'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz'\n , 'd53e105ee54ea40749a09fcbcd1e9432'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz'\n , '9fb629c4189551a2d022fa330f9573f3'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'\n , 'ec29112dd5afa0611ce80d1b7f02629c')]\n\n def squeeze(x):\n return x.squeeze()\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)),\n squeeze])\n train = MNIST(download=True, root='.', transform=data_transform, train=True\n )\n val = MNIST(download=False, root='.', transform=data_transform, train=False\n )\n return train, val\n\n\nif __name__ == '__main__':\n parser = get_parser()\n parser = make_cli_parser(parser, PLDataModuleFromDatasets)\n config = parse_config(parser, parser.parse_args().config)\n if config.trainer.experiment_name == 'experiment':\n config.trainer.experiment_name = 'mnist-rnn-classification'\n configure_logging(f'logs/{config.trainer.experiment_name}')\n if config.seed is not None:\n logger.info('Seeding everything with seed={seed}')\n pl.utilities.seed.seed_everything(seed=config.seed)\n train, test = get_data()\n ldm = PLDataModuleFromDatasets(train, test=test, seed=config.seed,\n collate_fn=collate_fn, **config.data)\n model = Net(28, **config.model)\n optimizer = getattr(optim, config.optimizer)(model.parameters(), **\n config.optim)\n criterion = nn.CrossEntropyLoss()\n lr_scheduler = None\n if config.lr_scheduler:\n lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **\n config.lr_schedule)\n lm = RnnPLModule(model, optimizer, criterion, lr_scheduler=lr_scheduler,\n metrics={'acc': FromLogits(pl.metrics.classification.Accuracy())},\n hparams=config)\n if config.debug:\n logger.info('Running in debug mode: Fast run on 5 batches')\n trainer = make_trainer(fast_dev_run=5)\n trainer.fit(lm, datamodule=ldm)\n logger.info('Running in debug mode: Overfitting 5 batches')\n trainer = make_trainer(overfit_batches=5)\n trainer.fit(lm, datamodule=ldm)\n else:\n trainer = make_trainer(**config.trainer)\n watch_model(trainer, model)\n trainer.fit(lm, datamodule=ldm)\n trainer.test(ckpt_path='best', test_dataloaders=ldm.test_dataloader())\n logger.info('Run finished. Uploading files to wandb...')\n",
"step-5": "# python examples/mnist_rnn.py --bsz 128 --bsz-eval 256\n\nimport sys\nfrom argparse import ArgumentParser\n\nimport pytorch_lightning as pl\nimport torch.nn as nn\nimport torch.optim as optim\nfrom loguru import logger\nfrom slp.config.config_parser import make_cli_parser, parse_config\nfrom slp.data.collators import SequenceClassificationCollator\nfrom slp.modules.rnn import RNN\nfrom slp.plbind import (\n FromLogits,\n PLDataModuleFromDatasets,\n RnnPLModule,\n make_trainer,\n watch_model,\n)\nfrom slp.util.log import configure_logging\nfrom torchvision.datasets import MNIST # type: ignore\nfrom torchvision.transforms import Compose, Normalize, ToTensor # type: ignore\n\ncollate_fn = SequenceClassificationCollator()\n\n\nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size=40, num_classes=10, bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional)\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n\n return out\n\n\ndef get_parser():\n parser = ArgumentParser(\"MNIST classification example\")\n parser.add_argument(\n \"--hidden\",\n dest=\"model.hidden_size\",\n type=int,\n help=\"Intermediate hidden layers for linear module\",\n )\n parser.add_argument(\n \"--bi\",\n dest=\"model.bidirectional\",\n action=\"store_true\",\n help=\"Use BiLSTM\",\n )\n\n return parser\n\n\ndef get_data():\n # Fix: https://stackoverflow.com/a/66820249\n MNIST.resources = [\n (\n \"https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz\",\n \"f68b3c2dcbeaaa9fbdd348bbdeb94873\",\n ),\n (\n \"https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz\",\n \"d53e105ee54ea40749a09fcbcd1e9432\",\n ),\n (\n \"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz\",\n \"9fb629c4189551a2d022fa330f9573f3\",\n ),\n (\n \"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz\",\n \"ec29112dd5afa0611ce80d1b7f02629c\",\n ),\n ]\n\n def squeeze(x):\n return x.squeeze()\n\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)), squeeze])\n train = MNIST(download=True, root=\".\", transform=data_transform, train=True)\n\n val = MNIST(download=False, root=\".\", transform=data_transform, train=False)\n\n return train, val\n\n\nif __name__ == \"__main__\":\n # SETUP ##################################################\n parser = get_parser()\n parser = make_cli_parser(parser, PLDataModuleFromDatasets)\n\n config = parse_config(parser, parser.parse_args().config)\n\n if config.trainer.experiment_name == \"experiment\":\n config.trainer.experiment_name = \"mnist-rnn-classification\"\n\n configure_logging(f\"logs/{config.trainer.experiment_name}\")\n\n if config.seed is not None:\n logger.info(\"Seeding everything with seed={seed}\")\n pl.utilities.seed.seed_everything(seed=config.seed)\n\n train, test = get_data()\n\n # Get data and make datamodule ##########################\n ldm = PLDataModuleFromDatasets(\n train, test=test, seed=config.seed, collate_fn=collate_fn, **config.data\n )\n\n # Create model, optimizer, criterion, scheduler ###########\n model = Net(28, **config.model)\n\n optimizer = getattr(optim, config.optimizer)(model.parameters(), **config.optim)\n criterion = nn.CrossEntropyLoss()\n\n lr_scheduler = None\n\n if config.lr_scheduler:\n lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n 
optimizer, **config.lr_schedule\n )\n\n # Wrap in PLModule, & configure metrics ####################\n lm = RnnPLModule(\n model,\n optimizer,\n criterion,\n lr_scheduler=lr_scheduler,\n metrics={\"acc\": FromLogits(pl.metrics.classification.Accuracy())},\n hparams=config,\n )\n\n # Run debugging session or fit & test the model ############\n\n if config.debug:\n logger.info(\"Running in debug mode: Fast run on 5 batches\")\n trainer = make_trainer(fast_dev_run=5)\n trainer.fit(lm, datamodule=ldm)\n\n logger.info(\"Running in debug mode: Overfitting 5 batches\")\n trainer = make_trainer(overfit_batches=5)\n trainer.fit(lm, datamodule=ldm)\n\n else:\n trainer = make_trainer(**config.trainer)\n watch_model(trainer, model)\n\n trainer.fit(lm, datamodule=ldm)\n\n trainer.test(ckpt_path=\"best\", test_dataloaders=ldm.test_dataloader())\n\n logger.info(\"Run finished. Uploading files to wandb...\")\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
from rest_framework.views import APIView
from rest_framework.response import Response
from drf_yasg.utils import swagger_auto_schema
from theme.models import UserProfile
from hs_core.views import serializers
class UserInfo(APIView):
@swagger_auto_schema(operation_description="Get information about the logged in user",
responses={200: serializers.UserInfoSerializer})
def get(self, request):
'''
Get information about the logged in user
:param request:
:return: HttpResponse response containing **user_info**
'''
if not request.user.is_authenticated:
return Response({"title": "None", "organization": "None", "state": "None", "country": "None",
"user_type": "None"})
user_info = {"username": request.user.username}
if request.user.email:
user_info['email'] = request.user.email
if request.user.first_name:
user_info['first_name'] = request.user.first_name
if request.user.id:
user_info['id'] = request.user.id
if request.user.last_name:
user_info['last_name'] = request.user.last_name
user_profile = UserProfile.objects.filter(user=request.user).first()
if user_profile.title:
user_info['title'] = user_profile.title
if user_profile.organization:
user_info['organization'] = user_profile.organization
if user_profile.state and user_profile.state.strip() and user_profile.state != 'Unspecified':
user_info['state'] = user_profile.state.strip()
if user_profile.country and user_profile.country != 'Unspecified':
user_info['country'] = user_profile.country
if user_profile.user_type and user_profile.user_type.strip() and user_profile.user_type != 'Unspecified':
user_info['user_type'] = user_profile.user_type.strip()
return Response(user_info)
|
normal
|
{
"blob_id": "c45ffe8cba8d152e346182252dbc43e22eaf83e2",
"index": 3498,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserInfo(APIView):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass UserInfo(APIView):\n\n @swagger_auto_schema(operation_description=\n 'Get information about the logged in user', responses={(200):\n serializers.UserInfoSerializer})\n def get(self, request):\n \"\"\"\n Get information about the logged in user\n\n :param request:\n :return: HttpResponse response containing **user_info**\n \"\"\"\n if not request.user.is_authenticated:\n return Response({'title': 'None', 'organization': 'None',\n 'state': 'None', 'country': 'None', 'user_type': 'None'})\n user_info = {'username': request.user.username}\n if request.user.email:\n user_info['email'] = request.user.email\n if request.user.first_name:\n user_info['first_name'] = request.user.first_name\n if request.user.id:\n user_info['id'] = request.user.id\n if request.user.last_name:\n user_info['last_name'] = request.user.last_name\n user_profile = UserProfile.objects.filter(user=request.user).first()\n if user_profile.title:\n user_info['title'] = user_profile.title\n if user_profile.organization:\n user_info['organization'] = user_profile.organization\n if user_profile.state and user_profile.state.strip(\n ) and user_profile.state != 'Unspecified':\n user_info['state'] = user_profile.state.strip()\n if user_profile.country and user_profile.country != 'Unspecified':\n user_info['country'] = user_profile.country\n if user_profile.user_type and user_profile.user_type.strip(\n ) and user_profile.user_type != 'Unspecified':\n user_info['user_type'] = user_profile.user_type.strip()\n return Response(user_info)\n",
"step-4": "from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom drf_yasg.utils import swagger_auto_schema\nfrom theme.models import UserProfile\nfrom hs_core.views import serializers\n\n\nclass UserInfo(APIView):\n\n @swagger_auto_schema(operation_description=\n 'Get information about the logged in user', responses={(200):\n serializers.UserInfoSerializer})\n def get(self, request):\n \"\"\"\n Get information about the logged in user\n\n :param request:\n :return: HttpResponse response containing **user_info**\n \"\"\"\n if not request.user.is_authenticated:\n return Response({'title': 'None', 'organization': 'None',\n 'state': 'None', 'country': 'None', 'user_type': 'None'})\n user_info = {'username': request.user.username}\n if request.user.email:\n user_info['email'] = request.user.email\n if request.user.first_name:\n user_info['first_name'] = request.user.first_name\n if request.user.id:\n user_info['id'] = request.user.id\n if request.user.last_name:\n user_info['last_name'] = request.user.last_name\n user_profile = UserProfile.objects.filter(user=request.user).first()\n if user_profile.title:\n user_info['title'] = user_profile.title\n if user_profile.organization:\n user_info['organization'] = user_profile.organization\n if user_profile.state and user_profile.state.strip(\n ) and user_profile.state != 'Unspecified':\n user_info['state'] = user_profile.state.strip()\n if user_profile.country and user_profile.country != 'Unspecified':\n user_info['country'] = user_profile.country\n if user_profile.user_type and user_profile.user_type.strip(\n ) and user_profile.user_type != 'Unspecified':\n user_info['user_type'] = user_profile.user_type.strip()\n return Response(user_info)\n",
"step-5": "from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom drf_yasg.utils import swagger_auto_schema\n\nfrom theme.models import UserProfile\nfrom hs_core.views import serializers\n\n\nclass UserInfo(APIView):\n @swagger_auto_schema(operation_description=\"Get information about the logged in user\",\n responses={200: serializers.UserInfoSerializer})\n def get(self, request):\n '''\n Get information about the logged in user\n\n :param request:\n :return: HttpResponse response containing **user_info**\n '''\n if not request.user.is_authenticated:\n return Response({\"title\": \"None\", \"organization\": \"None\", \"state\": \"None\", \"country\": \"None\",\n \"user_type\": \"None\"})\n\n user_info = {\"username\": request.user.username}\n\n if request.user.email:\n user_info['email'] = request.user.email\n if request.user.first_name:\n user_info['first_name'] = request.user.first_name\n if request.user.id:\n user_info['id'] = request.user.id\n if request.user.last_name:\n user_info['last_name'] = request.user.last_name\n\n user_profile = UserProfile.objects.filter(user=request.user).first()\n if user_profile.title:\n user_info['title'] = user_profile.title\n if user_profile.organization:\n user_info['organization'] = user_profile.organization\n if user_profile.state and user_profile.state.strip() and user_profile.state != 'Unspecified':\n user_info['state'] = user_profile.state.strip()\n if user_profile.country and user_profile.country != 'Unspecified':\n user_info['country'] = user_profile.country\n if user_profile.user_type and user_profile.user_type.strip() and user_profile.user_type != 'Unspecified':\n user_info['user_type'] = user_profile.user_type.strip()\n return Response(user_info)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os as os
import io as io
import re
class Stopwords:
def __init__(self, base_dir='data'):
self.base_dir = base_dir
def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):
# Load stopwords from file.
if base_dir is not None:
self.base_dir = base_dir
filename = os.path.join(self.base_dir, stopwords_file)
self.stopwords = []
pattern = re.compile('[\r\n]')
with open(filename, 'r', encoding='utf-8') as fin:
self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin]
return self.stopwords
|
normal
|
{
"blob_id": "dad4e14da734f2e2329f4cbe064c73c82a4ae27c",
"index": 8119,
"step-1": "<mask token>\n\n\nclass Stopwords:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Stopwords:\n\n def __init__(self, base_dir='data'):\n self.base_dir = base_dir\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Stopwords:\n\n def __init__(self, base_dir='data'):\n self.base_dir = base_dir\n\n def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):\n if base_dir is not None:\n self.base_dir = base_dir\n filename = os.path.join(self.base_dir, stopwords_file)\n self.stopwords = []\n pattern = re.compile('[\\r\\n]')\n with open(filename, 'r', encoding='utf-8') as fin:\n self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin\n ]\n return self.stopwords\n",
"step-4": "import os as os\nimport io as io\nimport re\n\n\nclass Stopwords:\n\n def __init__(self, base_dir='data'):\n self.base_dir = base_dir\n\n def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):\n if base_dir is not None:\n self.base_dir = base_dir\n filename = os.path.join(self.base_dir, stopwords_file)\n self.stopwords = []\n pattern = re.compile('[\\r\\n]')\n with open(filename, 'r', encoding='utf-8') as fin:\n self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin\n ]\n return self.stopwords\n",
"step-5": "import os as os\nimport io as io\nimport re\n\nclass Stopwords:\n\n def __init__(self, base_dir='data'):\n self.base_dir = base_dir\n\n def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):\n # Load stopwords from file.\n if base_dir is not None:\n self.base_dir = base_dir\n filename = os.path.join(self.base_dir, stopwords_file)\n\n self.stopwords = []\n pattern = re.compile('[\\r\\n]')\n with open(filename, 'r', encoding='utf-8') as fin:\n self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin]\n return self.stopwords",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def counter(count):
for i in range(count):
time.sleep(1)
print('[%s] => %s' % (os.getpid(), i))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def counter(count):
for i in range(count):
time.sleep(1)
print('[%s] => %s' % (os.getpid(), i))
<|reserved_special_token_0|>
pdb.set_trace()
for i in range(5):
pid = os.fork()
if pid != 0:
print('Process %d spawned' % pid)
else:
counter(5)
os._exit(0)
print('Main process exiting.')
<|reserved_special_token_1|>
import os, time
def counter(count):
for i in range(count):
time.sleep(1)
print('[%s] => %s' % (os.getpid(), i))
import pdb
pdb.set_trace()
for i in range(5):
pid = os.fork()
if pid != 0:
print('Process %d spawned' % pid)
else:
counter(5)
os._exit(0)
print('Main process exiting.')
<|reserved_special_token_1|>
import os, time
def counter(count): # run in new process
for i in range(count):
time.sleep(1) # simulate real work
print('[%s] => %s' % (os.getpid(), i))
import pdb;pdb.set_trace()
for i in range(5):
pid= os.fork()
if pid != 0:
print('Process %d spawned' % pid) # in parent: continue
else:
counter(5) # else in child/new process
os._exit(0) # run function and exit
print('Main process exiting.')
|
flexible
|
{
"blob_id": "fd564d09d7320fd444ed6eec7e51afa4d065ec4d",
"index": 6945,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef counter(count):\n for i in range(count):\n time.sleep(1)\n print('[%s] => %s' % (os.getpid(), i))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef counter(count):\n for i in range(count):\n time.sleep(1)\n print('[%s] => %s' % (os.getpid(), i))\n\n\n<mask token>\npdb.set_trace()\nfor i in range(5):\n pid = os.fork()\n if pid != 0:\n print('Process %d spawned' % pid)\n else:\n counter(5)\n os._exit(0)\nprint('Main process exiting.')\n",
"step-4": "import os, time\n\n\ndef counter(count):\n for i in range(count):\n time.sleep(1)\n print('[%s] => %s' % (os.getpid(), i))\n\n\nimport pdb\npdb.set_trace()\nfor i in range(5):\n pid = os.fork()\n if pid != 0:\n print('Process %d spawned' % pid)\n else:\n counter(5)\n os._exit(0)\nprint('Main process exiting.')\n",
"step-5": "import os, time\ndef counter(count): # run in new process\n for i in range(count):\n time.sleep(1) # simulate real work\n print('[%s] => %s' % (os.getpid(), i))\n\nimport pdb;pdb.set_trace()\nfor i in range(5):\n pid= os.fork()\n if pid != 0:\n print('Process %d spawned' % pid) # in parent: continue\n else:\n counter(5) # else in child/new process\n os._exit(0) # run function and exit\n\nprint('Main process exiting.') \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Import the otb applications package
import otbApplication
def ComputeHaralick(image, chan, xrad, yrad):
# The following line creates an instance of the HaralickTextureExtraction application
HaralickTextureExtraction = otbApplication.Registry.CreateApplication("HaralickTextureExtraction")
# The following lines set all the application parameters:
HaralickTextureExtraction.SetParameterString("in", image)
HaralickTextureExtraction.SetParameterInt("channel", int(chan))
HaralickTextureExtraction.SetParameterInt("parameters.xrad", int(xrad))
HaralickTextureExtraction.SetParameterInt("parameters.yrad", int(yrad))
HaralickTextureExtraction.SetParameterString("texture","simple")
HaralickTextureExtraction.SetParameterString("out", "HaralickTextures.tif")
	# The following line executes the application 
HaralickTextureExtraction.ExecuteAndWriteOutput()
print "HaralickTextures.tif a été écrit"
|
normal
|
{
"blob_id": "b4992a5b396b6809813875443eb8dbb5b00eb6a9",
"index": 4865,
"step-1": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n\n# Import the otb applications package \nimport otbApplication \n \ndef ComputeHaralick(image, chan, xrad, yrad):\n\n\t# The following line creates an instance of the HaralickTextureExtraction application \n\tHaralickTextureExtraction = otbApplication.Registry.CreateApplication(\"HaralickTextureExtraction\") \n\t# The following lines set all the application parameters: \n\tHaralickTextureExtraction.SetParameterString(\"in\", image) \n\tHaralickTextureExtraction.SetParameterInt(\"channel\", int(chan))\n\tHaralickTextureExtraction.SetParameterInt(\"parameters.xrad\", int(xrad)) \n\tHaralickTextureExtraction.SetParameterInt(\"parameters.yrad\", int(yrad)) \t \n\tHaralickTextureExtraction.SetParameterString(\"texture\",\"simple\") \n\tHaralickTextureExtraction.SetParameterString(\"out\", \"HaralickTextures.tif\") \t \n\t# The following line execute the application \n\tHaralickTextureExtraction.ExecuteAndWriteOutput()\n\tprint \"HaralickTextures.tif a été écrit\"\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class SearchConfig(AppConfig):
name = 'search'
verbose_name = _("Search")
|
normal
|
{
"blob_id": "f47e4d6ff079b6ac2320467d87b34ae82face032",
"index": 4506,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SearchConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SearchConfig(AppConfig):\n name = 'search'\n verbose_name = _('Search')\n",
"step-4": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass SearchConfig(AppConfig):\n name = 'search'\n verbose_name = _('Search')\n",
"step-5": "from django.apps import AppConfig\r\nfrom django.utils.translation import gettext_lazy as _\r\n\r\nclass SearchConfig(AppConfig):\r\n name = 'search'\r\n verbose_name = _(\"Search\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Primer3 input form.
For details on input params see:
https://primer3.org/manual.html#globalTags
"""
from django import forms
from django.core.exceptions import ValidationError
from .fasta import Fasta
class PrimerForm(forms.Form):
"""Collect user input to run primer prediction."""
fasta = forms.CharField(initial="")
# Primer size range
primer_min = forms.IntegerField(initial=18, max_value=35)
primer_max = forms.IntegerField(initial=27, max_value=35)
primer_optimum = forms.IntegerField(initial=20, max_value=35)
# Amplicon size range
amplicon_min = forms.IntegerField(
initial=60, min_value=50, max_value=20000)
amplicon_max = forms.IntegerField(
initial=80, min_value=50, max_value=20000)
# Primer melting temperature range
tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)
tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)
tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)
# Max self complement
self_dimer_any = forms.FloatField(
initial=8.0, min_value=0, max_value=9999.99)
# Max self complement 3'
self_dimer_end = forms.FloatField(
initial=3.0, min_value=0, max_value=9999.99)
# GC content
gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)
gc_clamp = forms.IntegerField(initial=0)
def clean(self):
"""Validate and return user input."""
data = self.cleaned_data
data['fasta'] = Fasta.from_string(data['fasta'])
validate_fasta(data)
return data
def validate_fasta(data):
"""Validate input sequence lengths."""
for sequence in data['fasta'].values():
print(f'Sequence length {len(sequence)} nt')
if len(sequence) < data['amplicon_min']:
raise ValidationError({'fasta':
f'Input sequence must be longer than minimum'
+ f' amplicon length parameter ({data["amplicon_min"]} nt)'
})
|
normal
|
{
"blob_id": "6291375738db7914d551f9a1c6d2897b7d236b87",
"index": 1742,
"step-1": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n <mask token>\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta': \n f'Input sequence must be longer than minimum' +\n f\" amplicon length parameter ({data['amplicon_min']} nt)\"})\n",
"step-4": "<mask token>\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom .fasta import Fasta\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta': \n f'Input sequence must be longer than minimum' +\n f\" amplicon length parameter ({data['amplicon_min']} nt)\"})\n",
"step-5": "\"\"\"Primer3 input form.\n\nFor details on input params see:\nhttps://primer3.org/manual.html#globalTags\n\"\"\"\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom .fasta import Fasta\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n\n fasta = forms.CharField(initial=\"\")\n # Primer size range\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n # Amplicon size range\n amplicon_min = forms.IntegerField(\n initial=60, min_value=50, max_value=20000)\n amplicon_max = forms.IntegerField(\n initial=80, min_value=50, max_value=20000)\n # Primer melting temperature range\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n # Max self complement\n self_dimer_any = forms.FloatField(\n initial=8.0, min_value=0, max_value=9999.99)\n # Max self complement 3'\n self_dimer_end = forms.FloatField(\n initial=3.0, min_value=0, max_value=9999.99)\n # GC content\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta':\n f'Input sequence must be longer than minimum'\n + f' amplicon length parameter ({data[\"amplicon_min\"]} nt)'\n })\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import json
import random
from time import sleep
url = "data/data.json"
def loop(run_state):
error = 1
simulations = 1
while run:
error_margin = str((error/simulations) * 100) + "%"
prediction = get_prediction()
print("Prediction: %s" % prediction)
print("Error Margin: %s" % error_margin)
print("Flip the coin and insert your result:\nh = head\nt = tail")
answer = input()
comparator = ""
if answer is "h" or answer is "t":
if answer == "t":
write_data(False)
comparator = "tail"
elif answer == "h":
write_data(True)
comparator = "head"
simulations += 1
if comparator != prediction:
error += 1
else:
print("Invalid answer\n")
def get_prediction():
file = read_file()
data = file["coin-result"]
true = 0
for i in data:
if i is True:
true += 1
head = true/len(data)
tail = 1-head
if head + tail == 1:
rand = random.uniform(0.0, 1.0)
if head == 1:
return "head"
elif tail == 1:
return "tail"
elif head > tail:
if rand > head:
return "head"
else:
return "tail"
elif head < tail:
if rand > tail:
return "tail"
else:
return "head"
elif head == tail:
rand = random.randint(0, 1)
if rand == 0:
return "tail"
else:
return "head"
def read_file():
file = open(url, "r")
data = json.loads(file.read())
file.close()
return data
def write_data(value):
data = read_file()
file = open(url, "w")
data["coin-result"].append(value)
json.dump(data, file)
file.close()
def get_answer(answer):
if answer == "c":
return "head"
elif answer == "t":
return "tail"
else:
print("Invalid answer")
# OnRun
run = True
print("Welcome to CoinPredictor\n")
loop(run)
'''
file = open("data/data.json", "w")
data['coin-result'].append(False)
data = json.dump(data, file)
print(data)
file.close()'''
|
normal
|
{
"blob_id": "25ff54a969651d365de33f2420c662518dd63738",
"index": 864,
"step-1": "<mask token>\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n while run:\n error_margin = str(error / simulations * 100) + '%'\n prediction = get_prediction()\n print('Prediction: %s' % prediction)\n print('Error Margin: %s' % error_margin)\n print('Flip the coin and insert your result:\\nh = head\\nt = tail')\n answer = input()\n comparator = ''\n if answer is 'h' or answer is 't':\n if answer == 't':\n write_data(False)\n comparator = 'tail'\n elif answer == 'h':\n write_data(True)\n comparator = 'head'\n simulations += 1\n if comparator != prediction:\n error += 1\n else:\n print('Invalid answer\\n')\n\n\ndef get_prediction():\n file = read_file()\n data = file['coin-result']\n true = 0\n for i in data:\n if i is True:\n true += 1\n head = true / len(data)\n tail = 1 - head\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n if head == 1:\n return 'head'\n elif tail == 1:\n return 'tail'\n elif head > tail:\n if rand > head:\n return 'head'\n else:\n return 'tail'\n elif head < tail:\n if rand > tail:\n return 'tail'\n else:\n return 'head'\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return 'tail'\n else:\n return 'head'\n\n\ndef read_file():\n file = open(url, 'r')\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, 'w')\n data['coin-result'].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == 'c':\n return 'head'\n elif answer == 't':\n return 'tail'\n else:\n print('Invalid answer')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n while run:\n error_margin = str(error / simulations * 100) + '%'\n prediction = get_prediction()\n print('Prediction: %s' % prediction)\n print('Error Margin: %s' % error_margin)\n print('Flip the coin and insert your result:\\nh = head\\nt = tail')\n answer = input()\n comparator = ''\n if answer is 'h' or answer is 't':\n if answer == 't':\n write_data(False)\n comparator = 'tail'\n elif answer == 'h':\n write_data(True)\n comparator = 'head'\n simulations += 1\n if comparator != prediction:\n error += 1\n else:\n print('Invalid answer\\n')\n\n\ndef get_prediction():\n file = read_file()\n data = file['coin-result']\n true = 0\n for i in data:\n if i is True:\n true += 1\n head = true / len(data)\n tail = 1 - head\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n if head == 1:\n return 'head'\n elif tail == 1:\n return 'tail'\n elif head > tail:\n if rand > head:\n return 'head'\n else:\n return 'tail'\n elif head < tail:\n if rand > tail:\n return 'tail'\n else:\n return 'head'\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return 'tail'\n else:\n return 'head'\n\n\ndef read_file():\n file = open(url, 'r')\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, 'w')\n data['coin-result'].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == 'c':\n return 'head'\n elif answer == 't':\n return 'tail'\n else:\n print('Invalid answer')\n\n\n<mask token>\nprint('Welcome to CoinPredictor\\n')\nloop(run)\n<mask token>\n",
"step-3": "<mask token>\nurl = 'data/data.json'\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n while run:\n error_margin = str(error / simulations * 100) + '%'\n prediction = get_prediction()\n print('Prediction: %s' % prediction)\n print('Error Margin: %s' % error_margin)\n print('Flip the coin and insert your result:\\nh = head\\nt = tail')\n answer = input()\n comparator = ''\n if answer is 'h' or answer is 't':\n if answer == 't':\n write_data(False)\n comparator = 'tail'\n elif answer == 'h':\n write_data(True)\n comparator = 'head'\n simulations += 1\n if comparator != prediction:\n error += 1\n else:\n print('Invalid answer\\n')\n\n\ndef get_prediction():\n file = read_file()\n data = file['coin-result']\n true = 0\n for i in data:\n if i is True:\n true += 1\n head = true / len(data)\n tail = 1 - head\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n if head == 1:\n return 'head'\n elif tail == 1:\n return 'tail'\n elif head > tail:\n if rand > head:\n return 'head'\n else:\n return 'tail'\n elif head < tail:\n if rand > tail:\n return 'tail'\n else:\n return 'head'\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return 'tail'\n else:\n return 'head'\n\n\ndef read_file():\n file = open(url, 'r')\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, 'w')\n data['coin-result'].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == 'c':\n return 'head'\n elif answer == 't':\n return 'tail'\n else:\n print('Invalid answer')\n\n\nrun = True\nprint('Welcome to CoinPredictor\\n')\nloop(run)\n<mask token>\n",
"step-4": "import json\nimport random\nfrom time import sleep\nurl = 'data/data.json'\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n while run:\n error_margin = str(error / simulations * 100) + '%'\n prediction = get_prediction()\n print('Prediction: %s' % prediction)\n print('Error Margin: %s' % error_margin)\n print('Flip the coin and insert your result:\\nh = head\\nt = tail')\n answer = input()\n comparator = ''\n if answer is 'h' or answer is 't':\n if answer == 't':\n write_data(False)\n comparator = 'tail'\n elif answer == 'h':\n write_data(True)\n comparator = 'head'\n simulations += 1\n if comparator != prediction:\n error += 1\n else:\n print('Invalid answer\\n')\n\n\ndef get_prediction():\n file = read_file()\n data = file['coin-result']\n true = 0\n for i in data:\n if i is True:\n true += 1\n head = true / len(data)\n tail = 1 - head\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n if head == 1:\n return 'head'\n elif tail == 1:\n return 'tail'\n elif head > tail:\n if rand > head:\n return 'head'\n else:\n return 'tail'\n elif head < tail:\n if rand > tail:\n return 'tail'\n else:\n return 'head'\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return 'tail'\n else:\n return 'head'\n\n\ndef read_file():\n file = open(url, 'r')\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, 'w')\n data['coin-result'].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == 'c':\n return 'head'\n elif answer == 't':\n return 'tail'\n else:\n print('Invalid answer')\n\n\nrun = True\nprint('Welcome to CoinPredictor\\n')\nloop(run)\n<mask token>\n",
"step-5": "import json\nimport random\nfrom time import sleep\n\nurl = \"data/data.json\"\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n\n while run:\n\n error_margin = str((error/simulations) * 100) + \"%\"\n prediction = get_prediction()\n print(\"Prediction: %s\" % prediction)\n print(\"Error Margin: %s\" % error_margin)\n print(\"Flip the coin and insert your result:\\nh = head\\nt = tail\")\n answer = input()\n comparator = \"\"\n\n if answer is \"h\" or answer is \"t\":\n if answer == \"t\":\n write_data(False)\n comparator = \"tail\"\n\n elif answer == \"h\":\n write_data(True)\n comparator = \"head\"\n\n simulations += 1\n\n if comparator != prediction:\n error += 1\n\n else:\n print(\"Invalid answer\\n\")\n\n\ndef get_prediction():\n file = read_file()\n data = file[\"coin-result\"]\n true = 0\n\n for i in data:\n if i is True:\n true += 1\n\n head = true/len(data)\n tail = 1-head\n\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n\n if head == 1:\n return \"head\"\n\n elif tail == 1:\n return \"tail\"\n\n elif head > tail:\n if rand > head:\n return \"head\"\n else:\n return \"tail\"\n\n elif head < tail:\n if rand > tail:\n return \"tail\"\n else:\n return \"head\"\n\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return \"tail\"\n else:\n return \"head\"\n\n\ndef read_file():\n file = open(url, \"r\")\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, \"w\")\n data[\"coin-result\"].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == \"c\":\n return \"head\"\n elif answer == \"t\":\n return \"tail\"\n else:\n print(\"Invalid answer\")\n\n\n# OnRun\nrun = True\nprint(\"Welcome to CoinPredictor\\n\")\nloop(run)\n\n\n'''\n\nfile = open(\"data/data.json\", \"w\")\ndata['coin-result'].append(False)\ndata = json.dump(data, file)\nprint(data)\nfile.close()'''\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
"""byte - property model module."""
from __future__ import absolute_import, division, print_function
class BaseProperty(object):
"""Base class for properties."""
def get(self, obj):
"""Get property value from object.
:param obj: Item
:type obj: byte.model.Model
"""
raise NotImplementedError
def set(self, obj, value):
"""Set property value on object.
:param obj: Item
:type obj: byte.model.Model
:param value: Value
:type value: any
"""
raise NotImplementedError
|
normal
|
{
"blob_id": "382f7119beba81087c497baf170eb6814c26c03e",
"index": 5458,
"step-1": "<mask token>\n\n\nclass BaseProperty(object):\n <mask token>\n <mask token>\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-2": "<mask token>\n\n\nclass BaseProperty(object):\n <mask token>\n\n def get(self, obj):\n \"\"\"Get property value from object.\n\n :param obj: Item\n :type obj: byte.model.Model\n \"\"\"\n raise NotImplementedError\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-3": "<mask token>\n\n\nclass BaseProperty(object):\n \"\"\"Base class for properties.\"\"\"\n\n def get(self, obj):\n \"\"\"Get property value from object.\n\n :param obj: Item\n :type obj: byte.model.Model\n \"\"\"\n raise NotImplementedError\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function\n\n\nclass BaseProperty(object):\n \"\"\"Base class for properties.\"\"\"\n\n def get(self, obj):\n \"\"\"Get property value from object.\n\n :param obj: Item\n :type obj: byte.model.Model\n \"\"\"\n raise NotImplementedError\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-5": "\"\"\"byte - property model module.\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\n\nclass BaseProperty(object):\n \"\"\"Base class for properties.\"\"\"\n\n def get(self, obj):\n \"\"\"Get property value from object.\n\n :param obj: Item\n :type obj: byte.model.Model\n \"\"\"\n raise NotImplementedError\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/python
import sys,os
import argparse
import subprocess
from pprint import pprint
chroot_start_path="/srv/chroot"
chroots_conf="/etc/schroot/chroot.d"
build_pkgs = 'build-essential fakeroot devscripts apt-utils'
include = 'eatmydata,ccache,lintian'
distro_conf={
'debootstrap_mirror':None,
'components':None,
'source_security_suites':None,
'source_security_url':None,
'skip_updates':False,
'skip_security':False,
'keyring':None,
}
def configure_distro(distro="debian",arch="i386",release="unstable"):
if distro not in ['ubuntu','debian']:
print("Unknown Distro %s" % distro)
return False
if (distro == 'ubuntu'):
if ( arch in ['amd64','i386'] ):
distro_conf['debootstrap_mirror'] = "http://archive.ubuntu.com/ubuntu"
elif ( arch in ['armel', 'hppa', 'ia64' , 'lpia', 'sparc'] ):
distro_conf['debootstrap_mirror'] = "http://ports.ubuntu.com/ubuntu-ports"
elif ( arch in ['powerpc'] ):
distro_conf['debootstrap_mirror'] = "http://archive.ubuntu.com/ubuntu"
distro_conf['components'] = ['main','restricted', 'universe', 'multiverse']
distro_conf['keyring'] = "/usr/share/keyrings/ubuntu-archive-keyring.gpg"
elif (distro == 'debian'):
distro_conf['debootstrap_mirror'] = "http://ftp.debian.org/debian"
distro_conf['components'] = ['main','non-free','contrib']
distro_conf['source_security_suites'] = "RELEASE/updates"
distro_conf['source_security_url'] = "http://security.debian.org/"
#Debian only performs security updates
distro_conf['skip_updates'] = True
if (release in ['unstable','sid'] ):
distro_conf['skip_security'] = True
distro_conf['keyring'] = "/usr/share/keyrings/debian-archive-keyring.gpg"
def check_chroot_path(start_path,end_path):
if( os.path.ismount( start_path ) ) :
print("%s is mounted" % start_path)
else:
print("%s is not mounted" % start_path)
exit()
complete_path = os.path.join(start_path,end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd,cwd='/',shell=True)
p.wait()
print(p.returncode)
if (not p.returncode):
print("E: %s already exist!"%complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd,cwd='/',shell=True)
p.wait()
print(p.returncode)
if __name__ == "__main__":
if os.geteuid() != 0:
print("You must be root")
exit()
parser = argparse.ArgumentParser(description="Create a Sbuild Chroot",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d','--distro',metavar="DISTRIBUTION",help="Install specific distro",default="debian")
parser.add_argument('-a','--arch',metavar="ARCHITECTURE",help="What architecture to select",default="i386")
parser.add_argument('-r','--release',help="What release to select",default="unstable")
args = parser.parse_args()
chroot_end_path = os.path.join( args.distro , "-".join([args.release,args.arch]) )
check_chroot_path(chroot_start_path,chroot_end_path)
configure_distro(args.distro,args.arch,args.release)
pprint(distro_conf)
cmd = [ 'sbuild-createchroot' ,
'--verbose',
'--keyring=%s' % distro_conf['keyring'] ,
'--arch=%s' % args.arch ,
'--include=%s' % include,
'--components=%s' % ",".join(distro_conf['components']),
args.release ,
os.path.join(chroot_start_path,chroot_end_path),
distro_conf['debootstrap_mirror'],
]
pprint(cmd)
p = subprocess.Popen(cmd,cwd='/')
p.wait()
|
normal
|
{
"blob_id": "600691b87f7776e96bbf439d7195b870ed86090b",
"index": 1145,
"step-1": "<mask token>\n\n\ndef configure_distro(distro='debian', arch='i386', release='unstable'):\n if distro not in ['ubuntu', 'debian']:\n print('Unknown Distro %s' % distro)\n return False\n if distro == 'ubuntu':\n if arch in ['amd64', 'i386']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://ports.ubuntu.com/ubuntu-ports'\n elif arch in ['powerpc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n distro_conf['components'] = ['main', 'restricted', 'universe',\n 'multiverse']\n distro_conf['keyring'\n ] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'\n elif distro == 'debian':\n distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'\n distro_conf['components'] = ['main', 'non-free', 'contrib']\n distro_conf['source_security_suites'] = 'RELEASE/updates'\n distro_conf['source_security_url'] = 'http://security.debian.org/'\n distro_conf['skip_updates'] = True\n if release in ['unstable', 'sid']:\n distro_conf['skip_security'] = True\n distro_conf['keyring'\n ] = '/usr/share/keyrings/debian-archive-keyring.gpg'\n\n\ndef check_chroot_path(start_path, end_path):\n if os.path.ismount(start_path):\n print('%s is mounted' % start_path)\n else:\n print('%s is not mounted' % start_path)\n exit()\n complete_path = os.path.join(start_path, end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n if not p.returncode:\n print('E: %s already exist!' % complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef configure_distro(distro='debian', arch='i386', release='unstable'):\n if distro not in ['ubuntu', 'debian']:\n print('Unknown Distro %s' % distro)\n return False\n if distro == 'ubuntu':\n if arch in ['amd64', 'i386']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://ports.ubuntu.com/ubuntu-ports'\n elif arch in ['powerpc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n distro_conf['components'] = ['main', 'restricted', 'universe',\n 'multiverse']\n distro_conf['keyring'\n ] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'\n elif distro == 'debian':\n distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'\n distro_conf['components'] = ['main', 'non-free', 'contrib']\n distro_conf['source_security_suites'] = 'RELEASE/updates'\n distro_conf['source_security_url'] = 'http://security.debian.org/'\n distro_conf['skip_updates'] = True\n if release in ['unstable', 'sid']:\n distro_conf['skip_security'] = True\n distro_conf['keyring'\n ] = '/usr/share/keyrings/debian-archive-keyring.gpg'\n\n\ndef check_chroot_path(start_path, end_path):\n if os.path.ismount(start_path):\n print('%s is mounted' % start_path)\n else:\n print('%s is not mounted' % start_path)\n exit()\n complete_path = os.path.join(start_path, end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n if not p.returncode:\n print('E: %s already exist!' % complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n\n\nif __name__ == '__main__':\n if os.geteuid() != 0:\n print('You must be root')\n exit()\n parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=\n 'Install specific distro', default='debian')\n parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=\n 'What architecture to select', default='i386')\n parser.add_argument('-r', '--release', help='What release to select',\n default='unstable')\n args = parser.parse_args()\n chroot_end_path = os.path.join(args.distro, '-'.join([args.release,\n args.arch]))\n check_chroot_path(chroot_start_path, chroot_end_path)\n configure_distro(args.distro, args.arch, args.release)\n pprint(distro_conf)\n cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf\n ['keyring'], '--arch=%s' % args.arch, '--include=%s' % include, \n '--components=%s' % ','.join(distro_conf['components']), args.\n release, os.path.join(chroot_start_path, chroot_end_path),\n distro_conf['debootstrap_mirror']]\n pprint(cmd)\n p = subprocess.Popen(cmd, cwd='/')\n p.wait()\n",
"step-3": "<mask token>\nchroot_start_path = '/srv/chroot'\nchroots_conf = '/etc/schroot/chroot.d'\nbuild_pkgs = 'build-essential fakeroot devscripts apt-utils'\ninclude = 'eatmydata,ccache,lintian'\ndistro_conf = {'debootstrap_mirror': None, 'components': None,\n 'source_security_suites': None, 'source_security_url': None,\n 'skip_updates': False, 'skip_security': False, 'keyring': None}\n\n\ndef configure_distro(distro='debian', arch='i386', release='unstable'):\n if distro not in ['ubuntu', 'debian']:\n print('Unknown Distro %s' % distro)\n return False\n if distro == 'ubuntu':\n if arch in ['amd64', 'i386']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://ports.ubuntu.com/ubuntu-ports'\n elif arch in ['powerpc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n distro_conf['components'] = ['main', 'restricted', 'universe',\n 'multiverse']\n distro_conf['keyring'\n ] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'\n elif distro == 'debian':\n distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'\n distro_conf['components'] = ['main', 'non-free', 'contrib']\n distro_conf['source_security_suites'] = 'RELEASE/updates'\n distro_conf['source_security_url'] = 'http://security.debian.org/'\n distro_conf['skip_updates'] = True\n if release in ['unstable', 'sid']:\n distro_conf['skip_security'] = True\n distro_conf['keyring'\n ] = '/usr/share/keyrings/debian-archive-keyring.gpg'\n\n\ndef check_chroot_path(start_path, end_path):\n if os.path.ismount(start_path):\n print('%s is mounted' % start_path)\n else:\n print('%s is not mounted' % start_path)\n exit()\n complete_path = os.path.join(start_path, end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n if not p.returncode:\n print('E: %s already exist!' % complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n\n\nif __name__ == '__main__':\n if os.geteuid() != 0:\n print('You must be root')\n exit()\n parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=\n 'Install specific distro', default='debian')\n parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=\n 'What architecture to select', default='i386')\n parser.add_argument('-r', '--release', help='What release to select',\n default='unstable')\n args = parser.parse_args()\n chroot_end_path = os.path.join(args.distro, '-'.join([args.release,\n args.arch]))\n check_chroot_path(chroot_start_path, chroot_end_path)\n configure_distro(args.distro, args.arch, args.release)\n pprint(distro_conf)\n cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf\n ['keyring'], '--arch=%s' % args.arch, '--include=%s' % include, \n '--components=%s' % ','.join(distro_conf['components']), args.\n release, os.path.join(chroot_start_path, chroot_end_path),\n distro_conf['debootstrap_mirror']]\n pprint(cmd)\n p = subprocess.Popen(cmd, cwd='/')\n p.wait()\n",
"step-4": "import sys, os\nimport argparse\nimport subprocess\nfrom pprint import pprint\nchroot_start_path = '/srv/chroot'\nchroots_conf = '/etc/schroot/chroot.d'\nbuild_pkgs = 'build-essential fakeroot devscripts apt-utils'\ninclude = 'eatmydata,ccache,lintian'\ndistro_conf = {'debootstrap_mirror': None, 'components': None,\n 'source_security_suites': None, 'source_security_url': None,\n 'skip_updates': False, 'skip_security': False, 'keyring': None}\n\n\ndef configure_distro(distro='debian', arch='i386', release='unstable'):\n if distro not in ['ubuntu', 'debian']:\n print('Unknown Distro %s' % distro)\n return False\n if distro == 'ubuntu':\n if arch in ['amd64', 'i386']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://ports.ubuntu.com/ubuntu-ports'\n elif arch in ['powerpc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n distro_conf['components'] = ['main', 'restricted', 'universe',\n 'multiverse']\n distro_conf['keyring'\n ] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'\n elif distro == 'debian':\n distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'\n distro_conf['components'] = ['main', 'non-free', 'contrib']\n distro_conf['source_security_suites'] = 'RELEASE/updates'\n distro_conf['source_security_url'] = 'http://security.debian.org/'\n distro_conf['skip_updates'] = True\n if release in ['unstable', 'sid']:\n distro_conf['skip_security'] = True\n distro_conf['keyring'\n ] = '/usr/share/keyrings/debian-archive-keyring.gpg'\n\n\ndef check_chroot_path(start_path, end_path):\n if os.path.ismount(start_path):\n print('%s is mounted' % start_path)\n else:\n print('%s is not mounted' % start_path)\n exit()\n complete_path = os.path.join(start_path, end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n if not p.returncode:\n print('E: %s already exist!' % complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n\n\nif __name__ == '__main__':\n if os.geteuid() != 0:\n print('You must be root')\n exit()\n parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=\n 'Install specific distro', default='debian')\n parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=\n 'What architecture to select', default='i386')\n parser.add_argument('-r', '--release', help='What release to select',\n default='unstable')\n args = parser.parse_args()\n chroot_end_path = os.path.join(args.distro, '-'.join([args.release,\n args.arch]))\n check_chroot_path(chroot_start_path, chroot_end_path)\n configure_distro(args.distro, args.arch, args.release)\n pprint(distro_conf)\n cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf\n ['keyring'], '--arch=%s' % args.arch, '--include=%s' % include, \n '--components=%s' % ','.join(distro_conf['components']), args.\n release, os.path.join(chroot_start_path, chroot_end_path),\n distro_conf['debootstrap_mirror']]\n pprint(cmd)\n p = subprocess.Popen(cmd, cwd='/')\n p.wait()\n",
"step-5": "#!/usr/bin/python\n\nimport sys,os\nimport argparse\nimport subprocess\nfrom pprint import pprint\n\nchroot_start_path=\"/srv/chroot\"\nchroots_conf=\"/etc/schroot/chroot.d\"\n\nbuild_pkgs = 'build-essential fakeroot devscripts apt-utils'\ninclude = 'eatmydata,ccache,lintian'\ndistro_conf={\n 'debootstrap_mirror':None,\n 'components':None,\n 'source_security_suites':None,\n 'source_security_url':None,\n 'skip_updates':False,\n 'skip_security':False,\n 'keyring':None,\n}\ndef configure_distro(distro=\"debian\",arch=\"i386\",release=\"unstable\"):\n\n if distro not in ['ubuntu','debian']:\n print(\"Unknown Distro %s\" % distro)\n return False\n\n if (distro == 'ubuntu'):\n if ( arch in ['amd64','i386'] ):\n distro_conf['debootstrap_mirror'] = \"http://archive.ubuntu.com/ubuntu\"\n elif ( arch in ['armel', 'hppa', 'ia64' , 'lpia', 'sparc'] ):\n distro_conf['debootstrap_mirror'] = \"http://ports.ubuntu.com/ubuntu-ports\"\n elif ( arch in ['powerpc'] ):\n distro_conf['debootstrap_mirror'] = \"http://archive.ubuntu.com/ubuntu\"\n\n distro_conf['components'] = ['main','restricted', 'universe', 'multiverse']\n\n distro_conf['keyring'] = \"/usr/share/keyrings/ubuntu-archive-keyring.gpg\"\n elif (distro == 'debian'):\n distro_conf['debootstrap_mirror'] = \"http://ftp.debian.org/debian\"\n distro_conf['components'] = ['main','non-free','contrib']\n distro_conf['source_security_suites'] = \"RELEASE/updates\"\n distro_conf['source_security_url'] = \"http://security.debian.org/\"\n #Debian only performs security updates\n distro_conf['skip_updates'] = True\n\n if (release in ['unstable','sid'] ):\n distro_conf['skip_security'] = True\n\n distro_conf['keyring'] = \"/usr/share/keyrings/debian-archive-keyring.gpg\"\n\n\ndef check_chroot_path(start_path,end_path):\n if( os.path.ismount( start_path ) ) :\n print(\"%s is mounted\" % start_path)\n else:\n print(\"%s is not mounted\" % start_path)\n exit()\n\n complete_path = os.path.join(start_path,end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd,cwd='/',shell=True)\n p.wait()\n print(p.returncode)\n if (not p.returncode):\n print(\"E: %s already exist!\"%complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd,cwd='/',shell=True)\n p.wait()\n print(p.returncode)\n \nif __name__ == \"__main__\":\n\n if os.geteuid() != 0:\n print(\"You must be root\")\n exit()\n\n parser = argparse.ArgumentParser(description=\"Create a Sbuild Chroot\",formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-d','--distro',metavar=\"DISTRIBUTION\",help=\"Install specific distro\",default=\"debian\")\n parser.add_argument('-a','--arch',metavar=\"ARCHITECTURE\",help=\"What architecture to select\",default=\"i386\")\n parser.add_argument('-r','--release',help=\"What release to select\",default=\"unstable\")\n\n args = parser.parse_args()\n chroot_end_path = os.path.join( args.distro , \"-\".join([args.release,args.arch]) )\n check_chroot_path(chroot_start_path,chroot_end_path)\n\n configure_distro(args.distro,args.arch,args.release)\n\n pprint(distro_conf)\n cmd = [ 'sbuild-createchroot' ,\n '--verbose',\n '--keyring=%s' % distro_conf['keyring'] ,\n '--arch=%s' % args.arch ,\n '--include=%s' % include,\n '--components=%s' % \",\".join(distro_conf['components']),\n args.release ,\n os.path.join(chroot_start_path,chroot_end_path),\n distro_conf['debootstrap_mirror'],\n ]\n pprint(cmd)\n p = subprocess.Popen(cmd,cwd='/')\n p.wait()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from scipy.misc import imread
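# note: scipy.misc.imread was deprecated and removed in SciPy 1.2;
# imageio.imread is the usual replacement on modern installs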
import os
import numpy as np
files = [ "oracle.PNG",
"SQL.jpg" ]
def plotImage(f):
folder = "C:/temp/"
im = imread(os.path.join(folder, f)).astype(np.float32) / 255
plt.imshow(im)
a = plt.gca()
a.get_xaxis().set_visible(False) # We don't need axis ticks
a.get_yaxis().set_visible(False)
pp = PdfPages("c:/temp/page1.pdf")
plt.subplot(121)
plotImage(files[0])
plt.subplot(122)
plotImage(files[1])
pp.savefig(plt.gcf())  # appends page 1
pp.savefig(plt.gcf())  # appends page 2 (the same figure again, so both pages are identical)
pp.close()
|
normal
|
{
"blob_id": "146db68fb84569b914fa741457c595108088dc63",
"index": 7199,
"step-1": "<mask token>\n\n\ndef plotImage(f):\n folder = 'C:/temp/'\n im = imread(os.path.join(folder, f)).astype(np.float32) / 255\n plt.imshow(im)\n a = plt.gca()\n a.get_xaxis().set_visible(False)\n a.get_yaxis().set_visible(False)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plotImage(f):\n folder = 'C:/temp/'\n im = imread(os.path.join(folder, f)).astype(np.float32) / 255\n plt.imshow(im)\n a = plt.gca()\n a.get_xaxis().set_visible(False)\n a.get_yaxis().set_visible(False)\n\n\n<mask token>\nplt.subplot(121)\nplotImage(files[0])\nplt.subplot(122)\nplotImage(files[1])\npp.savefig(plt.gcf())\npp.savefig(plt.gcf())\npp.close()\n",
"step-3": "<mask token>\nfiles = ['oracle.PNG', 'SQL.jpg']\n\n\ndef plotImage(f):\n folder = 'C:/temp/'\n im = imread(os.path.join(folder, f)).astype(np.float32) / 255\n plt.imshow(im)\n a = plt.gca()\n a.get_xaxis().set_visible(False)\n a.get_yaxis().set_visible(False)\n\n\npp = PdfPages('c:/temp/page1.pdf')\nplt.subplot(121)\nplotImage(files[0])\nplt.subplot(122)\nplotImage(files[1])\npp.savefig(plt.gcf())\npp.savefig(plt.gcf())\npp.close()\n",
"step-4": "from matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\nfrom scipy.misc import imread\nimport os\nimport numpy as np\nfiles = ['oracle.PNG', 'SQL.jpg']\n\n\ndef plotImage(f):\n folder = 'C:/temp/'\n im = imread(os.path.join(folder, f)).astype(np.float32) / 255\n plt.imshow(im)\n a = plt.gca()\n a.get_xaxis().set_visible(False)\n a.get_yaxis().set_visible(False)\n\n\npp = PdfPages('c:/temp/page1.pdf')\nplt.subplot(121)\nplotImage(files[0])\nplt.subplot(122)\nplotImage(files[1])\npp.savefig(plt.gcf())\npp.savefig(plt.gcf())\npp.close()\n",
"step-5": "from matplotlib.backends.backend_pdf import PdfPages\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.misc import imread\r\nimport os\r\nimport numpy as np\r\n\r\nfiles = [ \"oracle.PNG\",\r\n \"SQL.jpg\" ]\r\ndef plotImage(f):\r\n folder = \"C:/temp/\"\r\n im = imread(os.path.join(folder, f)).astype(np.float32) / 255\r\n plt.imshow(im)\r\n a = plt.gca()\r\n a.get_xaxis().set_visible(False) # We don't need axis ticks\r\n a.get_yaxis().set_visible(False)\r\n\r\npp = PdfPages(\"c:/temp/page1.pdf\")\r\nplt.subplot(121)\r\nplotImage(files[0])\r\nplt.subplot(122)\r\nplotImage(files[1])\r\npp.savefig(plt.gcf()) # This generates page 1\r\npp.savefig(plt.gcf()) # This generates page 2\r\n\r\npp.close()\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
《Engineering a Compiler》
即《编译器设计第二版》
https://www.clear.rice.edu/comp412/
"""
# 《parsing-techniques》 讲前端
## http://parsing-techniques.duguying.net/ebook/2/1/3.html
"""
前端看Parsing Techniques,后端看鲸书,都是最好的。
"""
# 《essential of programming language》
# sicp
"""
如果对编程语言设计方面感兴趣,想对编程语言和编译器设计有大概的概念,可以看看PLP。
想快速实践可以看《自制脚本语言》,《engineer a compiler》和《编程语言实现模式》。
还是那句话,多做少说,实现一遍,比啥都好使。
"""
"""
Flex&Bison 算是《lex与yacc》的后一版。很详细的介绍了Flex与Bison这两个工具。
书的后面章节从无到有的构造了一个SQL的解释器。
"""
## 有何用
"""
作者:蓝色
链接:https://www.zhihu.com/question/21755487/answer/30574966
你现在觉得枯燥,我想既跟编译原理本身比较抽象的知识有关,也跟讲述者有关。
一个好的讲述者会试着化抽象为形象,以丰富生动的例子来为你解释。而编译原理是否有用?
我认为这门课是一门真正与代码做斗争的课程,对于一个有至于追求技术的人是不容错过的课程,
而且编译原理可以说是一个计算机科学的缩影。你学习它更多的是去追寻程序设计语言的本质,
如它在寄存器分配中将会使用到贪心算法,死代码消除中将会使用到图论算法,
数据流分析中使用到的Fixed-Point Algorithm,词法分析与语法分析中使用到有限状态机与递归下降这样的
重要思想等等,也许你以后不会成为一个编译器开发工作者,但是编译原理的学习中所获,所思的东西足以让你
终生获益。同时,学完这门课程,对于一个有Geek精神的开发者,他会开始运用自己的所学开发享受“上帝”的感觉,
去尝试创造一门语言,我想这种感觉不是每门课程都能带给你的。
我相信,当你真正完成这个过程后,你对你所写的程序、程序语言都会有更深的本质认识,
这样的认识也会让你站的高度完全不同,如果你真的学的好,我想别人看到的是语法,
你看到的是背后的实现,这样的感觉真的很好的,不信你试试。
有了这么多好处,无论如何都有足够的理由支撑你好好学习了。
自从学会了编译原理,我用编译原理的眼光来看带我自己的代码,写出了优秀的单元测试。
文本编辑器的代码高亮功能,代码提示功能就是运用编译原理知识,如果自己写一个是不是很好玩?
"""
## 学习目标
"""
对于普通程序员,我认为编译原理这门课主要掌握几点就够用了:
1. 词法分析方面,掌握正则表达式,了解dfa/nfa。
2. Parsing 方面,能读懂BNF,知道AST,会写简单的递归下降parser,会用antlr之类的parser generator。
3. 优化方面,知道现代编译器的优化能力有多强,知道如何配合编译器写出高效易读的代码,
避免试图outsmart编译器。
4. 会实现简单的虚拟机(stack-based,不带GC),并把四则运算表达式翻译为虚拟机指令。
作者:陈硕
链接:https://www.zhihu.com/question/21755487/answer/30585811
"""
|
flexible
|
{
"blob_id": "5663ded291405bcf0d410041485487bb17560223",
"index": 3106,
"step-1": "<mask token>\n",
"step-2": "\n\"\"\"\n《Engineering a Compiler》\n即《编译器设计第二版》\nhttps://www.clear.rice.edu/comp412/\n\"\"\"\n# 《parsing-techniques》 讲前端\n## http://parsing-techniques.duguying.net/ebook/2/1/3.html\n\n\"\"\"\n前端看Parsing Techniques,后端看鲸书,都是最好的。\n\"\"\"\n\n# 《essential of programming language》\n# sicp\n\n\"\"\"\n如果对编程语言设计方面感兴趣,想对编程语言和编译器设计有大概的概念,可以看看PLP。\n想快速实践可以看《自制脚本语言》,《engineer a compiler》和《编程语言实现模式》。\n\n还是那句话,多做少说,实现一遍,比啥都好使。\n\"\"\"\n\n\"\"\"\nFlex&Bison 算是《lex与yacc》的后一版。很详细的介绍了Flex与Bison这两个工具。\n书的后面章节从无到有的构造了一个SQL的解释器。\n\"\"\"\n\n## 有何用\n\"\"\"\n作者:蓝色\n链接:https://www.zhihu.com/question/21755487/answer/30574966\n\n\n你现在觉得枯燥,我想既跟编译原理本身比较抽象的知识有关,也跟讲述者有关。\n一个好的讲述者会试着化抽象为形象,以丰富生动的例子来为你解释。而编译原理是否有用?\n我认为这门课是一门真正与代码做斗争的课程,对于一个有至于追求技术的人是不容错过的课程,\n而且编译原理可以说是一个计算机科学的缩影。你学习它更多的是去追寻程序设计语言的本质,\n如它在寄存器分配中将会使用到贪心算法,死代码消除中将会使用到图论算法,\n数据流分析中使用到的Fixed-Point Algorithm,词法分析与语法分析中使用到有限状态机与递归下降这样的\n重要思想等等,也许你以后不会成为一个编译器开发工作者,但是编译原理的学习中所获,所思的东西足以让你\n终生获益。同时,学完这门课程,对于一个有Geek精神的开发者,他会开始运用自己的所学开发享受“上帝”的感觉,\n去尝试创造一门语言,我想这种感觉不是每门课程都能带给你的。\n我相信,当你真正完成这个过程后,你对你所写的程序、程序语言都会有更深的本质认识,\n这样的认识也会让你站的高度完全不同,如果你真的学的好,我想别人看到的是语法,\n你看到的是背后的实现,这样的感觉真的很好的,不信你试试。\n有了这么多好处,无论如何都有足够的理由支撑你好好学习了。\n\n自从学会了编译原理,我用编译原理的眼光来看带我自己的代码,写出了优秀的单元测试。\n\n文本编辑器的代码高亮功能,代码提示功能就是运用编译原理知识,如果自己写一个是不是很好玩?\n\n\"\"\"\n## 学习目标\n\"\"\"\n对于普通程序员,我认为编译原理这门课主要掌握几点就够用了:\n1. 词法分析方面,掌握正则表达式,了解dfa/nfa。\n\n2. Parsing 方面,能读懂BNF,知道AST,会写简单的递归下降parser,会用antlr之类的parser generator。\n\n3. 优化方面,知道现代编译器的优化能力有多强,知道如何配合编译器写出高效易读的代码,\n避免试图outsmart编译器。\n\n4. 会实现简单的虚拟机(stack-based,不带GC),并把四则运算表达式翻译为虚拟机指令。\n\n作者:陈硕\n链接:https://www.zhihu.com/question/21755487/answer/30585811\n\"\"\"\n\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import struct
def parse(message):
return IGENMessage.from_bytes(message)
class IGENMessage(object):
def __init__(self):
self.serial = None
self.temperature = None
self.pv1 = 0
self.pv2 = 0
self.pv3 = 0
self.pa1 = 0
self.pa2 = 0
self.pa3 = 0
self.ov1 = 0
self.ov2 = 0
self.ov3 = 0
self.oa1 = 0
self.oa2 = 0
self.oa3 = 0
self.oHz = 0
self.op1 = 0
self.op2 = 0
self.op3 = 0
self.energy_today = None
self.energy_overall = None
self.operational_hours = None
@classmethod
def from_bytes(cls, data):
if len(data) != 103:
raise Exception('Packet should be exactly 103 bytes')
self = cls()
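        # Fixed 103-byte big-endian frame: 17 header bytes skipped, a 14-byte
        # serial, then unsigned 16-bit fields (temperature, PV volts/amps,
        # output amps/volts, frequency, power, energy, hours) with pad bytes
        # (x) bringing the total to 103, matching the length check above.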
parsed = struct.unpack('!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)
self.serial = parsed[0].decode('ascii')
self.temperature = parsed[1] / 10
self.pv1 = parsed[2] / 10
self.pv2 = parsed[3] / 10
self.pv3 = parsed[4] / 10
self.pa1 = parsed[5] / 10
self.pa2 = parsed[6] / 10
self.pa3 = parsed[7] / 10
self.oa1 = parsed[8] / 10
self.oa2 = parsed[9] / 10
self.oa3 = parsed[10] / 10
self.ov1 = parsed[11] / 10
self.ov2 = parsed[12] / 10
self.ov3 = parsed[13] / 10
self.oHz = parsed[14] / 100
self.op1 = parsed[15]
self.op2 = parsed[16]
self.op3 = parsed[17]
self.energy_today = parsed[18] / 100
self.energy_overall = parsed[19] / 10
self.operational_hours = parsed[20]
return self
def outputs(self):
return [
(self.ov1, self.oa1, self.op1),
(self.ov2, self.oa2, self.op2),
(self.ov3, self.oa3, self.op3)
]
def inputs(self):
return [
(self.pv1, self.pa1),
(self.pv2, self.pa2),
(self.pv3, self.pa3)
]
def report(self):
print("Logger: {}".format(self.serial))
print("Temperature: {} degrees celcius".format(self.temperature))
print()
print("Inputs: ")
print(" Channel 1: {:6.2f} V {:5.2f} A".format(self.pv1, self.pa1))
print(" Channel 2: {:6.2f} V {:5.2f} A".format(self.pv2, self.pa2))
print(" Channel 3: {:6.2f} V {:5.2f} A".format(self.pv3, self.pa3))
print()
print("Outputs: ({} Hz)".format(self.oHz))
print(" L1: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov1, self.oa1, self.op1))
print(" L2: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov2, self.oa2, self.op2))
print(" L3: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov3, self.oa3, self.op3))
print()
print("Energy today: {:8.1f} kWh".format(self.energy_today))
print("Energy overall: {:8.1f} kWh".format(self.energy_overall))
print("Operational hours: {}".format(self.operational_hours))
def __repr__(self):
total_power = self.op1 + self.op2 + self.op3
return "<IGENMessage {} watt ({} kWh today)>".format(total_power, self.energy_today)
|
normal
|
{
"blob_id": "5df42a024e1edbe5cc977a814efe580db04b8b76",
"index": 2386,
"step-1": "<mask token>\n\n\nclass IGENMessage(object):\n\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n <mask token>\n\n def outputs(self):\n return [(self.ov1, self.oa1, self.op1), (self.ov2, self.oa2, self.\n op2), (self.ov3, self.oa3, self.op3)]\n\n def inputs(self):\n return [(self.pv1, self.pa1), (self.pv2, self.pa2), (self.pv3, self\n .pa3)]\n\n def report(self):\n print('Logger: {}'.format(self.serial))\n print('Temperature: {} degrees celcius'.format(self.temperature))\n print()\n print('Inputs: ')\n print(' Channel 1: {:6.2f} V {:5.2f} A'.format(self.pv1, self.pa1))\n print(' Channel 2: {:6.2f} V {:5.2f} A'.format(self.pv2, self.pa2))\n print(' Channel 3: {:6.2f} V {:5.2f} A'.format(self.pv3, self.pa3))\n print()\n print('Outputs: ({} Hz)'.format(self.oHz))\n print(' L1: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov1,\n self.oa1, self.op1))\n print(' L2: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov2,\n self.oa2, self.op2))\n print(' L3: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov3,\n self.oa3, self.op3))\n print()\n print('Energy today: {:8.1f} kWh'.format(self.energy_today))\n print('Energy overall: {:8.1f} kWh'.format(self.energy_overall))\n print('Operational hours: {}'.format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return '<IGENMessage {} watt ({} kWh today)>'.format(total_power,\n self.energy_today)\n",
"step-2": "<mask token>\n\n\nclass IGENMessage(object):\n\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n\n @classmethod\n def from_bytes(cls, data):\n if len(data) != 103:\n raise Exception('Packet should be exactly 103 bytes')\n self = cls()\n parsed = struct.unpack(\n '!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)\n self.serial = parsed[0].decode('ascii')\n self.temperature = parsed[1] / 10\n self.pv1 = parsed[2] / 10\n self.pv2 = parsed[3] / 10\n self.pv3 = parsed[4] / 10\n self.pa1 = parsed[5] / 10\n self.pa2 = parsed[6] / 10\n self.pa3 = parsed[7] / 10\n self.oa1 = parsed[8] / 10\n self.oa2 = parsed[9] / 10\n self.oa3 = parsed[10] / 10\n self.ov1 = parsed[11] / 10\n self.ov2 = parsed[12] / 10\n self.ov3 = parsed[13] / 10\n self.oHz = parsed[14] / 100\n self.op1 = parsed[15]\n self.op2 = parsed[16]\n self.op3 = parsed[17]\n self.energy_today = parsed[18] / 100\n self.energy_overall = parsed[19] / 10\n self.operational_hours = parsed[20]\n return self\n\n def outputs(self):\n return [(self.ov1, self.oa1, self.op1), (self.ov2, self.oa2, self.\n op2), (self.ov3, self.oa3, self.op3)]\n\n def inputs(self):\n return [(self.pv1, self.pa1), (self.pv2, self.pa2), (self.pv3, self\n .pa3)]\n\n def report(self):\n print('Logger: {}'.format(self.serial))\n print('Temperature: {} degrees celcius'.format(self.temperature))\n print()\n print('Inputs: ')\n print(' Channel 1: {:6.2f} V {:5.2f} A'.format(self.pv1, self.pa1))\n print(' Channel 2: {:6.2f} V {:5.2f} A'.format(self.pv2, self.pa2))\n print(' Channel 3: {:6.2f} V {:5.2f} A'.format(self.pv3, self.pa3))\n print()\n print('Outputs: ({} Hz)'.format(self.oHz))\n print(' L1: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov1,\n self.oa1, self.op1))\n print(' L2: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov2,\n self.oa2, self.op2))\n print(' L3: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov3,\n self.oa3, self.op3))\n print()\n print('Energy today: {:8.1f} kWh'.format(self.energy_today))\n print('Energy overall: {:8.1f} kWh'.format(self.energy_overall))\n print('Operational hours: {}'.format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return '<IGENMessage {} watt ({} kWh today)>'.format(total_power,\n self.energy_today)\n",
"step-3": "<mask token>\n\n\ndef parse(message):\n return IGENMessage.from_bytes(message)\n\n\nclass IGENMessage(object):\n\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n\n @classmethod\n def from_bytes(cls, data):\n if len(data) != 103:\n raise Exception('Packet should be exactly 103 bytes')\n self = cls()\n parsed = struct.unpack(\n '!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)\n self.serial = parsed[0].decode('ascii')\n self.temperature = parsed[1] / 10\n self.pv1 = parsed[2] / 10\n self.pv2 = parsed[3] / 10\n self.pv3 = parsed[4] / 10\n self.pa1 = parsed[5] / 10\n self.pa2 = parsed[6] / 10\n self.pa3 = parsed[7] / 10\n self.oa1 = parsed[8] / 10\n self.oa2 = parsed[9] / 10\n self.oa3 = parsed[10] / 10\n self.ov1 = parsed[11] / 10\n self.ov2 = parsed[12] / 10\n self.ov3 = parsed[13] / 10\n self.oHz = parsed[14] / 100\n self.op1 = parsed[15]\n self.op2 = parsed[16]\n self.op3 = parsed[17]\n self.energy_today = parsed[18] / 100\n self.energy_overall = parsed[19] / 10\n self.operational_hours = parsed[20]\n return self\n\n def outputs(self):\n return [(self.ov1, self.oa1, self.op1), (self.ov2, self.oa2, self.\n op2), (self.ov3, self.oa3, self.op3)]\n\n def inputs(self):\n return [(self.pv1, self.pa1), (self.pv2, self.pa2), (self.pv3, self\n .pa3)]\n\n def report(self):\n print('Logger: {}'.format(self.serial))\n print('Temperature: {} degrees celcius'.format(self.temperature))\n print()\n print('Inputs: ')\n print(' Channel 1: {:6.2f} V {:5.2f} A'.format(self.pv1, self.pa1))\n print(' Channel 2: {:6.2f} V {:5.2f} A'.format(self.pv2, self.pa2))\n print(' Channel 3: {:6.2f} V {:5.2f} A'.format(self.pv3, self.pa3))\n print()\n print('Outputs: ({} Hz)'.format(self.oHz))\n print(' L1: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov1,\n self.oa1, self.op1))\n print(' L2: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov2,\n self.oa2, self.op2))\n print(' L3: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov3,\n self.oa3, self.op3))\n print()\n print('Energy today: {:8.1f} kWh'.format(self.energy_today))\n print('Energy overall: {:8.1f} kWh'.format(self.energy_overall))\n print('Operational hours: {}'.format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return '<IGENMessage {} watt ({} kWh today)>'.format(total_power,\n self.energy_today)\n",
"step-4": "import struct\n\n\ndef parse(message):\n return IGENMessage.from_bytes(message)\n\n\nclass IGENMessage(object):\n\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n\n @classmethod\n def from_bytes(cls, data):\n if len(data) != 103:\n raise Exception('Packet should be exactly 103 bytes')\n self = cls()\n parsed = struct.unpack(\n '!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)\n self.serial = parsed[0].decode('ascii')\n self.temperature = parsed[1] / 10\n self.pv1 = parsed[2] / 10\n self.pv2 = parsed[3] / 10\n self.pv3 = parsed[4] / 10\n self.pa1 = parsed[5] / 10\n self.pa2 = parsed[6] / 10\n self.pa3 = parsed[7] / 10\n self.oa1 = parsed[8] / 10\n self.oa2 = parsed[9] / 10\n self.oa3 = parsed[10] / 10\n self.ov1 = parsed[11] / 10\n self.ov2 = parsed[12] / 10\n self.ov3 = parsed[13] / 10\n self.oHz = parsed[14] / 100\n self.op1 = parsed[15]\n self.op2 = parsed[16]\n self.op3 = parsed[17]\n self.energy_today = parsed[18] / 100\n self.energy_overall = parsed[19] / 10\n self.operational_hours = parsed[20]\n return self\n\n def outputs(self):\n return [(self.ov1, self.oa1, self.op1), (self.ov2, self.oa2, self.\n op2), (self.ov3, self.oa3, self.op3)]\n\n def inputs(self):\n return [(self.pv1, self.pa1), (self.pv2, self.pa2), (self.pv3, self\n .pa3)]\n\n def report(self):\n print('Logger: {}'.format(self.serial))\n print('Temperature: {} degrees celcius'.format(self.temperature))\n print()\n print('Inputs: ')\n print(' Channel 1: {:6.2f} V {:5.2f} A'.format(self.pv1, self.pa1))\n print(' Channel 2: {:6.2f} V {:5.2f} A'.format(self.pv2, self.pa2))\n print(' Channel 3: {:6.2f} V {:5.2f} A'.format(self.pv3, self.pa3))\n print()\n print('Outputs: ({} Hz)'.format(self.oHz))\n print(' L1: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov1,\n self.oa1, self.op1))\n print(' L2: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov2,\n self.oa2, self.op2))\n print(' L3: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov3,\n self.oa3, self.op3))\n print()\n print('Energy today: {:8.1f} kWh'.format(self.energy_today))\n print('Energy overall: {:8.1f} kWh'.format(self.energy_overall))\n print('Operational hours: {}'.format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return '<IGENMessage {} watt ({} kWh today)>'.format(total_power,\n self.energy_today)\n",
"step-5": "import struct\n\n\ndef parse(message):\n return IGENMessage.from_bytes(message)\n\n\nclass IGENMessage(object):\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n\n @classmethod\n def from_bytes(cls, data):\n if len(data) != 103:\n raise Exception('Packet should be exactly 103 bytes')\n\n self = cls()\n\n parsed = struct.unpack('!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)\n self.serial = parsed[0].decode('ascii')\n\n self.temperature = parsed[1] / 10\n\n self.pv1 = parsed[2] / 10\n self.pv2 = parsed[3] / 10\n self.pv3 = parsed[4] / 10\n\n self.pa1 = parsed[5] / 10\n self.pa2 = parsed[6] / 10\n self.pa3 = parsed[7] / 10\n\n self.oa1 = parsed[8] / 10\n self.oa2 = parsed[9] / 10\n self.oa3 = parsed[10] / 10\n\n self.ov1 = parsed[11] / 10\n self.ov2 = parsed[12] / 10\n self.ov3 = parsed[13] / 10\n\n self.oHz = parsed[14] / 100\n\n self.op1 = parsed[15]\n self.op2 = parsed[16]\n self.op3 = parsed[17]\n\n self.energy_today = parsed[18] / 100\n self.energy_overall = parsed[19] / 10\n\n self.operational_hours = parsed[20]\n\n return self\n\n def outputs(self):\n return [\n (self.ov1, self.oa1, self.op1),\n (self.ov2, self.oa2, self.op2),\n (self.ov3, self.oa3, self.op3)\n ]\n\n def inputs(self):\n return [\n (self.pv1, self.pa1),\n (self.pv2, self.pa2),\n (self.pv3, self.pa3)\n ]\n\n def report(self):\n print(\"Logger: {}\".format(self.serial))\n print(\"Temperature: {} degrees celcius\".format(self.temperature))\n print()\n print(\"Inputs: \")\n print(\" Channel 1: {:6.2f} V {:5.2f} A\".format(self.pv1, self.pa1))\n print(\" Channel 2: {:6.2f} V {:5.2f} A\".format(self.pv2, self.pa2))\n print(\" Channel 3: {:6.2f} V {:5.2f} A\".format(self.pv3, self.pa3))\n print()\n print(\"Outputs: ({} Hz)\".format(self.oHz))\n print(\" L1: {:6.2f} V {:5.2f} A {:5.0f} W\".format(self.ov1, self.oa1, self.op1))\n print(\" L2: {:6.2f} V {:5.2f} A {:5.0f} W\".format(self.ov2, self.oa2, self.op2))\n print(\" L3: {:6.2f} V {:5.2f} A {:5.0f} W\".format(self.ov3, self.oa3, self.op3))\n print()\n print(\"Energy today: {:8.1f} kWh\".format(self.energy_today))\n print(\"Energy overall: {:8.1f} kWh\".format(self.energy_overall))\n print(\"Operational hours: {}\".format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return \"<IGENMessage {} watt ({} kWh today)>\".format(total_power, self.energy_today)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
def GetPage5Docx(fileNameWithPath):
word = Dispatch('Word.Application')
word.Visible = False
word = word.Documents.Open(fileNameWithPath)
word.Repaginate()
num_of_sheets = word.ComputeStatistics(2)
return num_of_sheets
def GetPage5PPT(fileNameWithPath):
Application = Dispatch('PowerPoint.Application')
Presentation = Application.Presentations.Open(fileNameWithPath,
WithWindow=False)
slide_count = len(Presentation.Slides)
Presentation.Close()
return slide_count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def GetPage5Docx(fileNameWithPath):
word = Dispatch('Word.Application')
word.Visible = False
word = word.Documents.Open(fileNameWithPath)
word.Repaginate()
num_of_sheets = word.ComputeStatistics(2)
return num_of_sheets
def GetPage5PPT(fileNameWithPath):
Application = Dispatch('PowerPoint.Application')
Presentation = Application.Presentations.Open(fileNameWithPath,
WithWindow=False)
slide_count = len(Presentation.Slides)
Presentation.Close()
return slide_count
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'w') as fid:
pass
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'a+') as fid:
pagesTotal = 0
for name in files:
nameFile = os.path.join(root, name)
mainFile, appdFile = os.path.splitext(nameFile)
mainFolder, fullFile = os.path.split(nameFile)
if appdFile == '.docx' and fullFile[0:2] != '~$':
pagesThis = GetPage5Docx(nameFile)
fid.writelines(fullFile + ' ' + str(pagesThis) + '\n')
pagesTotal += pagesThis
fid.writelines('All Docx files in this folder have the pages: ' +
str(pagesTotal) + '\n\n\n\n\n\n')
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'a+') as fid:
pagesTotal = 0
for name in files:
nameFile = os.path.join(root, name)
mainFile, appdFile = os.path.splitext(nameFile)
mainFolder, fullFile = os.path.split(nameFile)
if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2
] != '~$':
pagesThis = GetPage5PPT(nameFile)
fid.writelines(fullFile + ' ' + str(pagesThis) + '\n')
pagesTotal += pagesThis
fid.writelines(
'All PPT/PPTX files in this folder have the pages: ' + str(
pagesTotal) + '\n\n\n\n\n\n')
print('Done. Please check it!')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
folderDealTmp = input('Please input the absolute path of the father-folder:\n')
folderDeal = folderDealTmp.replace('\\', '\\\\')
def GetPage5Docx(fileNameWithPath):
word = Dispatch('Word.Application')
word.Visible = False
word = word.Documents.Open(fileNameWithPath)
word.Repaginate()
num_of_sheets = word.ComputeStatistics(2)
return num_of_sheets
def GetPage5PPT(fileNameWithPath):
Application = Dispatch('PowerPoint.Application')
Presentation = Application.Presentations.Open(fileNameWithPath,
WithWindow=False)
slide_count = len(Presentation.Slides)
Presentation.Close()
return slide_count
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'w') as fid:
pass
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'a+') as fid:
pagesTotal = 0
for name in files:
nameFile = os.path.join(root, name)
mainFile, appdFile = os.path.splitext(nameFile)
mainFolder, fullFile = os.path.split(nameFile)
if appdFile == '.docx' and fullFile[0:2] != '~$':
pagesThis = GetPage5Docx(nameFile)
fid.writelines(fullFile + ' ' + str(pagesThis) + '\n')
pagesTotal += pagesThis
fid.writelines('All Docx files in this folder have the pages: ' +
str(pagesTotal) + '\n\n\n\n\n\n')
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'a+') as fid:
pagesTotal = 0
for name in files:
nameFile = os.path.join(root, name)
mainFile, appdFile = os.path.splitext(nameFile)
mainFolder, fullFile = os.path.split(nameFile)
if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2
] != '~$':
pagesThis = GetPage5PPT(nameFile)
fid.writelines(fullFile + ' ' + str(pagesThis) + '\n')
pagesTotal += pagesThis
fid.writelines(
'All PPT/PPTX files in this folder have the pages: ' + str(
pagesTotal) + '\n\n\n\n\n\n')
print('Done. Please check it!')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
from win32com.client import Dispatch
folderDealTmp = input('Please input the absolute path of the father-folder:\n')
folderDeal = folderDealTmp.replace('\\', '\\\\')
def GetPage5Docx(fileNameWithPath):
word = Dispatch('Word.Application')
word.Visible = False
word = word.Documents.Open(fileNameWithPath)
word.Repaginate()
num_of_sheets = word.ComputeStatistics(2)
return num_of_sheets
def GetPage5PPT(fileNameWithPath):
Application = Dispatch('PowerPoint.Application')
Presentation = Application.Presentations.Open(fileNameWithPath,
WithWindow=False)
slide_count = len(Presentation.Slides)
Presentation.Close()
return slide_count
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'w') as fid:
pass
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'a+') as fid:
pagesTotal = 0
for name in files:
nameFile = os.path.join(root, name)
mainFile, appdFile = os.path.splitext(nameFile)
mainFolder, fullFile = os.path.split(nameFile)
if appdFile == '.docx' and fullFile[0:2] != '~$':
pagesThis = GetPage5Docx(nameFile)
fid.writelines(fullFile + ' ' + str(pagesThis) + '\n')
pagesTotal += pagesThis
fid.writelines('All Docx files in this folder have the pages: ' +
str(pagesTotal) + '\n\n\n\n\n\n')
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile = root + '\\Counter.txt'
with open(StatisticFile, 'a+') as fid:
pagesTotal = 0
for name in files:
nameFile = os.path.join(root, name)
mainFile, appdFile = os.path.splitext(nameFile)
mainFolder, fullFile = os.path.split(nameFile)
if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2
] != '~$':
pagesThis = GetPage5PPT(nameFile)
fid.writelines(fullFile + ' ' + str(pagesThis) + '\n')
pagesTotal += pagesThis
fid.writelines(
'All PPT/PPTX files in this folder have the pages: ' + str(
pagesTotal) + '\n\n\n\n\n\n')
print('Done. Please check it!')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 27 18:34:40 2017
@author: Peiyong Jiang :jiangpeiyong@impcas.ac.cn
Wangsheng Wang : wwshunan@impcas.ac.cn
Chi Feng : fengchi@impcas.ac.cn
supervised by
Zhijun Wang & Yuan He
"""
import os
from win32com.client import Dispatch
folderDealTmp=input('Please input the absolute path of the father-folder:\n')
folderDeal=folderDealTmp.replace('\\','\\\\')
def GetPage5Docx(fileNameWithPath):
#open Word
word = Dispatch('Word.Application')
word.Visible = False
word = word.Documents.Open(fileNameWithPath)
#get number of sheets
word.Repaginate()
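    # 2 == wdStatisticPages in the Word COM API, so this returns the page count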
num_of_sheets = word.ComputeStatistics(2)
return num_of_sheets
def GetPage5PPT(fileNameWithPath):
Application = Dispatch("PowerPoint.Application")
Presentation = Application.Presentations.Open(fileNameWithPath, WithWindow=False)
slide_count = len(Presentation.Slides)
Presentation.Close()
return slide_count
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile=root+'\\Counter.txt'
with open(StatisticFile,'w') as fid:
pass
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile=root+'\\Counter.txt'
with open(StatisticFile,'a+') as fid:
pagesTotal=0
for name in files:
nameFile=os.path.join(root, name)
mainFile,appdFile=os.path.splitext(nameFile)
mainFolder,fullFile=os.path.split(nameFile)
if (appdFile=='.docx') and (fullFile[0:2]!='~$'):
pagesThis=GetPage5Docx(nameFile)
fid.writelines(fullFile+' '+str(pagesThis)+'\n')
pagesTotal+=pagesThis
fid.writelines('All Docx files in this folder have the pages: '+str(pagesTotal)+'\n\n\n\n\n\n')
for root, dirs, files in os.walk(folderDeal, topdown=False):
StatisticFile=root+'\\Counter.txt'
with open(StatisticFile,'a+') as fid:
pagesTotal=0
for name in files:
nameFile=os.path.join(root, name)
mainFile,appdFile=os.path.splitext(nameFile)
mainFolder,fullFile=os.path.split(nameFile)
if ((appdFile=='.pptx') or (appdFile=='.ppt')) and (fullFile[0:2]!='~$'):
pagesThis=GetPage5PPT(nameFile)
fid.writelines(fullFile+' '+str(pagesThis)+'\n')
pagesTotal+=pagesThis
fid.writelines('All PPT/PPTX files in this folder have the pages: '+str(pagesTotal)+'\n\n\n\n\n\n')
print('Done. Please check it!')
|
flexible
|
{
"blob_id": "67f09cd8b41c7a4fe457766dfed916aaf71cc20d",
"index": 9489,
"step-1": "<mask token>\n\n\ndef GetPage5Docx(fileNameWithPath):\n word = Dispatch('Word.Application')\n word.Visible = False\n word = word.Documents.Open(fileNameWithPath)\n word.Repaginate()\n num_of_sheets = word.ComputeStatistics(2)\n return num_of_sheets\n\n\ndef GetPage5PPT(fileNameWithPath):\n Application = Dispatch('PowerPoint.Application')\n Presentation = Application.Presentations.Open(fileNameWithPath,\n WithWindow=False)\n slide_count = len(Presentation.Slides)\n Presentation.Close()\n return slide_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef GetPage5Docx(fileNameWithPath):\n word = Dispatch('Word.Application')\n word.Visible = False\n word = word.Documents.Open(fileNameWithPath)\n word.Repaginate()\n num_of_sheets = word.ComputeStatistics(2)\n return num_of_sheets\n\n\ndef GetPage5PPT(fileNameWithPath):\n Application = Dispatch('PowerPoint.Application')\n Presentation = Application.Presentations.Open(fileNameWithPath,\n WithWindow=False)\n slide_count = len(Presentation.Slides)\n Presentation.Close()\n return slide_count\n\n\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'w') as fid:\n pass\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if appdFile == '.docx' and fullFile[0:2] != '~$':\n pagesThis = GetPage5Docx(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines('All Docx files in this folder have the pages: ' +\n str(pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2\n ] != '~$':\n pagesThis = GetPage5PPT(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines(\n 'All PPT/PPTX files in this folder have the pages: ' + str(\n pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nprint('Done. Please check it!')\n",
"step-3": "<mask token>\nfolderDealTmp = input('Please input the absolute path of the father-folder:\\n')\nfolderDeal = folderDealTmp.replace('\\\\', '\\\\\\\\')\n\n\ndef GetPage5Docx(fileNameWithPath):\n word = Dispatch('Word.Application')\n word.Visible = False\n word = word.Documents.Open(fileNameWithPath)\n word.Repaginate()\n num_of_sheets = word.ComputeStatistics(2)\n return num_of_sheets\n\n\ndef GetPage5PPT(fileNameWithPath):\n Application = Dispatch('PowerPoint.Application')\n Presentation = Application.Presentations.Open(fileNameWithPath,\n WithWindow=False)\n slide_count = len(Presentation.Slides)\n Presentation.Close()\n return slide_count\n\n\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'w') as fid:\n pass\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if appdFile == '.docx' and fullFile[0:2] != '~$':\n pagesThis = GetPage5Docx(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines('All Docx files in this folder have the pages: ' +\n str(pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2\n ] != '~$':\n pagesThis = GetPage5PPT(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines(\n 'All PPT/PPTX files in this folder have the pages: ' + str(\n pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nprint('Done. Please check it!')\n",
"step-4": "<mask token>\nimport os\nfrom win32com.client import Dispatch\nfolderDealTmp = input('Please input the absolute path of the father-folder:\\n')\nfolderDeal = folderDealTmp.replace('\\\\', '\\\\\\\\')\n\n\ndef GetPage5Docx(fileNameWithPath):\n word = Dispatch('Word.Application')\n word.Visible = False\n word = word.Documents.Open(fileNameWithPath)\n word.Repaginate()\n num_of_sheets = word.ComputeStatistics(2)\n return num_of_sheets\n\n\ndef GetPage5PPT(fileNameWithPath):\n Application = Dispatch('PowerPoint.Application')\n Presentation = Application.Presentations.Open(fileNameWithPath,\n WithWindow=False)\n slide_count = len(Presentation.Slides)\n Presentation.Close()\n return slide_count\n\n\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'w') as fid:\n pass\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if appdFile == '.docx' and fullFile[0:2] != '~$':\n pagesThis = GetPage5Docx(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines('All Docx files in this folder have the pages: ' +\n str(pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\n StatisticFile = root + '\\\\Counter.txt'\n with open(StatisticFile, 'a+') as fid:\n pagesTotal = 0\n for name in files:\n nameFile = os.path.join(root, name)\n mainFile, appdFile = os.path.splitext(nameFile)\n mainFolder, fullFile = os.path.split(nameFile)\n if (appdFile == '.pptx' or appdFile == '.ppt') and fullFile[0:2\n ] != '~$':\n pagesThis = GetPage5PPT(nameFile)\n fid.writelines(fullFile + ' ' + str(pagesThis) + '\\n')\n pagesTotal += pagesThis\n fid.writelines(\n 'All PPT/PPTX files in this folder have the pages: ' + str(\n pagesTotal) + '\\n\\n\\n\\n\\n\\n')\nprint('Done. Please check it!')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 27 18:34:40 2017\r\n\r\n@author: Peiyong Jiang :jiangpeiyong@impcas.ac.cn\r\n Wangsheng Wang : wwshunan@impcas.ac.cn\r\n Chi Feng : fengchi@impcas.ac.cn\r\n \r\n supervised by\r\n Zhijun Wang & Yuan He\r\n\r\n \r\n\"\"\"\r\n\r\nimport os\r\nfrom win32com.client import Dispatch\r\n\r\n\r\nfolderDealTmp=input('Please input the absolute path of the father-folder:\\n')\r\n\r\nfolderDeal=folderDealTmp.replace('\\\\','\\\\\\\\')\r\n\r\ndef GetPage5Docx(fileNameWithPath):\r\n #open Word\r\n word = Dispatch('Word.Application')\r\n word.Visible = False\r\n word = word.Documents.Open(fileNameWithPath)\r\n \r\n #get number of sheets\r\n word.Repaginate()\r\n num_of_sheets = word.ComputeStatistics(2)\r\n \r\n return num_of_sheets\r\n\r\ndef GetPage5PPT(fileNameWithPath):\r\n Application = Dispatch(\"PowerPoint.Application\")\r\n Presentation = Application.Presentations.Open(fileNameWithPath, WithWindow=False)\r\n slide_count = len(Presentation.Slides)\r\n Presentation.Close()\r\n return slide_count\r\n\r\n\r\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\r\n StatisticFile=root+'\\\\Counter.txt'\r\n with open(StatisticFile,'w') as fid:\r\n pass\r\n\r\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\r\n StatisticFile=root+'\\\\Counter.txt'\r\n with open(StatisticFile,'a+') as fid:\r\n pagesTotal=0\r\n for name in files:\r\n nameFile=os.path.join(root, name)\r\n \r\n mainFile,appdFile=os.path.splitext(nameFile)\r\n mainFolder,fullFile=os.path.split(nameFile)\r\n if (appdFile=='.docx') and (fullFile[0:2]!='~$'): \r\n pagesThis=GetPage5Docx(nameFile)\r\n fid.writelines(fullFile+' '+str(pagesThis)+'\\n')\r\n pagesTotal+=pagesThis\r\n \r\n \r\n fid.writelines('All Docx files in this folder have the pages: '+str(pagesTotal)+'\\n\\n\\n\\n\\n\\n')\r\n \r\n\r\nfor root, dirs, files in os.walk(folderDeal, topdown=False):\r\n \r\n StatisticFile=root+'\\\\Counter.txt'\r\n with open(StatisticFile,'a+') as fid:\r\n pagesTotal=0\r\n for name in files:\r\n nameFile=os.path.join(root, name)\r\n \r\n mainFile,appdFile=os.path.splitext(nameFile)\r\n mainFolder,fullFile=os.path.split(nameFile)\r\n if ((appdFile=='.pptx') or (appdFile=='.ppt')) and (fullFile[0:2]!='~$'): \r\n pagesThis=GetPage5PPT(nameFile)\r\n fid.writelines(fullFile+' '+str(pagesThis)+'\\n')\r\n pagesTotal+=pagesThis\r\n \r\n \r\n fid.writelines('All PPT/PPTX files in this folder have the pages: '+str(pagesTotal)+'\\n\\n\\n\\n\\n\\n')\r\n\r\n\r\n\r\nprint('Done. Please check it!')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Input/Output test
"""
Date   : 2021/04/27
Author : Lee Ji-young (이지영)
Topic  : Python standard input/output practice (textbook p.42)
"""
# Python standard output
print('hello', end='!')  # print is the output function (like document.write('hello'); in JavaScript)
print('python')
print('010', '1234', '1111', sep='-')  # separator value
# Python standard input
num = input('Enter a number: ')
print('Number entered :', num)
print('num type :', type(num))
# input() always returns a string (<class 'str'>), so it must be converted to a number
result = int(num)
print('result :', result)
print('result type :', type(result))
# printf-style format specifiers
print('%d/%d/%d (%s)' % (2021, 4, 27, 'Tue'))  # %s formats a string
# str.format output
print('Name : {}, Age : {}, Address : {}'.format('Kim Yu-shin', 23, 'Gimhae'))
|
normal
|
{
"blob_id": "cc628270a973866025a5e2a5d07e39b4dbdcd324",
"index": 1718,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('hello', end='!')\nprint('python')\nprint('010', '1234', '1111', sep='-')\n<mask token>\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\n<mask token>\nprint('result :', result)\nprint('result type :', type(result))\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))\nprint('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))\n",
"step-3": "<mask token>\nprint('hello', end='!')\nprint('python')\nprint('010', '1234', '1111', sep='-')\nnum = input('숫자입력 : ')\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\nresult = int(num)\nprint('result :', result)\nprint('result type :', type(result))\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))\nprint('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))\n",
"step-4": "# Input Output test (입출력 테스트 )\n\"\"\"\n날짜 : 2021/04/27\n이름 : 이지영\n내용 : 파이썬 표준입출력 실습 _ 교재 p42\n\"\"\"\n\n# 파이썬 표준 출력\nprint('hello', end='!') #print : 출력함수 (자바에선 document.write('hello');)\nprint('python')\n\nprint('010', '1234', '1111', sep='-') # seperate 값\n\n# 파이썬 표준 입력\nnum = input('숫자입력 : ')\n\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\n\n# 입력받은 문자열을 숫자로 변환하는 작업이 필요함. <class 'str'> 문자열로 읽히기 때문\nresult = int(num)\nprint('result :', result)\nprint('result type :', type(result))\n\n\n# 서식문자 출력\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화')) # %s: string 문자열을 나타냄\n\n# 포맷문자 출력\nprint('이름 : {}, 나이 : {}, 주소 : {}' .format('김유신', 23, '김해시'))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import json
import os
import convlab
from convlab.modules.dst.multiwoz.dst_util import init_state
from convlab.modules.dst.multiwoz.dst_util import normalize_value
from convlab.modules.dst.state_tracker import Tracker
from convlab.modules.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA
class RuleDST(Tracker):
"""Rule based DST which trivially updates new values from NLU result to states."""
def __init__(self):
Tracker.__init__(self)
self.state = init_state()
prefix = os.path.dirname(os.path.dirname(convlab.__file__))
self.value_dict = json.load(open(prefix+'/data/multiwoz/value_dict.json'))
def update(self, user_act=None):
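        """Update the tracked state with a user dialog act of the form
        {'Domain-Type': [[slot, value], ...]} (MultiWOZ convention)."""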
# print('------------------{}'.format(user_act))
if not isinstance(user_act, dict):
raise Exception('Expect user_act to be <class \'dict\'> type but get {}.'.format(type(user_act)))
previous_state = self.state
new_belief_state = copy.deepcopy(previous_state['belief_state'])
new_request_state = copy.deepcopy(previous_state['request_state'])
for domain_type in user_act.keys():
domain, tpe = domain_type.lower().split('-')
if domain in ['unk', 'general', 'booking']:
continue
if tpe == 'inform':
for k, v in user_act[domain_type]:
k = REF_SYS_DA[domain.capitalize()].get(k, k)
if k is None:
continue
try:
assert domain in new_belief_state
except:
raise Exception('Error: domain <{}> not in new belief state'.format(domain))
domain_dic = new_belief_state[domain]
assert 'semi' in domain_dic
assert 'book' in domain_dic
if k in domain_dic['semi']:
nvalue = normalize_value(self.value_dict, domain, k, v)
# if nvalue != v:
# _log('domain {} slot {} value {} -> {}'.format(domain, k, v, nvalue))
new_belief_state[domain]['semi'][k] = nvalue
elif k in domain_dic['book']:
new_belief_state[domain]['book'][k] = v
elif k.lower() in domain_dic['book']:
new_belief_state[domain]['book'][k.lower()] = v
elif k == 'trainID' and domain == 'train':
new_belief_state[domain]['book'][k] = normalize_value(self.value_dict, domain, k, v)
else:
# raise Exception('unknown slot name <{}> of domain <{}>'.format(k, domain))
with open('unknown_slot.log', 'a+') as f:
f.write('unknown slot name <{}> of domain <{}>\n'.format(k, domain))
elif tpe == 'request':
for k, v in user_act[domain_type]:
k = REF_SYS_DA[domain.capitalize()].get(k, k)
if domain not in new_request_state:
new_request_state[domain] = {}
if k not in new_request_state[domain]:
new_request_state[domain][k] = 0
new_state = copy.deepcopy(previous_state)
new_state['belief_state'] = new_belief_state
new_state['request_state'] = new_request_state
new_state['user_action'] = user_act
self.state = new_state
return self.state
def init_session(self):
self.state = init_state()
|
normal
|
{
"blob_id": "8de82d09c8a9a1c1db59b0cac9cf8dda04f35847",
"index": 3335,
"step-1": "<mask token>\n\n\nclass RuleDST(Tracker):\n <mask token>\n\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix +\n '/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n if not isinstance(user_act, dict):\n raise Exception(\n \"Expect user_act to be <class 'dict'> type but get {}.\".\n format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception(\n 'Error: domain <{}> not in new belief state'.\n format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(\n self.value_dict, domain, k, v)\n else:\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'\n .format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n self.state = new_state\n return self.state\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RuleDST(Tracker):\n <mask token>\n\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix +\n '/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n if not isinstance(user_act, dict):\n raise Exception(\n \"Expect user_act to be <class 'dict'> type but get {}.\".\n format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception(\n 'Error: domain <{}> not in new belief state'.\n format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(\n self.value_dict, domain, k, v)\n else:\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'\n .format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n self.state = new_state\n return self.state\n\n def init_session(self):\n self.state = init_state()\n",
"step-3": "<mask token>\n\n\nclass RuleDST(Tracker):\n \"\"\"Rule based DST which trivially updates new values from NLU result to states.\"\"\"\n\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix +\n '/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n if not isinstance(user_act, dict):\n raise Exception(\n \"Expect user_act to be <class 'dict'> type but get {}.\".\n format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception(\n 'Error: domain <{}> not in new belief state'.\n format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(\n self.value_dict, domain, k, v)\n else:\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'\n .format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n self.state = new_state\n return self.state\n\n def init_session(self):\n self.state = init_state()\n",
"step-4": "import copy\nimport json\nimport os\nimport convlab\nfrom convlab.modules.dst.multiwoz.dst_util import init_state\nfrom convlab.modules.dst.multiwoz.dst_util import normalize_value\nfrom convlab.modules.dst.state_tracker import Tracker\nfrom convlab.modules.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA\n\n\nclass RuleDST(Tracker):\n \"\"\"Rule based DST which trivially updates new values from NLU result to states.\"\"\"\n\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix +\n '/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n if not isinstance(user_act, dict):\n raise Exception(\n \"Expect user_act to be <class 'dict'> type but get {}.\".\n format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception(\n 'Error: domain <{}> not in new belief state'.\n format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(\n self.value_dict, domain, k, v)\n else:\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'\n .format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n self.state = new_state\n return self.state\n\n def init_session(self):\n self.state = init_state()\n",
"step-5": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport copy\nimport json\nimport os\n\nimport convlab\nfrom convlab.modules.dst.multiwoz.dst_util import init_state\nfrom convlab.modules.dst.multiwoz.dst_util import normalize_value\nfrom convlab.modules.dst.state_tracker import Tracker\nfrom convlab.modules.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA\n\n\nclass RuleDST(Tracker):\n \"\"\"Rule based DST which trivially updates new values from NLU result to states.\"\"\"\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix+'/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n # print('------------------{}'.format(user_act))\n if not isinstance(user_act, dict):\n raise Exception('Expect user_act to be <class \\'dict\\'> type but get {}.'.format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception('Error: domain <{}> not in new belief state'.format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n # if nvalue != v:\n # _log('domain {} slot {} value {} -> {}'.format(domain, k, v, nvalue))\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(self.value_dict, domain, k, v)\n else:\n # raise Exception('unknown slot name <{}> of domain <{}>'.format(k, domain))\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'.format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n\n self.state = new_state\n \n return self.state\n\n def init_session(self):\n self.state = init_state()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import boto3
import os
from trustedadvisor import authenticate_support
accountnumber = os.environ['Account_Number']
rolename = os.environ['Role_Name']
rolesession = accountnumber + rolename
def lambda_handler(event, context):
sts_client = boto3.client('sts')
assumerole = sts_client.assume_role(
RoleArn="arn:aws:iam::" + accountnumber + ":role/" + rolename,
RoleSessionName=rolesession
)
credentials = assumerole['Credentials']
return authenticate_support(credentials)
|
normal
|
{
"blob_id": "539431649e54469ddbe44fdbd17031b4449abdd9",
"index": 5867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lambda_handler(event, context):\n sts_client = boto3.client('sts')\n assumerole = sts_client.assume_role(RoleArn='arn:aws:iam::' +\n accountnumber + ':role/' + rolename, RoleSessionName=rolesession)\n credentials = assumerole['Credentials']\n return authenticate_support(credentials)\n",
"step-3": "<mask token>\naccountnumber = os.environ['Account_Number']\nrolename = os.environ['Role_Name']\nrolesession = accountnumber + rolename\n\n\ndef lambda_handler(event, context):\n sts_client = boto3.client('sts')\n assumerole = sts_client.assume_role(RoleArn='arn:aws:iam::' +\n accountnumber + ':role/' + rolename, RoleSessionName=rolesession)\n credentials = assumerole['Credentials']\n return authenticate_support(credentials)\n",
"step-4": "import boto3\nimport os\nfrom trustedadvisor import authenticate_support\naccountnumber = os.environ['Account_Number']\nrolename = os.environ['Role_Name']\nrolesession = accountnumber + rolename\n\n\ndef lambda_handler(event, context):\n sts_client = boto3.client('sts')\n assumerole = sts_client.assume_role(RoleArn='arn:aws:iam::' +\n accountnumber + ':role/' + rolename, RoleSessionName=rolesession)\n credentials = assumerole['Credentials']\n return authenticate_support(credentials)\n",
"step-5": "import boto3\nimport os\n\nfrom trustedadvisor import authenticate_support\n\naccountnumber = os.environ['Account_Number']\nrolename = os.environ['Role_Name']\nrolesession = accountnumber + rolename\n\n\ndef lambda_handler(event, context):\n sts_client = boto3.client('sts')\n assumerole = sts_client.assume_role(\n RoleArn=\"arn:aws:iam::\" + accountnumber + \":role/\" + rolename,\n RoleSessionName=rolesession\n )\n\n credentials = assumerole['Credentials']\n\n return authenticate_support(credentials)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class GoldenTemplate(BasicTemplate):
<|reserved_special_token_0|>
sidebar_width = param.Integer(default=20, constant=True, doc=
"""
The width of the sidebar in percent.""")
_css = pathlib.Path(__file__).parent / 'golden.css'
_template = pathlib.Path(__file__).parent / 'golden.html'
_resources = {'css': {'goldenlayout':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css',
'golden-theme-dark':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css'
, 'golden-theme-light':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css'
}, 'js': {'jquery': JS_URLS['jQuery'], 'goldenlayout':
f'{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js'}}
def _apply_root(self, name, model, tags):
if 'main' in tags:
model.margin = 10, 15, 10, 10
def resolve_resources(self, cdn: (bool | Literal['auto'])='auto'
) ->ResourcesType:
resources = super().resolve_resources(cdn=cdn)
del_theme = ('dark' if self._design.theme._name == 'default' else
'light')
del resources['css'][f'golden-theme-{del_theme}']
return resources
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GoldenTemplate(BasicTemplate):
"""
GoldenTemplate is built on top of golden-layout library.
"""
sidebar_width = param.Integer(default=20, constant=True, doc=
"""
The width of the sidebar in percent.""")
_css = pathlib.Path(__file__).parent / 'golden.css'
_template = pathlib.Path(__file__).parent / 'golden.html'
_resources = {'css': {'goldenlayout':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css',
'golden-theme-dark':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css'
, 'golden-theme-light':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css'
}, 'js': {'jquery': JS_URLS['jQuery'], 'goldenlayout':
f'{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js'}}
def _apply_root(self, name, model, tags):
if 'main' in tags:
model.margin = 10, 15, 10, 10
def resolve_resources(self, cdn: (bool | Literal['auto'])='auto'
) ->ResourcesType:
resources = super().resolve_resources(cdn=cdn)
del_theme = ('dark' if self._design.theme._name == 'default' else
'light')
del resources['css'][f'golden-theme-{del_theme}']
return resources
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if TYPE_CHECKING:
from ...io.resources import ResourcesType
class GoldenTemplate(BasicTemplate):
"""
GoldenTemplate is built on top of golden-layout library.
"""
sidebar_width = param.Integer(default=20, constant=True, doc=
"""
The width of the sidebar in percent.""")
_css = pathlib.Path(__file__).parent / 'golden.css'
_template = pathlib.Path(__file__).parent / 'golden.html'
_resources = {'css': {'goldenlayout':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css',
'golden-theme-dark':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css'
, 'golden-theme-light':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css'
}, 'js': {'jquery': JS_URLS['jQuery'], 'goldenlayout':
f'{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js'}}
def _apply_root(self, name, model, tags):
if 'main' in tags:
model.margin = 10, 15, 10, 10
def resolve_resources(self, cdn: (bool | Literal['auto'])='auto'
) ->ResourcesType:
resources = super().resolve_resources(cdn=cdn)
del_theme = ('dark' if self._design.theme._name == 'default' else
'light')
del resources['css'][f'golden-theme-{del_theme}']
return resources
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import annotations
import pathlib
from typing import TYPE_CHECKING, Literal
import param
from ...config import config
from ...io.resources import JS_URLS
from ..base import BasicTemplate
if TYPE_CHECKING:
from ...io.resources import ResourcesType
class GoldenTemplate(BasicTemplate):
"""
GoldenTemplate is built on top of golden-layout library.
"""
sidebar_width = param.Integer(default=20, constant=True, doc=
"""
The width of the sidebar in percent.""")
_css = pathlib.Path(__file__).parent / 'golden.css'
_template = pathlib.Path(__file__).parent / 'golden.html'
_resources = {'css': {'goldenlayout':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css',
'golden-theme-dark':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css'
, 'golden-theme-light':
f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css'
}, 'js': {'jquery': JS_URLS['jQuery'], 'goldenlayout':
f'{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js'}}
def _apply_root(self, name, model, tags):
if 'main' in tags:
model.margin = 10, 15, 10, 10
def resolve_resources(self, cdn: (bool | Literal['auto'])='auto'
) ->ResourcesType:
resources = super().resolve_resources(cdn=cdn)
del_theme = ('dark' if self._design.theme._name == 'default' else
'light')
del resources['css'][f'golden-theme-{del_theme}']
return resources
<|reserved_special_token_1|>
"""
GoldenTemplate based on the golden-layout library.
"""
from __future__ import annotations
import pathlib
from typing import TYPE_CHECKING, Literal
import param
from ...config import config
from ...io.resources import JS_URLS
from ..base import BasicTemplate
if TYPE_CHECKING:
from ...io.resources import ResourcesType
class GoldenTemplate(BasicTemplate):
"""
GoldenTemplate is built on top of golden-layout library.
"""
sidebar_width = param.Integer(default=20, constant=True, doc="""
The width of the sidebar in percent.""")
_css = pathlib.Path(__file__).parent / 'golden.css'
_template = pathlib.Path(__file__).parent / 'golden.html'
_resources = {
'css': {
'goldenlayout': f"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css",
'golden-theme-dark': f"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css",
'golden-theme-light': f"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css"
},
'js': {
'jquery': JS_URLS['jQuery'],
'goldenlayout': f"{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js"
}
}
def _apply_root(self, name, model, tags):
if 'main' in tags:
model.margin = (10, 15, 10, 10)
def resolve_resources(self, cdn: bool | Literal['auto'] = 'auto') -> ResourcesType:
resources = super().resolve_resources(cdn=cdn)
del_theme = 'dark' if self._design.theme._name =='default' else 'light'
del resources['css'][f'golden-theme-{del_theme}']
return resources
|
flexible
|
{
"blob_id": "5bfb69d1608b397d6a19e663164a30089e4f67ad",
"index": 2859,
"step-1": "<mask token>\n\n\nclass GoldenTemplate(BasicTemplate):\n <mask token>\n sidebar_width = param.Integer(default=20, constant=True, doc=\n \"\"\"\n The width of the sidebar in percent.\"\"\")\n _css = pathlib.Path(__file__).parent / 'golden.css'\n _template = pathlib.Path(__file__).parent / 'golden.html'\n _resources = {'css': {'goldenlayout':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css',\n 'golden-theme-dark':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css'\n , 'golden-theme-light':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css'\n }, 'js': {'jquery': JS_URLS['jQuery'], 'goldenlayout':\n f'{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js'}}\n\n def _apply_root(self, name, model, tags):\n if 'main' in tags:\n model.margin = 10, 15, 10, 10\n\n def resolve_resources(self, cdn: (bool | Literal['auto'])='auto'\n ) ->ResourcesType:\n resources = super().resolve_resources(cdn=cdn)\n del_theme = ('dark' if self._design.theme._name == 'default' else\n 'light')\n del resources['css'][f'golden-theme-{del_theme}']\n return resources\n",
"step-2": "<mask token>\n\n\nclass GoldenTemplate(BasicTemplate):\n \"\"\"\n GoldenTemplate is built on top of golden-layout library.\n \"\"\"\n sidebar_width = param.Integer(default=20, constant=True, doc=\n \"\"\"\n The width of the sidebar in percent.\"\"\")\n _css = pathlib.Path(__file__).parent / 'golden.css'\n _template = pathlib.Path(__file__).parent / 'golden.html'\n _resources = {'css': {'goldenlayout':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css',\n 'golden-theme-dark':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css'\n , 'golden-theme-light':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css'\n }, 'js': {'jquery': JS_URLS['jQuery'], 'goldenlayout':\n f'{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js'}}\n\n def _apply_root(self, name, model, tags):\n if 'main' in tags:\n model.margin = 10, 15, 10, 10\n\n def resolve_resources(self, cdn: (bool | Literal['auto'])='auto'\n ) ->ResourcesType:\n resources = super().resolve_resources(cdn=cdn)\n del_theme = ('dark' if self._design.theme._name == 'default' else\n 'light')\n del resources['css'][f'golden-theme-{del_theme}']\n return resources\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from ...io.resources import ResourcesType\n\n\nclass GoldenTemplate(BasicTemplate):\n \"\"\"\n GoldenTemplate is built on top of golden-layout library.\n \"\"\"\n sidebar_width = param.Integer(default=20, constant=True, doc=\n \"\"\"\n The width of the sidebar in percent.\"\"\")\n _css = pathlib.Path(__file__).parent / 'golden.css'\n _template = pathlib.Path(__file__).parent / 'golden.html'\n _resources = {'css': {'goldenlayout':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css',\n 'golden-theme-dark':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css'\n , 'golden-theme-light':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css'\n }, 'js': {'jquery': JS_URLS['jQuery'], 'goldenlayout':\n f'{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js'}}\n\n def _apply_root(self, name, model, tags):\n if 'main' in tags:\n model.margin = 10, 15, 10, 10\n\n def resolve_resources(self, cdn: (bool | Literal['auto'])='auto'\n ) ->ResourcesType:\n resources = super().resolve_resources(cdn=cdn)\n del_theme = ('dark' if self._design.theme._name == 'default' else\n 'light')\n del resources['css'][f'golden-theme-{del_theme}']\n return resources\n",
"step-4": "<mask token>\nfrom __future__ import annotations\nimport pathlib\nfrom typing import TYPE_CHECKING, Literal\nimport param\nfrom ...config import config\nfrom ...io.resources import JS_URLS\nfrom ..base import BasicTemplate\nif TYPE_CHECKING:\n from ...io.resources import ResourcesType\n\n\nclass GoldenTemplate(BasicTemplate):\n \"\"\"\n GoldenTemplate is built on top of golden-layout library.\n \"\"\"\n sidebar_width = param.Integer(default=20, constant=True, doc=\n \"\"\"\n The width of the sidebar in percent.\"\"\")\n _css = pathlib.Path(__file__).parent / 'golden.css'\n _template = pathlib.Path(__file__).parent / 'golden.html'\n _resources = {'css': {'goldenlayout':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css',\n 'golden-theme-dark':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css'\n , 'golden-theme-light':\n f'{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css'\n }, 'js': {'jquery': JS_URLS['jQuery'], 'goldenlayout':\n f'{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js'}}\n\n def _apply_root(self, name, model, tags):\n if 'main' in tags:\n model.margin = 10, 15, 10, 10\n\n def resolve_resources(self, cdn: (bool | Literal['auto'])='auto'\n ) ->ResourcesType:\n resources = super().resolve_resources(cdn=cdn)\n del_theme = ('dark' if self._design.theme._name == 'default' else\n 'light')\n del resources['css'][f'golden-theme-{del_theme}']\n return resources\n",
"step-5": "\"\"\"\nGoldenTemplate based on the golden-layout library.\n\"\"\"\nfrom __future__ import annotations\n\nimport pathlib\n\nfrom typing import TYPE_CHECKING, Literal\n\nimport param\n\nfrom ...config import config\nfrom ...io.resources import JS_URLS\nfrom ..base import BasicTemplate\n\nif TYPE_CHECKING:\n from ...io.resources import ResourcesType\n\n\nclass GoldenTemplate(BasicTemplate):\n \"\"\"\n GoldenTemplate is built on top of golden-layout library.\n \"\"\"\n\n sidebar_width = param.Integer(default=20, constant=True, doc=\"\"\"\n The width of the sidebar in percent.\"\"\")\n\n _css = pathlib.Path(__file__).parent / 'golden.css'\n\n _template = pathlib.Path(__file__).parent / 'golden.html'\n\n _resources = {\n 'css': {\n 'goldenlayout': f\"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css\",\n 'golden-theme-dark': f\"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css\",\n 'golden-theme-light': f\"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css\"\n },\n 'js': {\n 'jquery': JS_URLS['jQuery'],\n 'goldenlayout': f\"{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js\"\n }\n }\n\n def _apply_root(self, name, model, tags):\n if 'main' in tags:\n model.margin = (10, 15, 10, 10)\n\n def resolve_resources(self, cdn: bool | Literal['auto'] = 'auto') -> ResourcesType:\n resources = super().resolve_resources(cdn=cdn)\n del_theme = 'dark' if self._design.theme._name =='default' else 'light'\n del resources['css'][f'golden-theme-{del_theme}']\n return resources\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import sys
numList = list(range(3))
for i in range(3):
numList[i] = int(sys.stdin.readline())
result = numList[0] * numList[1] * numList[2]
resultList = list(str(result))
intList = list(range(10))
for i in intList:
print(resultList.count(str(i)))
|
normal
|
{
"blob_id": "c3de6cd76ca7180a1a4d236bb2a6a18f7594f38b",
"index": 1304,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(3):\n numList[i] = int(sys.stdin.readline())\n<mask token>\nfor i in intList:\n print(resultList.count(str(i)))\n",
"step-3": "<mask token>\nnumList = list(range(3))\nfor i in range(3):\n numList[i] = int(sys.stdin.readline())\nresult = numList[0] * numList[1] * numList[2]\nresultList = list(str(result))\nintList = list(range(10))\nfor i in intList:\n print(resultList.count(str(i)))\n",
"step-4": "import sys\nnumList = list(range(3))\nfor i in range(3):\n numList[i] = int(sys.stdin.readline())\nresult = numList[0] * numList[1] * numList[2]\nresultList = list(str(result))\nintList = list(range(10))\nfor i in intList:\n print(resultList.count(str(i)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def get_bios_boot_order(self):
result = {
}
boot_device_list = []
boot_device_details = []
key = 'Bios'
bootsources = 'BootSources'
response = self.get_request((self.root_uri + self.systems_uri))
if (response['ret'] is False):
return response
result['ret'] = True
data = response['data']
bios_uri = data[key]['@odata.id']
response = self.get_request((self.root_uri + bios_uri))
if (response['ret'] is False):
return response
data = response['data']
boot_mode = data['Attributes']['BootMode']
if (boot_mode == 'Uefi'):
boot_seq = 'UefiBootSeq'
else:
boot_seq = 'BootSeq'
response = self.get_request((((self.root_uri + self.systems_uri) + '/') + bootsources))
if (response['ret'] is False):
return response
result['ret'] = True
data = response['data']
boot_device_list = data['Attributes'][boot_seq]
for b in boot_device_list:
boot_device = {
}
boot_device['Index'] = b['Index']
boot_device['Name'] = b['Name']
boot_device['Enabled'] = b['Enabled']
boot_device_details.append(boot_device)
result['entries'] = boot_device_details
return result
|
normal
|
{
"blob_id": "bbe7df31a44ccf51c305cd620dc7c4155b7e1a97",
"index": 2668,
"step-1": "<mask token>\n",
"step-2": "def get_bios_boot_order(self):\n result = {}\n boot_device_list = []\n boot_device_details = []\n key = 'Bios'\n bootsources = 'BootSources'\n response = self.get_request(self.root_uri + self.systems_uri)\n if response['ret'] is False:\n return response\n result['ret'] = True\n data = response['data']\n bios_uri = data[key]['@odata.id']\n response = self.get_request(self.root_uri + bios_uri)\n if response['ret'] is False:\n return response\n data = response['data']\n boot_mode = data['Attributes']['BootMode']\n if boot_mode == 'Uefi':\n boot_seq = 'UefiBootSeq'\n else:\n boot_seq = 'BootSeq'\n response = self.get_request(self.root_uri + self.systems_uri + '/' +\n bootsources)\n if response['ret'] is False:\n return response\n result['ret'] = True\n data = response['data']\n boot_device_list = data['Attributes'][boot_seq]\n for b in boot_device_list:\n boot_device = {}\n boot_device['Index'] = b['Index']\n boot_device['Name'] = b['Name']\n boot_device['Enabled'] = b['Enabled']\n boot_device_details.append(boot_device)\n result['entries'] = boot_device_details\n return result\n",
"step-3": "def get_bios_boot_order(self):\n result = {\n \n }\n boot_device_list = []\n boot_device_details = []\n key = 'Bios'\n bootsources = 'BootSources'\n response = self.get_request((self.root_uri + self.systems_uri))\n if (response['ret'] is False):\n return response\n result['ret'] = True\n data = response['data']\n bios_uri = data[key]['@odata.id']\n response = self.get_request((self.root_uri + bios_uri))\n if (response['ret'] is False):\n return response\n data = response['data']\n boot_mode = data['Attributes']['BootMode']\n if (boot_mode == 'Uefi'):\n boot_seq = 'UefiBootSeq'\n else:\n boot_seq = 'BootSeq'\n response = self.get_request((((self.root_uri + self.systems_uri) + '/') + bootsources))\n if (response['ret'] is False):\n return response\n result['ret'] = True\n data = response['data']\n boot_device_list = data['Attributes'][boot_seq]\n for b in boot_device_list:\n boot_device = {\n \n }\n boot_device['Index'] = b['Index']\n boot_device['Name'] = b['Name']\n boot_device['Enabled'] = b['Enabled']\n boot_device_details.append(boot_device)\n result['entries'] = boot_device_details\n return result",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_chunk(reader, chunk_size):
data = {}
for i in range(chunk_size):
ret = reader.read_next()
for k, v in ret.items():
if k not in data:
data[k] = []
data[k].append(v)
data['input'] = np.array(data['input'])
data['masking'] = np.array(data['masking'])
data['timestamp'] = np.array(data['timestamp'])
data['label'] = np.array(data['label'])
return data
<|reserved_special_token_1|>
import numpy as np
import os
import random
import pandas as pd
def read_chunk(reader, chunk_size):
data = {}
for i in range(chunk_size):
ret = reader.read_next()
for k, v in ret.items():
if k not in data:
data[k] = []
data[k].append(v)
data['input'] = np.array(data['input'])
data['masking'] = np.array(data['masking'])
data['timestamp'] = np.array(data['timestamp'])
data['label'] = np.array(data['label'])
return data
<|reserved_special_token_1|>
#!/usr/bin/env python3
import numpy as np
import os
import random
import pandas as pd
def read_chunk(reader, chunk_size):
data = {}
for i in range(chunk_size):
ret = reader.read_next()
for k, v in ret.items():
if k not in data:
data[k] = []
data[k].append(v)
data['input'] = np.array(data['input'])
data['masking'] = np.array(data['masking'])
data['timestamp'] = np.array(data['timestamp'])
data['label'] = np.array(data['label'])
return data
|
flexible
|
{
"blob_id": "dc28c3426f47bef8b691a06d54713bc68696ee44",
"index": 8309,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_chunk(reader, chunk_size):\n data = {}\n for i in range(chunk_size):\n ret = reader.read_next()\n for k, v in ret.items():\n if k not in data:\n data[k] = []\n data[k].append(v)\n data['input'] = np.array(data['input'])\n data['masking'] = np.array(data['masking'])\n data['timestamp'] = np.array(data['timestamp'])\n data['label'] = np.array(data['label'])\n return data\n",
"step-3": "import numpy as np\nimport os\nimport random\nimport pandas as pd\n\n\ndef read_chunk(reader, chunk_size):\n data = {}\n for i in range(chunk_size):\n ret = reader.read_next()\n for k, v in ret.items():\n if k not in data:\n data[k] = []\n data[k].append(v)\n data['input'] = np.array(data['input'])\n data['masking'] = np.array(data['masking'])\n data['timestamp'] = np.array(data['timestamp'])\n data['label'] = np.array(data['label'])\n return data\n",
"step-4": "#!/usr/bin/env python3\n\nimport numpy as np\nimport os\nimport random\nimport pandas as pd\n\ndef read_chunk(reader, chunk_size):\n\n data = {}\n for i in range(chunk_size):\n ret = reader.read_next()\n for k, v in ret.items():\n if k not in data:\n data[k] = []\n data[k].append(v)\n data['input'] = np.array(data['input'])\n data['masking'] = np.array(data['masking'])\n data['timestamp'] = np.array(data['timestamp'])\n data['label'] = np.array(data['label'])\n return data\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.apps import AppConfig
class FitnerappConfig(AppConfig):
name = 'fitnerapp'
|
normal
|
{
"blob_id": "6546d04d3755d62d1a8756bdec1a10f6f018dcea",
"index": 5638,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FitnerappConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FitnerappConfig(AppConfig):\n name = 'fitnerapp'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass FitnerappConfig(AppConfig):\n name = 'fitnerapp'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class WorkersOrchestrator:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WorkersOrchestrator:
@classmethod
def worker_func(cls, worker):
worker.start_consumption()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WorkersOrchestrator:
@classmethod
def worker_func(cls, worker):
worker.start_consumption()
def run_orchestrator(self, num_of_workers):
worker_list = []
for i in range(num_of_workers):
worker_list.append(QueueWorker())
worker_threads = list()
for worker in worker_list:
x = threading.Thread(target=self.worker_func, args=(worker,))
worker_threads.append(x)
x.start()
<|reserved_special_token_1|>
from .queue_worker import QueueWorker
import threading
class WorkersOrchestrator:
@classmethod
def worker_func(cls, worker):
worker.start_consumption()
def run_orchestrator(self, num_of_workers):
worker_list = []
for i in range(num_of_workers):
worker_list.append(QueueWorker())
worker_threads = list()
for worker in worker_list:
x = threading.Thread(target=self.worker_func, args=(worker,))
worker_threads.append(x)
x.start()
|
flexible
|
{
"blob_id": "6a4a5eac1b736ee4f8587adba298571f90df1cf9",
"index": 8864,
"step-1": "<mask token>\n\n\nclass WorkersOrchestrator:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass WorkersOrchestrator:\n\n @classmethod\n def worker_func(cls, worker):\n worker.start_consumption()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass WorkersOrchestrator:\n\n @classmethod\n def worker_func(cls, worker):\n worker.start_consumption()\n\n def run_orchestrator(self, num_of_workers):\n worker_list = []\n for i in range(num_of_workers):\n worker_list.append(QueueWorker())\n worker_threads = list()\n for worker in worker_list:\n x = threading.Thread(target=self.worker_func, args=(worker,))\n worker_threads.append(x)\n x.start()\n",
"step-4": "from .queue_worker import QueueWorker\nimport threading\n\n\nclass WorkersOrchestrator:\n\n @classmethod\n def worker_func(cls, worker):\n worker.start_consumption()\n\n def run_orchestrator(self, num_of_workers):\n worker_list = []\n for i in range(num_of_workers):\n worker_list.append(QueueWorker())\n worker_threads = list()\n for worker in worker_list:\n x = threading.Thread(target=self.worker_func, args=(worker,))\n worker_threads.append(x)\n x.start()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PlayOut(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.wavefiles = [f for f in listdir('./en') if path.isfile(path.
join('./en', f))]
def run(self):
for wav in list(self.wavefiles):
global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle
filename = wav
print('Playing:' + filename)
flgPlayOn = True
flgGoogle = False
time.sleep(0.5)
WAV_FILE = path.join('./en', wav)
wf = wave.open(WAV_FILE, 'rb')
stream = p.open(format=p.get_format_from_width(wf.getsampwidth(
)), channels=wf.getnchannels(), rate=wf.getframerate(),
output=True)
data = wf.readframes(CHUNK_OUT)
while len(data) > 0:
stream.write(data)
data = wf.readframes(CHUNK_OUT)
wf.close()
stream.stop_stream()
stream.close()
time.sleep(1)
flgPlayOn = False
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
flgRefReady = False
try:
reftext = r.recognize_google(audio)
print('correct one:' + str(reftext.encode('utf-8')))
filesave.write('correct one:' + str(reftext.encode('utf-8')))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
flgRefReady = True
while flgGoogle == False:
time.sleep(0.01)
flgFinish = True
p.terminate()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
global flgLoad
<|reserved_special_token_0|>
class PlayOut(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.wavefiles = [f for f in listdir('./en') if path.isfile(path.
join('./en', f))]
def run(self):
for wav in list(self.wavefiles):
global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle
filename = wav
print('Playing:' + filename)
flgPlayOn = True
flgGoogle = False
time.sleep(0.5)
WAV_FILE = path.join('./en', wav)
wf = wave.open(WAV_FILE, 'rb')
stream = p.open(format=p.get_format_from_width(wf.getsampwidth(
)), channels=wf.getnchannels(), rate=wf.getframerate(),
output=True)
data = wf.readframes(CHUNK_OUT)
while len(data) > 0:
stream.write(data)
data = wf.readframes(CHUNK_OUT)
wf.close()
stream.stop_stream()
stream.close()
time.sleep(1)
flgPlayOn = False
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
flgRefReady = False
try:
reftext = r.recognize_google(audio)
print('correct one:' + str(reftext.encode('utf-8')))
filesave.write('correct one:' + str(reftext.encode('utf-8')))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
flgRefReady = True
while flgGoogle == False:
time.sleep(0.01)
flgFinish = True
p.terminate()
if False:
Frames_1024 = MIC_ARRAY.Read()
while BEAM.ListenBGNoise(Frames_1024) == 0:
time.sleep(0.0001)
<|reserved_special_token_0|>
thread_play.start()
while flgFinish == False:
time.sleep(0.01)
print('**** recording *******')
ind = 0
flgLoad = [True] * PAR.CNTBUF
MIC_ARRAY.ForgetOldData()
while flgPlayOn == True:
Frames_1024 = MIC_ARRAY.Read()
"""Sound Source Localization"""
idxDir = LOC.Update(Frames_1024)
Beam_Audio = BEAM.BFCalc(Frames_1024, 1, Post_Filtering=False)
Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]
Audio_SD[ind:ind + PAR.N] = Beam_Audio
ind = ind + PAR.N
print('**** done recording **')
raw_data = Audio_SD[:ind].astype(np.int16)
byte_data = raw_data.tostring()
WAVE_OUTPUT_BF_SD = filename + 'SD.wav'
wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')
wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))
wf.writeframes(byte_data)
wf.close()
for i in range(0, PAR.m):
raw_data = Audio_Data[:ind, i].astype(np.int16)
byte_data = raw_data.tostring()
WAVE_OUTPUT_FILENAME_I = filename + 'channel' + str(i) + '.wav'
Data_Audio = 'Audio_Channel' + str(i)
wf = wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')
wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))
wf.writeframesraw(byte_data)
wf.close()
while flgRefReady == False:
time.sleep(0.01)
if True:
ResSum = 0
for i in range(0, 8):
file = filename + 'channel' + str(i) + '.wav'
WAV_FILE = path.join(path.dirname(path.realpath(__file__)), file)
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
try:
testtext = r.recognize_google(audio)
print('Google Speech Recognition for mic ' + str(i) +
'::::::::::' + str(testtext.encode('utf-8')))
filesave.write(' mic ' + str(i) + '::::::::::' + str(
testtext.encode('utf-8')))
filesave.write('\r\n')
res = wer.wer(reftext, testtext)
ResSum += 1.0 / 8.0 * res
print('Word Error Rate: {0:.04f}'.format(res))
filesave.write('Word Error Rate: {0:.04f}'.format(res))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
ResSum += 1.0 / 8.0
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
ResSum += 1.0 / 8.0
filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))
filesave.write('\r\n')
WAV_FILE = path.join(path.dirname(path.realpath(__file__)),
filename + 'SD.wav')
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
try:
testtext = r.recognize_google(audio)
print('Beam-forming result :::::::::::::::::::::::::' + str(
testtext.encode('utf-8')))
filesave.write('Beam-forming result :::::::::::::::::::::::::' +
str(testtext.encode('utf-8')))
filesave.write('\r\n')
res = wer.wer(reftext, testtext)
print('Word Error Rate: {0:.04f}'.format(res))
filesave.write('Word Error Rate: {0:.04f}'.format(res))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
flgGoogle = True
time.sleep(0.03)
LOC.Stop()
MIC_ARRAY.Stop_Read()
filesave.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
global flgLoad
flgGoogle = False
flgRefReady = False
flgPlayOn = False
flgFinish = False
CHUNK_OUT = 1024
reftext = None
filename = None
CHANNELS = 2
CHUNK = 1024 * 4
RATE = 64000
RECORD_SECONDS = 15
idxDir = 6
Audio_Data = np.zeros((np.floor(RECORD_SECONDS * RATE / 4), PAR.m))
Audio_SD = np.zeros(np.floor(RECORD_SECONDS * RATE / 4))
ind = 0
numCHUNK = np.floor(RATE * RECORD_SECONDS / CHUNK)
filesave = open('log.txt', 'w')
p = pyaudio.PyAudio()
r = sr.Recognizer()
MIC_ARRAY = READ.Mic_Array_Read()
LOC = DOA.DOA_MicArray()
BEAM = BF.BeamFormingObj(Weight_Update=False)
class PlayOut(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.wavefiles = [f for f in listdir('./en') if path.isfile(path.
join('./en', f))]
def run(self):
for wav in list(self.wavefiles):
global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle
filename = wav
print('Playing:' + filename)
flgPlayOn = True
flgGoogle = False
time.sleep(0.5)
WAV_FILE = path.join('./en', wav)
wf = wave.open(WAV_FILE, 'rb')
stream = p.open(format=p.get_format_from_width(wf.getsampwidth(
)), channels=wf.getnchannels(), rate=wf.getframerate(),
output=True)
data = wf.readframes(CHUNK_OUT)
while len(data) > 0:
stream.write(data)
data = wf.readframes(CHUNK_OUT)
wf.close()
stream.stop_stream()
stream.close()
time.sleep(1)
flgPlayOn = False
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
flgRefReady = False
try:
reftext = r.recognize_google(audio)
print('correct one:' + str(reftext.encode('utf-8')))
filesave.write('correct one:' + str(reftext.encode('utf-8')))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
flgRefReady = True
while flgGoogle == False:
time.sleep(0.01)
flgFinish = True
p.terminate()
if False:
Frames_1024 = MIC_ARRAY.Read()
while BEAM.ListenBGNoise(Frames_1024) == 0:
time.sleep(0.0001)
threadLock = threading.Lock()
thread_play = PlayOut()
thread_play.start()
while flgFinish == False:
time.sleep(0.01)
print('**** recording *******')
ind = 0
flgLoad = [True] * PAR.CNTBUF
MIC_ARRAY.ForgetOldData()
while flgPlayOn == True:
Frames_1024 = MIC_ARRAY.Read()
"""Sound Source Localization"""
idxDir = LOC.Update(Frames_1024)
Beam_Audio = BEAM.BFCalc(Frames_1024, 1, Post_Filtering=False)
Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]
Audio_SD[ind:ind + PAR.N] = Beam_Audio
ind = ind + PAR.N
print('**** done recording **')
raw_data = Audio_SD[:ind].astype(np.int16)
byte_data = raw_data.tostring()
WAVE_OUTPUT_BF_SD = filename + 'SD.wav'
wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')
wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))
wf.writeframes(byte_data)
wf.close()
for i in range(0, PAR.m):
raw_data = Audio_Data[:ind, i].astype(np.int16)
byte_data = raw_data.tostring()
WAVE_OUTPUT_FILENAME_I = filename + 'channel' + str(i) + '.wav'
Data_Audio = 'Audio_Channel' + str(i)
wf = wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')
wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))
wf.writeframesraw(byte_data)
wf.close()
while flgRefReady == False:
time.sleep(0.01)
if True:
ResSum = 0
for i in range(0, 8):
file = filename + 'channel' + str(i) + '.wav'
WAV_FILE = path.join(path.dirname(path.realpath(__file__)), file)
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
try:
testtext = r.recognize_google(audio)
print('Google Speech Recognition for mic ' + str(i) +
'::::::::::' + str(testtext.encode('utf-8')))
filesave.write(' mic ' + str(i) + '::::::::::' + str(
testtext.encode('utf-8')))
filesave.write('\r\n')
res = wer.wer(reftext, testtext)
ResSum += 1.0 / 8.0 * res
print('Word Error Rate: {0:.04f}'.format(res))
filesave.write('Word Error Rate: {0:.04f}'.format(res))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
ResSum += 1.0 / 8.0
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
ResSum += 1.0 / 8.0
filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))
filesave.write('\r\n')
WAV_FILE = path.join(path.dirname(path.realpath(__file__)),
filename + 'SD.wav')
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
try:
testtext = r.recognize_google(audio)
print('Beam-forming result :::::::::::::::::::::::::' + str(
testtext.encode('utf-8')))
filesave.write('Beam-forming result :::::::::::::::::::::::::' +
str(testtext.encode('utf-8')))
filesave.write('\r\n')
res = wer.wer(reftext, testtext)
print('Word Error Rate: {0:.04f}'.format(res))
filesave.write('Word Error Rate: {0:.04f}'.format(res))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
flgGoogle = True
time.sleep(0.03)
LOC.Stop()
MIC_ARRAY.Stop_Read()
filesave.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import speech_recognition as sr
import pyaudio
from os import listdir
from os import path
import time
import wave
import threading
import numpy as np
import BF.BeamForming as BF
import BF.Parameter as PAR
import BF.asr_wer as wer
import BF.mic_array_read as READ
import BF.DOA as DOA
global flgLoad
flgGoogle = False
flgRefReady = False
flgPlayOn = False
flgFinish = False
CHUNK_OUT = 1024
reftext = None
filename = None
CHANNELS = 2
CHUNK = 1024 * 4
RATE = 64000
RECORD_SECONDS = 15
idxDir = 6
Audio_Data = np.zeros((np.floor(RECORD_SECONDS * RATE / 4), PAR.m))
Audio_SD = np.zeros(np.floor(RECORD_SECONDS * RATE / 4))
ind = 0
numCHUNK = np.floor(RATE * RECORD_SECONDS / CHUNK)
filesave = open('log.txt', 'w')
p = pyaudio.PyAudio()
r = sr.Recognizer()
MIC_ARRAY = READ.Mic_Array_Read()
LOC = DOA.DOA_MicArray()
BEAM = BF.BeamFormingObj(Weight_Update=False)
class PlayOut(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.wavefiles = [f for f in listdir('./en') if path.isfile(path.
join('./en', f))]
def run(self):
for wav in list(self.wavefiles):
global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle
filename = wav
print('Playing:' + filename)
flgPlayOn = True
flgGoogle = False
time.sleep(0.5)
WAV_FILE = path.join('./en', wav)
wf = wave.open(WAV_FILE, 'rb')
stream = p.open(format=p.get_format_from_width(wf.getsampwidth(
)), channels=wf.getnchannels(), rate=wf.getframerate(),
output=True)
data = wf.readframes(CHUNK_OUT)
while len(data) > 0:
stream.write(data)
data = wf.readframes(CHUNK_OUT)
wf.close()
stream.stop_stream()
stream.close()
time.sleep(1)
flgPlayOn = False
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
flgRefReady = False
try:
reftext = r.recognize_google(audio)
print('correct one:' + str(reftext.encode('utf-8')))
filesave.write('correct one:' + str(reftext.encode('utf-8')))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
flgRefReady = True
while flgGoogle == False:
time.sleep(0.01)
flgFinish = True
p.terminate()
if False:
Frames_1024 = MIC_ARRAY.Read()
while BEAM.ListenBGNoise(Frames_1024) == 0:
time.sleep(0.0001)
threadLock = threading.Lock()
thread_play = PlayOut()
thread_play.start()
while flgFinish == False:
time.sleep(0.01)
print('**** recording *******')
ind = 0
flgLoad = [True] * PAR.CNTBUF
MIC_ARRAY.ForgetOldData()
while flgPlayOn == True:
Frames_1024 = MIC_ARRAY.Read()
"""Sound Source Localization"""
idxDir = LOC.Update(Frames_1024)
Beam_Audio = BEAM.BFCalc(Frames_1024, 1, Post_Filtering=False)
Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]
Audio_SD[ind:ind + PAR.N] = Beam_Audio
ind = ind + PAR.N
print('**** done recording **')
raw_data = Audio_SD[:ind].astype(np.int16)
byte_data = raw_data.tostring()
WAVE_OUTPUT_BF_SD = filename + 'SD.wav'
wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')
wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))
wf.writeframes(byte_data)
wf.close()
for i in range(0, PAR.m):
raw_data = Audio_Data[:ind, i].astype(np.int16)
byte_data = raw_data.tostring()
WAVE_OUTPUT_FILENAME_I = filename + 'channel' + str(i) + '.wav'
Data_Audio = 'Audio_Channel' + str(i)
wf = wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')
wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))
wf.writeframesraw(byte_data)
wf.close()
while flgRefReady == False:
time.sleep(0.01)
if True:
ResSum = 0
for i in range(0, 8):
file = filename + 'channel' + str(i) + '.wav'
WAV_FILE = path.join(path.dirname(path.realpath(__file__)), file)
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
try:
testtext = r.recognize_google(audio)
print('Google Speech Recognition for mic ' + str(i) +
'::::::::::' + str(testtext.encode('utf-8')))
filesave.write(' mic ' + str(i) + '::::::::::' + str(
testtext.encode('utf-8')))
filesave.write('\r\n')
res = wer.wer(reftext, testtext)
ResSum += 1.0 / 8.0 * res
print('Word Error Rate: {0:.04f}'.format(res))
filesave.write('Word Error Rate: {0:.04f}'.format(res))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
ResSum += 1.0 / 8.0
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
ResSum += 1.0 / 8.0
filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))
filesave.write('\r\n')
WAV_FILE = path.join(path.dirname(path.realpath(__file__)),
filename + 'SD.wav')
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source)
try:
testtext = r.recognize_google(audio)
print('Beam-forming result :::::::::::::::::::::::::' + str(
testtext.encode('utf-8')))
filesave.write('Beam-forming result :::::::::::::::::::::::::' +
str(testtext.encode('utf-8')))
filesave.write('\r\n')
res = wer.wer(reftext, testtext)
print('Word Error Rate: {0:.04f}'.format(res))
filesave.write('Word Error Rate: {0:.04f}'.format(res))
filesave.write('\r\n')
except sr.UnknownValueError:
print('Google Speech Recognition could not understand audio')
except sr.RequestError as e:
print(
'Could not request results from Google Speech Recognition service; {0}'
.format(e))
flgGoogle = True
time.sleep(0.03)
LOC.Stop()
MIC_ARRAY.Stop_Read()
filesave.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
'''
www.autonomous.ai
Phan Le Son
plson03@gmail.com
'''
import speech_recognition as sr
import pyaudio
from os import listdir
from os import path
import time
import wave
import threading
import numpy as np
import BF.BeamForming as BF
import BF.Parameter as PAR
import BF.asr_wer as wer
import BF.mic_array_read as READ
import BF.DOA as DOA
global flgLoad
flgGoogle = False
flgRefReady = False
flgPlayOn = False
flgFinish = False
CHUNK_OUT = 1024
reftext = None
filename = None
CHANNELS = 2
CHUNK = 1024 * 4 # PAR.m*PAR.N/CHANNELS # 1024*4
RATE = 64000 # sample rate
RECORD_SECONDS = 15
idxDir = 6
Audio_Data = np.zeros((np.floor(RECORD_SECONDS * RATE / 4), PAR.m))
Audio_SD = np.zeros(np.floor(RECORD_SECONDS * RATE / 4))
ind = 0
numCHUNK = np.floor(RATE * RECORD_SECONDS / CHUNK)
filesave = open("log.txt",'w')
p = pyaudio.PyAudio()
r = sr.Recognizer()
MIC_ARRAY = READ.Mic_Array_Read()
LOC = DOA.DOA_MicArray()
BEAM = BF.BeamFormingObj(Weight_Update=False)
class PlayOut(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.wavefiles = [f for f in listdir('./en') if path.isfile(path.join('./en', f))]
def run(self):
for wav in list(self.wavefiles):
global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle
filename = wav
print("Playing:" + filename)
flgPlayOn = True
flgGoogle = False
time.sleep(0.5)
WAV_FILE = path.join("./en", wav)
wf = wave.open(WAV_FILE, 'rb')
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
# read data
data = wf.readframes(CHUNK_OUT)
while len(data) > 0:
stream.write(data)
data = wf.readframes(CHUNK_OUT)
wf.close()
# stop stream
stream.stop_stream()
stream.close()
time.sleep(1)
flgPlayOn = False
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source) # read the entire WAV file
flgRefReady = False
# recognize speech using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
reftext = r.recognize_google(audio)
print("correct one:" + str(reftext.encode('utf-8')))
filesave.write("correct one:"+ str(reftext.encode('utf-8')))
filesave.write('\r\n')
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
flgRefReady = True
while (flgGoogle == False):
time.sleep(0.01)
flgFinish = True
p.terminate()
if False:
Frames_1024 = MIC_ARRAY.Read()
while (BEAM.ListenBGNoise(Frames_1024)==0):
time.sleep(0.0001)
threadLock = threading.Lock()
thread_play = PlayOut()
thread_play.start()
while (flgFinish == False):
time.sleep(0.01)
print("**** recording *******")
ind = 0
flgLoad = [True]*PAR.CNTBUF
MIC_ARRAY.ForgetOldData()
while (flgPlayOn == True):
Frames_1024 = MIC_ARRAY.Read()
'''Sound Source Localization'''
idxDir = LOC.Update(Frames_1024)
Beam_Audio = BEAM.BFCalc(Frames_1024, 1,Post_Filtering=False)
# Storage audio output
Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]
Audio_SD[ind:ind + PAR.N] = Beam_Audio
ind = ind + PAR.N
print("**** done recording **")
raw_data = Audio_SD[:ind].astype(np.int16)
byte_data = raw_data.tostring()
WAVE_OUTPUT_BF_SD = filename + "SD.wav"
wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')
wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))
wf.writeframes(byte_data)
wf.close()
for i in range(0, PAR.m):
raw_data = Audio_Data[:ind, i].astype(np.int16)
byte_data = raw_data.tostring()
WAVE_OUTPUT_FILENAME_I = filename + "channel" + str(i) + ".wav"
Data_Audio = "Audio_Channel" + str(i)
wf = wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')
wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE')) # (nchannels, sampwidth, framerate, nframes, comptype, compname
wf.writeframesraw(byte_data)
wf.close()
while (flgRefReady == False):
time.sleep(0.01)
if True:
ResSum=0
for i in range(0, 8):
file = filename + "channel" + str(i) + ".wav"
WAV_FILE = path.join(path.dirname(path.realpath(__file__)), file)
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source) # read the entire WAV file
# recognize speech using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
testtext = r.recognize_google(audio)
print("Google Speech Recognition for mic " + str(i) + "::::::::::" + str(testtext.encode('utf-8')))
filesave.write(" mic " + str(i) + "::::::::::" + str(testtext.encode('utf-8')))
filesave.write('\r\n')
res = wer.wer(reftext, testtext)
ResSum+= (1.0/8.0)*res
print('Word Error Rate: {0:.04f}'.format(res))
filesave.write('Word Error Rate: {0:.04f}'.format(res))
filesave.write('\r\n')
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
ResSum+= (1.0/8.0)
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
ResSum+= (1.0/8.0)
filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))
filesave.write('\r\n')
WAV_FILE = path.join(path.dirname(path.realpath(__file__)), filename + "SD.wav")
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source) # read the entire WAV file
# recognize speech using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
testtext = r.recognize_google(audio)
print("Beam-forming result :::::::::::::::::::::::::" + str(testtext.encode('utf-8')))
filesave.write("Beam-forming result :::::::::::::::::::::::::" + str(testtext.encode('utf-8')))
filesave.write('\r\n')
res = wer.wer(reftext, testtext)
print('Word Error Rate: {0:.04f}'.format(res))
filesave.write('Word Error Rate: {0:.04f}'.format(res))
filesave.write('\r\n')
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
flgGoogle = True
time.sleep(0.03)
LOC.Stop()
MIC_ARRAY.Stop_Read()
filesave.close()
'''
# recognize speech using Sphinx
try:
print("Sphinx thinks you said " + r.recognize_sphinx(audio))
except sr.UnknownValueError:
print("Sphinx could not understand audio")
except sr.RequestError as e:
print("Sphinx error; {0}".format(e))
'''
|
flexible
|
{
"blob_id": "8c458d66ab2f9a1bf1923eecb29c3c89f2808d0b",
"index": 3889,
"step-1": "<mask token>\n\n\nclass PlayOut(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.wavefiles = [f for f in listdir('./en') if path.isfile(path.\n join('./en', f))]\n\n def run(self):\n for wav in list(self.wavefiles):\n global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle\n filename = wav\n print('Playing:' + filename)\n flgPlayOn = True\n flgGoogle = False\n time.sleep(0.5)\n WAV_FILE = path.join('./en', wav)\n wf = wave.open(WAV_FILE, 'rb')\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth(\n )), channels=wf.getnchannels(), rate=wf.getframerate(),\n output=True)\n data = wf.readframes(CHUNK_OUT)\n while len(data) > 0:\n stream.write(data)\n data = wf.readframes(CHUNK_OUT)\n wf.close()\n stream.stop_stream()\n stream.close()\n time.sleep(1)\n flgPlayOn = False\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n flgRefReady = False\n try:\n reftext = r.recognize_google(audio)\n print('correct one:' + str(reftext.encode('utf-8')))\n filesave.write('correct one:' + str(reftext.encode('utf-8')))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n flgRefReady = True\n while flgGoogle == False:\n time.sleep(0.01)\n flgFinish = True\n p.terminate()\n\n\n<mask token>\n",
"step-2": "<mask token>\nglobal flgLoad\n<mask token>\n\n\nclass PlayOut(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.wavefiles = [f for f in listdir('./en') if path.isfile(path.\n join('./en', f))]\n\n def run(self):\n for wav in list(self.wavefiles):\n global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle\n filename = wav\n print('Playing:' + filename)\n flgPlayOn = True\n flgGoogle = False\n time.sleep(0.5)\n WAV_FILE = path.join('./en', wav)\n wf = wave.open(WAV_FILE, 'rb')\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth(\n )), channels=wf.getnchannels(), rate=wf.getframerate(),\n output=True)\n data = wf.readframes(CHUNK_OUT)\n while len(data) > 0:\n stream.write(data)\n data = wf.readframes(CHUNK_OUT)\n wf.close()\n stream.stop_stream()\n stream.close()\n time.sleep(1)\n flgPlayOn = False\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n flgRefReady = False\n try:\n reftext = r.recognize_google(audio)\n print('correct one:' + str(reftext.encode('utf-8')))\n filesave.write('correct one:' + str(reftext.encode('utf-8')))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n flgRefReady = True\n while flgGoogle == False:\n time.sleep(0.01)\n flgFinish = True\n p.terminate()\n\n\nif False:\n Frames_1024 = MIC_ARRAY.Read()\n while BEAM.ListenBGNoise(Frames_1024) == 0:\n time.sleep(0.0001)\n<mask token>\nthread_play.start()\nwhile flgFinish == False:\n time.sleep(0.01)\n print('**** recording *******')\n ind = 0\n flgLoad = [True] * PAR.CNTBUF\n MIC_ARRAY.ForgetOldData()\n while flgPlayOn == True:\n Frames_1024 = MIC_ARRAY.Read()\n \"\"\"Sound Source Localization\"\"\"\n idxDir = LOC.Update(Frames_1024)\n Beam_Audio = BEAM.BFCalc(Frames_1024, 1, Post_Filtering=False)\n Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]\n Audio_SD[ind:ind + PAR.N] = Beam_Audio\n ind = ind + PAR.N\n print('**** done recording **')\n raw_data = Audio_SD[:ind].astype(np.int16)\n byte_data = raw_data.tostring()\n WAVE_OUTPUT_BF_SD = filename + 'SD.wav'\n wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')\n wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))\n wf.writeframes(byte_data)\n wf.close()\n for i in range(0, PAR.m):\n raw_data = Audio_Data[:ind, i].astype(np.int16)\n byte_data = raw_data.tostring()\n WAVE_OUTPUT_FILENAME_I = filename + 'channel' + str(i) + '.wav'\n Data_Audio = 'Audio_Channel' + str(i)\n wf = wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')\n wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))\n wf.writeframesraw(byte_data)\n wf.close()\n while flgRefReady == False:\n time.sleep(0.01)\n if True:\n ResSum = 0\n for i in range(0, 8):\n file = filename + 'channel' + str(i) + '.wav'\n WAV_FILE = path.join(path.dirname(path.realpath(__file__)), file)\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n try:\n testtext = r.recognize_google(audio)\n print('Google Speech Recognition for mic ' + str(i) +\n '::::::::::' + str(testtext.encode('utf-8')))\n filesave.write(' mic ' + str(i) + '::::::::::' + str(\n testtext.encode('utf-8')))\n filesave.write('\\r\\n')\n res = wer.wer(reftext, testtext)\n ResSum += 1.0 / 8.0 * res\n print('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech 
Recognition could not understand audio')\n ResSum += 1.0 / 8.0\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n ResSum += 1.0 / 8.0\n filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))\n filesave.write('\\r\\n')\n WAV_FILE = path.join(path.dirname(path.realpath(__file__)), \n filename + 'SD.wav')\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n try:\n testtext = r.recognize_google(audio)\n print('Beam-forming result :::::::::::::::::::::::::' + str(\n testtext.encode('utf-8')))\n filesave.write('Beam-forming result :::::::::::::::::::::::::' +\n str(testtext.encode('utf-8')))\n filesave.write('\\r\\n')\n res = wer.wer(reftext, testtext)\n print('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n flgGoogle = True\n time.sleep(0.03)\nLOC.Stop()\nMIC_ARRAY.Stop_Read()\nfilesave.close()\n<mask token>\n",
"step-3": "<mask token>\nglobal flgLoad\nflgGoogle = False\nflgRefReady = False\nflgPlayOn = False\nflgFinish = False\nCHUNK_OUT = 1024\nreftext = None\nfilename = None\nCHANNELS = 2\nCHUNK = 1024 * 4\nRATE = 64000\nRECORD_SECONDS = 15\nidxDir = 6\nAudio_Data = np.zeros((np.floor(RECORD_SECONDS * RATE / 4), PAR.m))\nAudio_SD = np.zeros(np.floor(RECORD_SECONDS * RATE / 4))\nind = 0\nnumCHUNK = np.floor(RATE * RECORD_SECONDS / CHUNK)\nfilesave = open('log.txt', 'w')\np = pyaudio.PyAudio()\nr = sr.Recognizer()\nMIC_ARRAY = READ.Mic_Array_Read()\nLOC = DOA.DOA_MicArray()\nBEAM = BF.BeamFormingObj(Weight_Update=False)\n\n\nclass PlayOut(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.wavefiles = [f for f in listdir('./en') if path.isfile(path.\n join('./en', f))]\n\n def run(self):\n for wav in list(self.wavefiles):\n global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle\n filename = wav\n print('Playing:' + filename)\n flgPlayOn = True\n flgGoogle = False\n time.sleep(0.5)\n WAV_FILE = path.join('./en', wav)\n wf = wave.open(WAV_FILE, 'rb')\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth(\n )), channels=wf.getnchannels(), rate=wf.getframerate(),\n output=True)\n data = wf.readframes(CHUNK_OUT)\n while len(data) > 0:\n stream.write(data)\n data = wf.readframes(CHUNK_OUT)\n wf.close()\n stream.stop_stream()\n stream.close()\n time.sleep(1)\n flgPlayOn = False\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n flgRefReady = False\n try:\n reftext = r.recognize_google(audio)\n print('correct one:' + str(reftext.encode('utf-8')))\n filesave.write('correct one:' + str(reftext.encode('utf-8')))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n flgRefReady = True\n while flgGoogle == False:\n time.sleep(0.01)\n flgFinish = True\n p.terminate()\n\n\nif False:\n Frames_1024 = MIC_ARRAY.Read()\n while BEAM.ListenBGNoise(Frames_1024) == 0:\n time.sleep(0.0001)\nthreadLock = threading.Lock()\nthread_play = PlayOut()\nthread_play.start()\nwhile flgFinish == False:\n time.sleep(0.01)\n print('**** recording *******')\n ind = 0\n flgLoad = [True] * PAR.CNTBUF\n MIC_ARRAY.ForgetOldData()\n while flgPlayOn == True:\n Frames_1024 = MIC_ARRAY.Read()\n \"\"\"Sound Source Localization\"\"\"\n idxDir = LOC.Update(Frames_1024)\n Beam_Audio = BEAM.BFCalc(Frames_1024, 1, Post_Filtering=False)\n Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]\n Audio_SD[ind:ind + PAR.N] = Beam_Audio\n ind = ind + PAR.N\n print('**** done recording **')\n raw_data = Audio_SD[:ind].astype(np.int16)\n byte_data = raw_data.tostring()\n WAVE_OUTPUT_BF_SD = filename + 'SD.wav'\n wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')\n wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))\n wf.writeframes(byte_data)\n wf.close()\n for i in range(0, PAR.m):\n raw_data = Audio_Data[:ind, i].astype(np.int16)\n byte_data = raw_data.tostring()\n WAVE_OUTPUT_FILENAME_I = filename + 'channel' + str(i) + '.wav'\n Data_Audio = 'Audio_Channel' + str(i)\n wf = wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')\n wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))\n wf.writeframesraw(byte_data)\n wf.close()\n while flgRefReady == False:\n time.sleep(0.01)\n if True:\n ResSum = 0\n for i in range(0, 8):\n file = filename + 'channel' + str(i) + '.wav'\n WAV_FILE = 
path.join(path.dirname(path.realpath(__file__)), file)\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n try:\n testtext = r.recognize_google(audio)\n print('Google Speech Recognition for mic ' + str(i) +\n '::::::::::' + str(testtext.encode('utf-8')))\n filesave.write(' mic ' + str(i) + '::::::::::' + str(\n testtext.encode('utf-8')))\n filesave.write('\\r\\n')\n res = wer.wer(reftext, testtext)\n ResSum += 1.0 / 8.0 * res\n print('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n ResSum += 1.0 / 8.0\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n ResSum += 1.0 / 8.0\n filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))\n filesave.write('\\r\\n')\n WAV_FILE = path.join(path.dirname(path.realpath(__file__)), \n filename + 'SD.wav')\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n try:\n testtext = r.recognize_google(audio)\n print('Beam-forming result :::::::::::::::::::::::::' + str(\n testtext.encode('utf-8')))\n filesave.write('Beam-forming result :::::::::::::::::::::::::' +\n str(testtext.encode('utf-8')))\n filesave.write('\\r\\n')\n res = wer.wer(reftext, testtext)\n print('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n flgGoogle = True\n time.sleep(0.03)\nLOC.Stop()\nMIC_ARRAY.Stop_Read()\nfilesave.close()\n<mask token>\n",
"step-4": "<mask token>\nimport speech_recognition as sr\nimport pyaudio\nfrom os import listdir\nfrom os import path\nimport time\nimport wave\nimport threading\nimport numpy as np\nimport BF.BeamForming as BF\nimport BF.Parameter as PAR\nimport BF.asr_wer as wer\nimport BF.mic_array_read as READ\nimport BF.DOA as DOA\nglobal flgLoad\nflgGoogle = False\nflgRefReady = False\nflgPlayOn = False\nflgFinish = False\nCHUNK_OUT = 1024\nreftext = None\nfilename = None\nCHANNELS = 2\nCHUNK = 1024 * 4\nRATE = 64000\nRECORD_SECONDS = 15\nidxDir = 6\nAudio_Data = np.zeros((np.floor(RECORD_SECONDS * RATE / 4), PAR.m))\nAudio_SD = np.zeros(np.floor(RECORD_SECONDS * RATE / 4))\nind = 0\nnumCHUNK = np.floor(RATE * RECORD_SECONDS / CHUNK)\nfilesave = open('log.txt', 'w')\np = pyaudio.PyAudio()\nr = sr.Recognizer()\nMIC_ARRAY = READ.Mic_Array_Read()\nLOC = DOA.DOA_MicArray()\nBEAM = BF.BeamFormingObj(Weight_Update=False)\n\n\nclass PlayOut(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.wavefiles = [f for f in listdir('./en') if path.isfile(path.\n join('./en', f))]\n\n def run(self):\n for wav in list(self.wavefiles):\n global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle\n filename = wav\n print('Playing:' + filename)\n flgPlayOn = True\n flgGoogle = False\n time.sleep(0.5)\n WAV_FILE = path.join('./en', wav)\n wf = wave.open(WAV_FILE, 'rb')\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth(\n )), channels=wf.getnchannels(), rate=wf.getframerate(),\n output=True)\n data = wf.readframes(CHUNK_OUT)\n while len(data) > 0:\n stream.write(data)\n data = wf.readframes(CHUNK_OUT)\n wf.close()\n stream.stop_stream()\n stream.close()\n time.sleep(1)\n flgPlayOn = False\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n flgRefReady = False\n try:\n reftext = r.recognize_google(audio)\n print('correct one:' + str(reftext.encode('utf-8')))\n filesave.write('correct one:' + str(reftext.encode('utf-8')))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n flgRefReady = True\n while flgGoogle == False:\n time.sleep(0.01)\n flgFinish = True\n p.terminate()\n\n\nif False:\n Frames_1024 = MIC_ARRAY.Read()\n while BEAM.ListenBGNoise(Frames_1024) == 0:\n time.sleep(0.0001)\nthreadLock = threading.Lock()\nthread_play = PlayOut()\nthread_play.start()\nwhile flgFinish == False:\n time.sleep(0.01)\n print('**** recording *******')\n ind = 0\n flgLoad = [True] * PAR.CNTBUF\n MIC_ARRAY.ForgetOldData()\n while flgPlayOn == True:\n Frames_1024 = MIC_ARRAY.Read()\n \"\"\"Sound Source Localization\"\"\"\n idxDir = LOC.Update(Frames_1024)\n Beam_Audio = BEAM.BFCalc(Frames_1024, 1, Post_Filtering=False)\n Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]\n Audio_SD[ind:ind + PAR.N] = Beam_Audio\n ind = ind + PAR.N\n print('**** done recording **')\n raw_data = Audio_SD[:ind].astype(np.int16)\n byte_data = raw_data.tostring()\n WAVE_OUTPUT_BF_SD = filename + 'SD.wav'\n wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')\n wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))\n wf.writeframes(byte_data)\n wf.close()\n for i in range(0, PAR.m):\n raw_data = Audio_Data[:ind, i].astype(np.int16)\n byte_data = raw_data.tostring()\n WAVE_OUTPUT_FILENAME_I = filename + 'channel' + str(i) + '.wav'\n Data_Audio = 'Audio_Channel' + str(i)\n wf = 
wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')\n wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))\n wf.writeframesraw(byte_data)\n wf.close()\n while flgRefReady == False:\n time.sleep(0.01)\n if True:\n ResSum = 0\n for i in range(0, 8):\n file = filename + 'channel' + str(i) + '.wav'\n WAV_FILE = path.join(path.dirname(path.realpath(__file__)), file)\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n try:\n testtext = r.recognize_google(audio)\n print('Google Speech Recognition for mic ' + str(i) +\n '::::::::::' + str(testtext.encode('utf-8')))\n filesave.write(' mic ' + str(i) + '::::::::::' + str(\n testtext.encode('utf-8')))\n filesave.write('\\r\\n')\n res = wer.wer(reftext, testtext)\n ResSum += 1.0 / 8.0 * res\n print('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n ResSum += 1.0 / 8.0\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n ResSum += 1.0 / 8.0\n filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))\n filesave.write('\\r\\n')\n WAV_FILE = path.join(path.dirname(path.realpath(__file__)), \n filename + 'SD.wav')\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source)\n try:\n testtext = r.recognize_google(audio)\n print('Beam-forming result :::::::::::::::::::::::::' + str(\n testtext.encode('utf-8')))\n filesave.write('Beam-forming result :::::::::::::::::::::::::' +\n str(testtext.encode('utf-8')))\n filesave.write('\\r\\n')\n res = wer.wer(reftext, testtext)\n print('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print(\n 'Could not request results from Google Speech Recognition service; {0}'\n .format(e))\n flgGoogle = True\n time.sleep(0.03)\nLOC.Stop()\nMIC_ARRAY.Stop_Read()\nfilesave.close()\n<mask token>\n",
"step-5": "'''\nwww.autonomous.ai\nPhan Le Son\nplson03@gmail.com\n'''\nimport speech_recognition as sr\nimport pyaudio\nfrom os import listdir\nfrom os import path\nimport time\nimport wave\nimport threading\nimport numpy as np\nimport BF.BeamForming as BF\nimport BF.Parameter as PAR\nimport BF.asr_wer as wer\nimport BF.mic_array_read as READ\nimport BF.DOA as DOA\n\nglobal flgLoad\n\nflgGoogle = False\nflgRefReady = False\nflgPlayOn = False\nflgFinish = False\nCHUNK_OUT = 1024\n\nreftext = None\nfilename = None\nCHANNELS = 2\nCHUNK = 1024 * 4 # PAR.m*PAR.N/CHANNELS # 1024*4\nRATE = 64000 # sample rate\nRECORD_SECONDS = 15\nidxDir = 6\n\nAudio_Data = np.zeros((np.floor(RECORD_SECONDS * RATE / 4), PAR.m))\nAudio_SD = np.zeros(np.floor(RECORD_SECONDS * RATE / 4))\nind = 0\nnumCHUNK = np.floor(RATE * RECORD_SECONDS / CHUNK)\n\nfilesave = open(\"log.txt\",'w')\np = pyaudio.PyAudio()\nr = sr.Recognizer()\nMIC_ARRAY = READ.Mic_Array_Read()\nLOC = DOA.DOA_MicArray()\nBEAM = BF.BeamFormingObj(Weight_Update=False)\n\n\nclass PlayOut(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.wavefiles = [f for f in listdir('./en') if path.isfile(path.join('./en', f))]\n\n def run(self):\n for wav in list(self.wavefiles):\n global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle\n\n filename = wav\n print(\"Playing:\" + filename)\n flgPlayOn = True\n flgGoogle = False\n time.sleep(0.5)\n WAV_FILE = path.join(\"./en\", wav)\n wf = wave.open(WAV_FILE, 'rb')\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n # read data\n data = wf.readframes(CHUNK_OUT)\n while len(data) > 0:\n stream.write(data)\n data = wf.readframes(CHUNK_OUT)\n\n wf.close()\n # stop stream\n stream.stop_stream()\n stream.close()\n\n time.sleep(1)\n flgPlayOn = False\n\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source) # read the entire WAV file\n\n flgRefReady = False\n # recognize speech using Google Speech Recognition\n try:\n # for testing purposes, we're just using the default API key\n # to use another API key, use `r.recognize_google(audio, key=\"GOOGLE_SPEECH_RECOGNITION_API_KEY\")`\n # instead of `r.recognize_google(audio)`\n reftext = r.recognize_google(audio)\n print(\"correct one:\" + str(reftext.encode('utf-8')))\n filesave.write(\"correct one:\"+ str(reftext.encode('utf-8')))\n filesave.write('\\r\\n')\n\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n\n flgRefReady = True\n\n while (flgGoogle == False):\n time.sleep(0.01)\n\n flgFinish = True\n\n p.terminate()\n\nif False:\n Frames_1024 = MIC_ARRAY.Read()\n while (BEAM.ListenBGNoise(Frames_1024)==0):\n time.sleep(0.0001)\n\nthreadLock = threading.Lock()\nthread_play = PlayOut()\nthread_play.start()\nwhile (flgFinish == False):\n time.sleep(0.01)\n print(\"**** recording *******\")\n ind = 0\n flgLoad = [True]*PAR.CNTBUF\n MIC_ARRAY.ForgetOldData()\n while (flgPlayOn == True):\n Frames_1024 = MIC_ARRAY.Read()\n '''Sound Source Localization'''\n idxDir = LOC.Update(Frames_1024)\n Beam_Audio = BEAM.BFCalc(Frames_1024, 1,Post_Filtering=False)\n\n\n\n # Storage audio output\n Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]\n Audio_SD[ind:ind + PAR.N] = Beam_Audio\n ind = ind + PAR.N\n\n print(\"**** done recording **\")\n \n\n raw_data = 
Audio_SD[:ind].astype(np.int16)\n byte_data = raw_data.tostring()\n\n WAVE_OUTPUT_BF_SD = filename + \"SD.wav\"\n wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')\n wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))\n wf.writeframes(byte_data)\n wf.close()\n\n for i in range(0, PAR.m):\n raw_data = Audio_Data[:ind, i].astype(np.int16)\n byte_data = raw_data.tostring()\n WAVE_OUTPUT_FILENAME_I = filename + \"channel\" + str(i) + \".wav\"\n Data_Audio = \"Audio_Channel\" + str(i)\n wf = wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')\n wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE')) # (nchannels, sampwidth, framerate, nframes, comptype, compname\n wf.writeframesraw(byte_data)\n wf.close()\n\n while (flgRefReady == False):\n time.sleep(0.01)\n\n if True:\n ResSum=0\n for i in range(0, 8):\n file = filename + \"channel\" + str(i) + \".wav\"\n WAV_FILE = path.join(path.dirname(path.realpath(__file__)), file)\n\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source) # read the entire WAV file\n\n # recognize speech using Google Speech Recognition\n try:\n # for testing purposes, we're just using the default API key\n # to use another API key, use `r.recognize_google(audio, key=\"GOOGLE_SPEECH_RECOGNITION_API_KEY\")`\n # instead of `r.recognize_google(audio)`\n testtext = r.recognize_google(audio)\n print(\"Google Speech Recognition for mic \" + str(i) + \"::::::::::\" + str(testtext.encode('utf-8')))\n filesave.write(\" mic \" + str(i) + \"::::::::::\" + str(testtext.encode('utf-8')))\n filesave.write('\\r\\n') \n res = wer.wer(reftext, testtext)\n ResSum+= (1.0/8.0)*res\n print('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('\\r\\n')\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n ResSum+= (1.0/8.0)\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n ResSum+= (1.0/8.0) \n filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))\n filesave.write('\\r\\n')\n WAV_FILE = path.join(path.dirname(path.realpath(__file__)), filename + \"SD.wav\")\n\n with sr.WavFile(WAV_FILE) as source:\n audio = r.record(source) # read the entire WAV file\n\n # recognize speech using Google Speech Recognition\n try:\n # for testing purposes, we're just using the default API key\n # to use another API key, use `r.recognize_google(audio, key=\"GOOGLE_SPEECH_RECOGNITION_API_KEY\")`\n # instead of `r.recognize_google(audio)`\n testtext = r.recognize_google(audio)\n print(\"Beam-forming result :::::::::::::::::::::::::\" + str(testtext.encode('utf-8')))\n filesave.write(\"Beam-forming result :::::::::::::::::::::::::\" + str(testtext.encode('utf-8')))\n filesave.write('\\r\\n')\n\n res = wer.wer(reftext, testtext)\n print('Word Error Rate: {0:.04f}'.format(res))\n \n filesave.write('Word Error Rate: {0:.04f}'.format(res))\n filesave.write('\\r\\n') \n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n\n flgGoogle = True\n time.sleep(0.03)\n\nLOC.Stop()\nMIC_ARRAY.Stop_Read()\nfilesave.close()\n'''\n# recognize speech using Sphinx\ntry:\n print(\"Sphinx thinks you said \" + r.recognize_sphinx(audio))\nexcept sr.UnknownValueError:\n print(\"Sphinx could not understand audio\")\nexcept sr.RequestError as e:\n print(\"Sphinx error; {0}\".format(e))\n'''\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from flask import Flask, request, render_template
from utils import get_result
app = Flask(__name__)
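# Landing page: render the input form (templates/index.html).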
@app.route('/')
def index():
return render_template('index.html')
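# Read the submitted 'sentence' field, run get_result on it, and render the output page.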
@app.route("/result", methods=["POST"])
def result():
form_data = request.form
sentence = form_data['sentence']
output = get_result(sentence)
return render_template('result.html', result=output)
if __name__ == '__main__':
app.run(debug=True)
|
normal
|
{
"blob_id": "264da5a2ab7d5c311d8a59b06c81ea2156cefd76",
"index": 9627,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, request, render_template\nfrom utils import get_result\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, request, render_template\n\nfrom utils import get_result\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/result\", methods=[\"POST\"])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class car:
def info(self):
print(self.speed, self.color, self.model)
def increment(self):
print('increment')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class car:
def info(self):
print(self.speed, self.color, self.model)
def increment(self):
print('increment')
def decrement(self):
print('decrement')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class car:
def info(self):
print(self.speed, self.color, self.model)
def increment(self):
print('increment')
def decrement(self):
print('decrement')
<|reserved_special_token_0|>
BMW.info()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class car:
def info(self):
print(self.speed, self.color, self.model)
def increment(self):
print('increment')
def decrement(self):
print('decrement')
BMW = car()
BMW.speed = 320
BMW.color = 'red'
BMW.model = 1982
BMW.info()
Camry = car()
Camry.speed = 220
Camry.color = 'blue'
|
flexible
|
{
"blob_id": "022f588455d8624d0b0107180417f65816254cb1",
"index": 8687,
"step-1": "class car:\n\n def info(self):\n print(self.speed, self.color, self.model)\n\n def increment(self):\n print('increment')\n <mask token>\n\n\n<mask token>\n",
"step-2": "class car:\n\n def info(self):\n print(self.speed, self.color, self.model)\n\n def increment(self):\n print('increment')\n\n def decrement(self):\n print('decrement')\n\n\n<mask token>\n",
"step-3": "class car:\n\n def info(self):\n print(self.speed, self.color, self.model)\n\n def increment(self):\n print('increment')\n\n def decrement(self):\n print('decrement')\n\n\n<mask token>\nBMW.info()\n<mask token>\n",
"step-4": "class car:\n\n def info(self):\n print(self.speed, self.color, self.model)\n\n def increment(self):\n print('increment')\n\n def decrement(self):\n print('decrement')\n\n\nBMW = car()\nBMW.speed = 320\nBMW.color = 'red'\nBMW.model = 1982\nBMW.info()\nCamry = car()\nCamry.speed = 220\nCamry.color = 'blue'\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
files = [
"arria2_ddr3.qip"
]
|
normal
|
{
"blob_id": "cad881dd29be16de8375b3ce6e4a437562a05097",
"index": 5426,
"step-1": "<mask token>\n",
"step-2": "files = ['arria2_ddr3.qip']\n",
"step-3": "files = [\n \"arria2_ddr3.qip\"\n ]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from arma_scipy.fit import fit, predict
|
flexible
|
{
"blob_id": "0f6512bb734336a67eab2f13949dd960f5ffc1d5",
"index": 7758,
"step-1": "<mask token>\n",
"step-2": "from arma_scipy.fit import fit, predict\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#-*- coding: utf-8 -*-
from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON
import time, random
# tests
NOW=time.time()
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print("%.2f segundos para consultar a dbpedia"%(time.time()-NOW,))
for result in results["results"]["bindings"]:
print(result["label"]["value"]+", "+result["label"]["xml:lang"])
PREFIX="""PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ops: <http://purl.org/socialparticipation/ops#>
PREFIX opa: <http://purl.org/socialparticipation/opa#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX tsioc: <http://rdfs.org/sioc/types#>
PREFIX schema: <http://schema.org/>
"""
q2="SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}"
NOW=time.time()
sparql3 = SPARQLWrapper("http://localhost:82/participabr/query")
#sparql3 = SPARQLWrapper("http://200.144.255.210:8082/participabr/query")
sparql3.setQuery(PREFIX+q2)
sparql3.setReturnFormat(JSON)
results3 = sparql3.query().convert()
print("%.2f segundos para puxar todos os nomes dos participantes do Participa.br"%(time.time()-NOW,))
for i in results3["results"]["bindings"][-10:]: print(u"participante: " +i["nome"]["value"])
NOW=time.time()
q="SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}"
sparql3.setQuery(PREFIX+q)
sparql3.setReturnFormat(JSON)
results4 = sparql3.query().convert()
print("%.2f segundos para puxar todos os comentários do Participa.br"%(time.time()-NOW,))
NOW=time.time()
print("dados lidos, processando")
import string, nltk as k
# histogram of the words
palavras=string.join([i["texto"]["value"].lower() for i in results4["results"]["bindings"]])
exclude = set(string.punctuation)
palavras = ''.join(ch for ch in palavras if ch not in exclude)
palavras_=palavras.split()
#fdist=k.FreqDist(palavras_)
print("feita primeira freq dist em %.2f"%(time.time()-NOW,))
NOW=time.time()
stopwords = set(k.corpus.stopwords.words('portuguese'))
palavras__=[pp for pp in palavras_ if pp not in stopwords]
fdist_=k.FreqDist(palavras__)
print("feita segunda freq dist (retiradas stopwords) em %.2f"%(time.time()-NOW,))
#NOW=time.time()
#stemmer = k.stem.RSLPStemmer()
#palavras___=[stemmer.stem(pp) for pp in palavras__]
#fdist__=k.FreqDist(palavras___)
#print("feita terceira freq dist (radicalizada) em %.2f"%(time.time()-NOW,))
##################
# pull comments from the sparql endpoint.
# keep 10 and classify them by hand
# build a histogram of all the words
# pick the most frequent ones, or with an offset,
# or the least frequent ones
# build a feature vector with them.
# choosing the 200 most frequent words
palavras_escolhidas=fdist_.keys()[:200]
# other features we could choose:
# *) number of words ending in a, o, e or s
# *) average length of the words used
# *) use of stopwords
# a larger set of hand-labelled classifications is needed
# to judge which part of the histogram
# is best to consider.
#########
def document_features(documento):
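    # bag-of-words indicator features: one boolean per chosen frequent word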
features={}
for palavra in palavras_escolhidas:
features["contains(%s)"%(palavra,)]=(palavra in documento)
return features
# building it with dummy classes
msgs= [(rr["texto"]["value"],"pos") for rr in results4["results"]["bindings"][:1000]]
msgs2=[(rr["texto"]["value"],"neg") for rr in results4["results"]["bindings"][1000:2000]]
msgs_=msgs+msgs2
random.shuffle(msgs_)
feature_sets=[(document_features(msg[0]),msg[1]) for msg in msgs_]
train_set, test_set = feature_sets[1000:], feature_sets[:1000]
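# train Naive Bayes on the indicator features; the pos/neg labels above are dummy placeholders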
classifier = k.NaiveBayesClassifier.train(train_set)
########
# The most frequent words can be useful since the comments
# are short and we want the feature vector to carry information
# The least frequent ones are the rarest words, informative
# for detecting the author's niche topics
# Those of intermediate incidence are considered the most representative
# of the subject matter
|
normal
|
{
"blob_id": "c5b50420788ddde7483a46c66aca3922ddb47952",
"index": 6199,
"step-1": "<mask token>\n\n\ndef document_features(documento):\n features = {}\n for palavra in palavras_escolhidas:\n features['contains(%s)' % (palavra,)] = palavra in documento\n return features\n\n\n<mask token>\n",
"step-2": "<mask token>\nsparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n SELECT ?label\n WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }\n\"\"\"\n )\nsparql.setReturnFormat(JSON)\n<mask token>\nprint('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))\nfor result in results['results']['bindings']:\n print(result['label']['value'] + ', ' + result['label']['xml:lang'])\n<mask token>\nsparql3.setQuery(PREFIX + q2)\nsparql3.setReturnFormat(JSON)\n<mask token>\nprint(\n '%.2f segundos para puxar todos os nomes dos participantes do Participa.br'\n % (time.time() - NOW,))\nfor i in results3['results']['bindings'][-10:]:\n print(u'participante: ' + i['nome']['value'])\n<mask token>\nsparql3.setQuery(PREFIX + q)\nsparql3.setReturnFormat(JSON)\n<mask token>\nprint('%.2f segundos para puxar todos os comentários do Participa.br' % (\n time.time() - NOW,))\n<mask token>\nprint('dados lidos, processando')\n<mask token>\nprint('feita primeira freq dist em %.2f' % (time.time() - NOW,))\n<mask token>\nprint('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(\n ) - NOW,))\n<mask token>\n\n\ndef document_features(documento):\n features = {}\n for palavra in palavras_escolhidas:\n features['contains(%s)' % (palavra,)] = palavra in documento\n return features\n\n\n<mask token>\nrandom.shuffle(msgs_)\n<mask token>\n",
"step-3": "<mask token>\nNOW = time.time()\nsparql = SPARQLWrapper('http://dbpedia.org/sparql')\nsparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n SELECT ?label\n WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }\n\"\"\"\n )\nsparql.setReturnFormat(JSON)\nresults = sparql.query().convert()\nprint('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))\nfor result in results['results']['bindings']:\n print(result['label']['value'] + ', ' + result['label']['xml:lang'])\nPREFIX = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX ops: <http://purl.org/socialparticipation/ops#>\nPREFIX opa: <http://purl.org/socialparticipation/opa#>\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\nPREFIX dc: <http://purl.org/dc/terms/>\nPREFIX tsioc: <http://rdfs.org/sioc/types#>\nPREFIX schema: <http://schema.org/>\n\"\"\"\nq2 = 'SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}'\nNOW = time.time()\nsparql3 = SPARQLWrapper('http://localhost:82/participabr/query')\nsparql3.setQuery(PREFIX + q2)\nsparql3.setReturnFormat(JSON)\nresults3 = sparql3.query().convert()\nprint(\n '%.2f segundos para puxar todos os nomes dos participantes do Participa.br'\n % (time.time() - NOW,))\nfor i in results3['results']['bindings'][-10:]:\n print(u'participante: ' + i['nome']['value'])\nNOW = time.time()\nq = (\n 'SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}'\n )\nsparql3.setQuery(PREFIX + q)\nsparql3.setReturnFormat(JSON)\nresults4 = sparql3.query().convert()\nprint('%.2f segundos para puxar todos os comentários do Participa.br' % (\n time.time() - NOW,))\nNOW = time.time()\nprint('dados lidos, processando')\n<mask token>\npalavras = string.join([i['texto']['value'].lower() for i in results4[\n 'results']['bindings']])\nexclude = set(string.punctuation)\npalavras = ''.join(ch for ch in palavras if ch not in exclude)\npalavras_ = palavras.split()\nprint('feita primeira freq dist em %.2f' % (time.time() - NOW,))\nNOW = time.time()\nstopwords = set(k.corpus.stopwords.words('portuguese'))\npalavras__ = [pp for pp in palavras_ if pp not in stopwords]\nfdist_ = k.FreqDist(palavras__)\nprint('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(\n ) - NOW,))\npalavras_escolhidas = fdist_.keys()[:200]\n\n\ndef document_features(documento):\n features = {}\n for palavra in palavras_escolhidas:\n features['contains(%s)' % (palavra,)] = palavra in documento\n return features\n\n\nmsgs = [(rr['texto']['value'], 'pos') for rr in results4['results'][\n 'bindings'][:1000]]\nmsgs2 = [(rr['texto']['value'], 'neg') for rr in results4['results'][\n 'bindings'][1000:2000]]\nmsgs_ = msgs + msgs2\nrandom.shuffle(msgs_)\nfeature_sets = [(document_features(msg[0]), msg[1]) for msg in msgs_]\ntrain_set, test_set = feature_sets[1000:], feature_sets[:1000]\nclassifier = k.NaiveBayesClassifier.train(train_set)\n",
"step-4": "from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON\nimport time, random\nNOW = time.time()\nsparql = SPARQLWrapper('http://dbpedia.org/sparql')\nsparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n SELECT ?label\n WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }\n\"\"\"\n )\nsparql.setReturnFormat(JSON)\nresults = sparql.query().convert()\nprint('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))\nfor result in results['results']['bindings']:\n print(result['label']['value'] + ', ' + result['label']['xml:lang'])\nPREFIX = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX ops: <http://purl.org/socialparticipation/ops#>\nPREFIX opa: <http://purl.org/socialparticipation/opa#>\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\nPREFIX dc: <http://purl.org/dc/terms/>\nPREFIX tsioc: <http://rdfs.org/sioc/types#>\nPREFIX schema: <http://schema.org/>\n\"\"\"\nq2 = 'SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}'\nNOW = time.time()\nsparql3 = SPARQLWrapper('http://localhost:82/participabr/query')\nsparql3.setQuery(PREFIX + q2)\nsparql3.setReturnFormat(JSON)\nresults3 = sparql3.query().convert()\nprint(\n '%.2f segundos para puxar todos os nomes dos participantes do Participa.br'\n % (time.time() - NOW,))\nfor i in results3['results']['bindings'][-10:]:\n print(u'participante: ' + i['nome']['value'])\nNOW = time.time()\nq = (\n 'SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}'\n )\nsparql3.setQuery(PREFIX + q)\nsparql3.setReturnFormat(JSON)\nresults4 = sparql3.query().convert()\nprint('%.2f segundos para puxar todos os comentários do Participa.br' % (\n time.time() - NOW,))\nNOW = time.time()\nprint('dados lidos, processando')\nimport string, nltk as k\npalavras = string.join([i['texto']['value'].lower() for i in results4[\n 'results']['bindings']])\nexclude = set(string.punctuation)\npalavras = ''.join(ch for ch in palavras if ch not in exclude)\npalavras_ = palavras.split()\nprint('feita primeira freq dist em %.2f' % (time.time() - NOW,))\nNOW = time.time()\nstopwords = set(k.corpus.stopwords.words('portuguese'))\npalavras__ = [pp for pp in palavras_ if pp not in stopwords]\nfdist_ = k.FreqDist(palavras__)\nprint('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(\n ) - NOW,))\npalavras_escolhidas = fdist_.keys()[:200]\n\n\ndef document_features(documento):\n features = {}\n for palavra in palavras_escolhidas:\n features['contains(%s)' % (palavra,)] = palavra in documento\n return features\n\n\nmsgs = [(rr['texto']['value'], 'pos') for rr in results4['results'][\n 'bindings'][:1000]]\nmsgs2 = [(rr['texto']['value'], 'neg') for rr in results4['results'][\n 'bindings'][1000:2000]]\nmsgs_ = msgs + msgs2\nrandom.shuffle(msgs_)\nfeature_sets = [(document_features(msg[0]), msg[1]) for msg in msgs_]\ntrain_set, test_set = feature_sets[1000:], feature_sets[:1000]\nclassifier = k.NaiveBayesClassifier.train(train_set)\n",
"step-5": "#-*- coding: utf-8 -*-\nfrom SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON\nimport time, random\n\n# testes\nNOW=time.time()\nsparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\nsparql.setQuery(\"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n SELECT ?label\n WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }\n\"\"\")\nsparql.setReturnFormat(JSON)\nresults = sparql.query().convert()\nprint(\"%.2f segundos para consultar a dbpedia\"%(time.time()-NOW,))\n\nfor result in results[\"results\"][\"bindings\"]:\n print(result[\"label\"][\"value\"]+\", \"+result[\"label\"][\"xml:lang\"])\n\nPREFIX=\"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX ops: <http://purl.org/socialparticipation/ops#>\nPREFIX opa: <http://purl.org/socialparticipation/opa#>\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\nPREFIX dc: <http://purl.org/dc/terms/>\nPREFIX tsioc: <http://rdfs.org/sioc/types#>\nPREFIX schema: <http://schema.org/>\n\"\"\"\n\nq2=\"SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}\"\nNOW=time.time()\nsparql3 = SPARQLWrapper(\"http://localhost:82/participabr/query\")\n#sparql3 = SPARQLWrapper(\"http://200.144.255.210:8082/participabr/query\")\nsparql3.setQuery(PREFIX+q2)\nsparql3.setReturnFormat(JSON)\nresults3 = sparql3.query().convert()\nprint(\"%.2f segundos para puxar todos os nomes dos participantes do Participa.br\"%(time.time()-NOW,))\n\nfor i in results3[\"results\"][\"bindings\"][-10:]: print(u\"participante: \" +i[\"nome\"][\"value\"])\n\nNOW=time.time()\nq=\"SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}\"\nsparql3.setQuery(PREFIX+q)\nsparql3.setReturnFormat(JSON)\nresults4 = sparql3.query().convert()\nprint(\"%.2f segundos para puxar todos os comentários do Participa.br\"%(time.time()-NOW,))\n\nNOW=time.time()\nprint(\"dados lidos, processando\")\nimport string, nltk as k\n# histograma com as palavras\npalavras=string.join([i[\"texto\"][\"value\"].lower() for i in results4[\"results\"][\"bindings\"]])\nexclude = set(string.punctuation)\npalavras = ''.join(ch for ch in palavras if ch not in exclude)\npalavras_=palavras.split()\n#fdist=k.FreqDist(palavras_)\nprint(\"feita primeira freq dist em %.2f\"%(time.time()-NOW,))\n\nNOW=time.time()\nstopwords = set(k.corpus.stopwords.words('portuguese'))\npalavras__=[pp for pp in palavras_ if pp not in stopwords]\nfdist_=k.FreqDist(palavras__)\nprint(\"feita segunda freq dist (retiradas stopwords) em %.2f\"%(time.time()-NOW,))\n\n#NOW=time.time()\n#stemmer = k.stem.RSLPStemmer()\n#palavras___=[stemmer.stem(pp) for pp in palavras__]\n#fdist__=k.FreqDist(palavras___)\n#print(\"feita terceira freq dist (radicalizada) em %.2f\"%(time.time()-NOW,))\n\n##################\n# bebe comentarios do endpoint sparql.\n# guarda 10 e os classifica na mão\n\n# faz histograma de todas as palavras\n# escolhe as mais frequentes ou com offset\n# ou as menos frequentes\n# faz feture vector com elas.\n# escolhendo as 200 palavras mais frequentes\npalavras_escolhidas=fdist_.keys()[:200]\n# outras features que podemos escolher é:\n# *) número de palavras terminadas em a, o, e ou s\n# *) tamanho médio das palavras utilizadas\n# *) uso das stopwords\n\n# é necessário um conjunto maior de classificações na mão\n# para julgar qual parte do histograma\n# é melhor de ser considerada.\n\n#########\ndef 
document_features(documento):\n features={}\n for palavra in palavras_escolhidas:\n features[\"contains(%s)\"%(palavra,)]=(palavra in documento)\n return features\n# fazendo com classes dummy\nmsgs= [(rr[\"texto\"][\"value\"],\"pos\") for rr in results4[\"results\"][\"bindings\"][:1000]]\nmsgs2=[(rr[\"texto\"][\"value\"],\"neg\") for rr in results4[\"results\"][\"bindings\"][1000:2000]]\nmsgs_=msgs+msgs2\nrandom.shuffle(msgs_)\nfeature_sets=[(document_features(msg[0]),msg[1]) for msg in msgs_]\ntrain_set, test_set = feature_sets[1000:], feature_sets[:1000]\nclassifier = k.NaiveBayesClassifier.train(train_set)\n\n########\n# As mais frequentes podem ser úteis já que os comentários\n# são pequenos e queremos que o vetor de atributos tenha informação\n\n# As menos frequentes são as palavras mais incomuns, informativas\n# para detecção de nichos do autor\n\n# As de incidência intermediária são consideradas as mais representativas\n# do assunto\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def hmac_sha1_token():
timestamp = str(time.time())
hmac_pass = hmac.new(b'some very secret string', timestamp.encode(
'utf-8'), hashlib.sha1).hexdigest()
token = '%s:%s' % (timestamp, hmac_pass)
return token
<|reserved_special_token_1|>
import hashlib
import hmac
import time
def hmac_sha1_token():
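    # Token format: "<unix timestamp>:<hex HMAC-SHA1 of that timestamp>",
    # keyed with a hard-coded secret so a verifier can recompute and expire it.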
timestamp = str(time.time())
hmac_pass = hmac.new(b'some very secret string', timestamp.encode(
'utf-8'), hashlib.sha1).hexdigest()
token = '%s:%s' % (timestamp, hmac_pass)
return token
|
flexible
|
{
"blob_id": "65ef3b2ed5eef3d9d9e682ca18cf84457e929df2",
"index": 2222,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef hmac_sha1_token():\n timestamp = str(time.time())\n hmac_pass = hmac.new(b'some very secret string', timestamp.encode(\n 'utf-8'), hashlib.sha1).hexdigest()\n token = '%s:%s' % (timestamp, hmac_pass)\n return token\n",
"step-3": "import hashlib\nimport hmac\nimport time\n\n\ndef hmac_sha1_token():\n timestamp = str(time.time())\n hmac_pass = hmac.new(b'some very secret string', timestamp.encode(\n 'utf-8'), hashlib.sha1).hexdigest()\n token = '%s:%s' % (timestamp, hmac_pass)\n return token\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def test_create_table_with_default_options(pawprint_default_tracker_db):
"""Ensure the table is correctly created with the default schema."""
tracker = pawprint_default_tracker_db
assert tracker.create_table() is None
with pytest.raises(ProgrammingError):
tracker.create_table()
assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table
), tracker.db).fetchall() == [(0,)]
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'"
.format(tracker.table), tracker.db).fetchall()
expected_schema = [(u'id', u'integer', None), (u'timestamp',
u'timestamp without time zone', None), (u'user_id', u'text', None),
(u'event', u'text', None), (u'metadata', u'jsonb', None)]
assert schema == expected_schema
def test_drop_table(pawprint_default_tracker_db_with_table):
"""Ensure that tables are deleted successfully."""
tracker = pawprint_default_tracker_db_with_table
with pytest.raises(ProgrammingError):
tracker.create_table()
tracker.drop_table()
with pytest.raises(ProgrammingError):
tracker.drop_table()
<|reserved_special_token_0|>
def test_create_table_with_other_options(drop_tracker_test_table, db_string,
tracker_test_table_name):
"""Ensure the table is correctly created with an alternative schema."""
schema = OrderedDict([('pk', 'SERIAL PRIMARY KEY'), ('infofield', 'TEXT')])
tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,
schema=schema)
tracker.create_table()
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'"
.format(tracker.table), tracker.db).fetchall()
assert schema == [('pk', 'integer', None), ('infofield', 'text', None)]
<|reserved_special_token_0|>
def test_counts(pawprint_default_tracker_db_with_table):
"""Test counting a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
query = (
"""
INSERT INTO {} (timestamp, user_id, event) VALUES
('2016-01-01 12:30', 'alice', 'logged_in'),
('2016-01-01 12:40', 'bob', 'logged_in'),
('2016-01-01 16:00', 'charlotte', 'logged_in'),
('2016-01-02 00:00', 'dan', 'logged_in'),
('2016-01-02 00:00', 'elizabeth', 'logged_in'),
('2016-01-05 00:00', 'frank', 'logged_in'),
('2016-01-10 00:00', 'gabrielle', 'logged_in'),
('2016-01-20 00:00', 'hans', 'logged_in'),
('2016-02-01 00:00', 'iris', 'logged_in'),
('2016-02-01 00:00', 'james', 'logged_in'),
('2016-03-01 00:00', 'kelly', 'logged_in'),
('2016-03-01 00:00', 'laura', 'logged_in'),
('2016-03-01 00:00', 'mike', 'not_logged_in')
"""
.format(tracker.table))
pd.io.sql.execute(query, tracker.db)
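    # Count the same login events at hourly, daily, weekly, and monthly resolution, plus bounded date ranges.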
logins_hourly = tracker.count(event='logged_in', resolution='hour')
logins_daily = tracker.count(event='logged_in')
logins_weekly = tracker.count(event='logged_in', resolution='week')
logins_monthly = tracker.count(event='logged_in', resolution='month')
logins_weekly_left_range = tracker.count(event='logged_in', resolution=
'week', start=datetime(2016, 2, 1))
logins_weekly_right_range = tracker.count(event='logged_in', resolution
='week', end=datetime(2016, 2, 1))
logins_daily_full_range = tracker.count(event='logged_in', start=
datetime(2016, 1, 15), end=datetime(2016, 2, 15))
assert len(logins_hourly) == 8
assert np.all(logins_hourly['count'].values == [2, 1, 2, 1, 1, 1, 2, 2])
assert len(logins_daily) == 7
assert np.all(logins_daily['count'].values == [3, 2, 1, 1, 1, 2, 2])
assert len(logins_weekly) == 5
assert np.all(logins_weekly['count'].values == [5, 2, 1, 2, 2])
assert len(logins_monthly) == 3
assert len(logins_weekly_left_range) == 2
assert len(logins_weekly_right_range) == 4
assert len(logins_daily_full_range) == 2
def test_sum_and_average(pawprint_default_tracker_db_with_table):
"""Test aggregating a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
    metadata = '{"val": 1}'
query = (
"""
INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES
('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),
('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),
('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),
('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),
('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),
('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')
"""
.format(table=tracker.table, metadata=metadata))
pd.io.sql.execute(query, tracker.db)
x_sum_daily_all = tracker.sum('metadata__val')
x_sum_daily = tracker.sum('metadata__val', event='logged_in')
    x_avg_daily_all = tracker.average('metadata__val')
x_avg_daily = tracker.average('metadata__val', event='logged_in')
assert len(x_sum_daily) == 7
assert np.all(x_sum_daily_all['sum'].values == [3, 2, 1, 1, 1, 2, 3])
assert np.all(x_sum_daily['sum'].values == [3, 2, 1, 1, 1, 2, 2])
assert np.all(x_avg_daily_all['avg'].values == [1, 1, 1, 1, 1, 1, 1])
assert np.all(x_avg_daily['avg'] == x_avg_daily_all['avg'])
<|reserved_special_token_0|>
def test_silent_write_errors():
"""When a failure occurs in event write, it should fail silently."""
tracker = pawprint.Tracker(db=None, table=None)
try:
tracker.write(event='This will fail silently.')
except Exception:
pytest.fail('Failed to fail silently.')
<|reserved_special_token_0|>
def test_malicious_strings(pawprint_default_tracker_db_with_table):
"""Test that SQL injection strings are sanitized"""
tracker = pawprint_default_tracker_db_with_table
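    # Each write embeds a SQL-injection payload; the read counts confirm nothing extra was inserted or dropped.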
tracker.write(event='armageddon', metadata={'shady business': {'with':
'the following string', 'of sql':
"50');INSERT INTO {table} (event, user_id) VALUES ('you got pwnd', '50"
.format(table=tracker.table)}})
assert len(tracker.read()) == 1
tracker.write(event='armageddon', metadata={'more shady business': {
'my shady sql': "' OR '1'='1;DROP TABLE {table};".format(table=
tracker.table)}})
assert len(tracker.read()) == 2
tracker.write(event="' OR '1'='1;", metadata={'foo':
"x'); DROP TABLE {table}; --".format(table=tracker.table)})
assert len(tracker.read()) == 3
def test_escaping_from_quotes(pawprint_default_tracker_db_with_table):
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='known crummy string', metadata={'foo': {'toState':
"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2",
'fromState': '#/app/dealdetails/2345', 'platform': 'iOS App'}})
assert len(tracker.read()) == 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_create_table_with_default_options(pawprint_default_tracker_db):
"""Ensure the table is correctly created with the default schema."""
tracker = pawprint_default_tracker_db
assert tracker.create_table() is None
with pytest.raises(ProgrammingError):
tracker.create_table()
assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table
), tracker.db).fetchall() == [(0,)]
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'"
.format(tracker.table), tracker.db).fetchall()
expected_schema = [(u'id', u'integer', None), (u'timestamp',
u'timestamp without time zone', None), (u'user_id', u'text', None),
(u'event', u'text', None), (u'metadata', u'jsonb', None)]
assert schema == expected_schema
def test_drop_table(pawprint_default_tracker_db_with_table):
"""Ensure that tables are deleted successfully."""
tracker = pawprint_default_tracker_db_with_table
with pytest.raises(ProgrammingError):
tracker.create_table()
tracker.drop_table()
with pytest.raises(ProgrammingError):
tracker.drop_table()
<|reserved_special_token_0|>
def test_create_table_with_other_options(drop_tracker_test_table, db_string,
tracker_test_table_name):
"""Ensure the table is correctly created with an alternative schema."""
schema = OrderedDict([('pk', 'SERIAL PRIMARY KEY'), ('infofield', 'TEXT')])
tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,
schema=schema)
tracker.create_table()
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'"
.format(tracker.table), tracker.db).fetchall()
assert schema == [('pk', 'integer', None), ('infofield', 'text', None)]
<|reserved_special_token_0|>
def test_counts(pawprint_default_tracker_db_with_table):
"""Test counting a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
query = (
"""
INSERT INTO {} (timestamp, user_id, event) VALUES
('2016-01-01 12:30', 'alice', 'logged_in'),
('2016-01-01 12:40', 'bob', 'logged_in'),
('2016-01-01 16:00', 'charlotte', 'logged_in'),
('2016-01-02 00:00', 'dan', 'logged_in'),
('2016-01-02 00:00', 'elizabeth', 'logged_in'),
('2016-01-05 00:00', 'frank', 'logged_in'),
('2016-01-10 00:00', 'gabrielle', 'logged_in'),
('2016-01-20 00:00', 'hans', 'logged_in'),
('2016-02-01 00:00', 'iris', 'logged_in'),
('2016-02-01 00:00', 'james', 'logged_in'),
('2016-03-01 00:00', 'kelly', 'logged_in'),
('2016-03-01 00:00', 'laura', 'logged_in'),
('2016-03-01 00:00', 'mike', 'not_logged_in')
"""
.format(tracker.table))
pd.io.sql.execute(query, tracker.db)
logins_hourly = tracker.count(event='logged_in', resolution='hour')
logins_daily = tracker.count(event='logged_in')
logins_weekly = tracker.count(event='logged_in', resolution='week')
logins_monthly = tracker.count(event='logged_in', resolution='month')
logins_weekly_left_range = tracker.count(event='logged_in', resolution=
'week', start=datetime(2016, 2, 1))
logins_weekly_right_range = tracker.count(event='logged_in', resolution
='week', end=datetime(2016, 2, 1))
logins_daily_full_range = tracker.count(event='logged_in', start=
datetime(2016, 1, 15), end=datetime(2016, 2, 15))
assert len(logins_hourly) == 8
assert np.all(logins_hourly['count'].values == [2, 1, 2, 1, 1, 1, 2, 2])
assert len(logins_daily) == 7
assert np.all(logins_daily['count'].values == [3, 2, 1, 1, 1, 2, 2])
assert len(logins_weekly) == 5
assert np.all(logins_weekly['count'].values == [5, 2, 1, 2, 2])
assert len(logins_monthly) == 3
assert len(logins_weekly_left_range) == 2
assert len(logins_weekly_right_range) == 4
assert len(logins_daily_full_range) == 2
def test_sum_and_average(pawprint_default_tracker_db_with_table):
"""Test aggregating a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
    metadata = '{"val": 1}'
query = (
"""
INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES
('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),
('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),
('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),
('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),
('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),
('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')
"""
.format(table=tracker.table, metadata=metadata))
pd.io.sql.execute(query, tracker.db)
x_sum_daily_all = tracker.sum('metadata__val')
x_sum_daily = tracker.sum('metadata__val', event='logged_in')
    x_avg_daily_all = tracker.average('metadata__val')
x_avg_daily = tracker.average('metadata__val', event='logged_in')
assert len(x_sum_daily) == 7
assert np.all(x_sum_daily_all['sum'].values == [3, 2, 1, 1, 1, 2, 3])
assert np.all(x_sum_daily['sum'].values == [3, 2, 1, 1, 1, 2, 2])
assert np.all(x_avg_daily_all['avg'].values == [1, 1, 1, 1, 1, 1, 1])
assert np.all(x_avg_daily['avg'] == x_avg_daily_all['avg'])
def test_parse_fields(pawprint_default_tracker_db):
"""Test args passed to read() and _aggregate() are parsed correctly."""
tracker = pawprint_default_tracker_db
args = ()
assert tracker._parse_fields(*args) == '*'
args = 'event',
assert tracker._parse_fields(*args) == 'event'
args = 'user_id', 'timestamp'
assert tracker._parse_fields(*args) == 'user_id, timestamp'
args = 'metadata__a__b',
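    # Double underscores denote a JSONB path; Postgres's #> operator extracts the value at that path.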
assert tracker._parse_fields(*args) == "metadata #> '{a, b}' AS json_field"
def test_parse_values(pawprint_default_tracker_db):
"""Test parsing values for write()."""
tracker = pawprint_default_tracker_db
args = 'logged_in',
assert tracker._parse_values(*args) == "'logged_in'"
args = 'logged_in', 'hannah'
assert tracker._parse_values(*args) == "'logged_in', 'hannah'"
def test_parse_conditionals(pawprint_default_tracker_db):
"""Test kwargs passed to read() and _aggregate() are parsed correctly."""
tracker = pawprint_default_tracker_db
kwargs = {}
assert tracker._parse_conditionals(**kwargs) == ''
kwargs = {'user_id': 'Quentin'}
assert tracker._parse_conditionals(**kwargs) == "WHERE user_id = 'Quentin'"
kwargs = {'event': 'logged_in', 'user_id': 'Quentin'}
assert tracker._parse_conditionals(**kwargs) in (
"WHERE event = 'logged_in' AND user_id = 'Quentin'",
"WHERE user_id = 'Quentin' AND event = 'logged_in'")
kwargs = {'event__in': ['logged_in', 'logged_out']}
assert tracker._parse_conditionals(**kwargs
) == "WHERE event IN ('logged_in', 'logged_out')"
def test_accessing_json_fields(pawprint_default_tracker_db_with_table):
"""Test some structured data pulling."""
tracker = pawprint_default_tracker_db_with_table
simple = {'integral': 'derivative'}
medium = {'montecarlo': {'prior': 'likelihood'}}
difficult = {'deepnet': ['mlp', 'cnn', 'rnn'], 'ensembles': {'random':
'forest', 'always': {'cross_validate': ['kfold', 'stratified']}}}
tracker.write(event='maths', metadata=simple)
tracker.write(event='stats', metadata=medium)
tracker.write(event='ml', metadata=difficult)
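    # Read nested JSON back via double-underscore paths, including list indices (deepnet__1).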
maths_all = tracker.read('metadata__integral')
maths_condition = tracker.read('metadata__integral', event='maths')
assert len(maths_all) == 3
assert len(maths_condition) == 1
assert list(maths_all.json_field) == ['derivative', None, None]
stats = tracker.read('metadata__montecarlo__prior').dropna()
assert len(stats) == 1
assert stats.json_field.iloc[0] == 'likelihood'
types_of_nn = tracker.read('metadata__deepnet').dropna()
best_nn = tracker.read('metadata__deepnet__1').dropna()
full_depth = tracker.read('metadata__ensembles__always__cross_validate__0'
).dropna()
assert len(types_of_nn) == 1
assert len(best_nn) == 1
assert best_nn.json_field.iloc[0] == 'cnn'
assert len(full_depth) == 1
assert full_depth.json_field.iloc[0] == 'kfold'
def test_json_maths(pawprint_default_tracker_db_with_table):
"""More advanced operations on JSON subfields."""
tracker = pawprint_default_tracker_db_with_table
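    # Exercises JSONB containment (metadata__contains) and nested numeric filters (value__gt / value__gte).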
tracker.write(event='whisky', metadata={'uigeadail': {'value': 123,
'lagavulin': [4, 2]}})
tracker.write(event='whisky', metadata={'uigeadail': {'value': 456,
'lagavulin': [5, 0]}})
tracker.write(event='whisky', metadata={'uigeadail': {'value': 758,
'lagavulin': [7, 10]}})
tracker.write(event='armagnac', metadata={'age': 'XO'})
tracker.write(event='armagnac', metadata={'age': 15})
assert len(tracker.read()) == 5
assert len(tracker.read(metadata__uigeadail__contains='lagavulin')) == 3
assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2
assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3
whiskies = tracker.sum('metadata__uigeadail__value')
assert len(whiskies) == 1
assert whiskies.iloc[0]['sum'] == 1337
assert len(tracker.read(metadata__contains='age')) == 2
assert len(tracker.read(metadata__age='XO')) == 1
def test_silent_write_errors():
"""When a failure occurs in event write, it should fail silently."""
tracker = pawprint.Tracker(db=None, table=None)
try:
tracker.write(event='This will fail silently.')
except Exception:
pytest.fail('Failed to fail silently.')
def test_nonsilent_write_errors(error_logger):
"""Test non-silent write errors that should output to the logger or raise exceptions."""
tracker = pawprint.Tracker(db='postgresql:///fail', logger=error_logger)
with pytest.raises(Exception):
tracker.write()
with pytest.raises(Exception):
tracker.write(event='going_to_fail')
with open('pawprint.log', mode='r') as f:
logs = f.readlines()
print(logs[3])
assert len(logs) == 6
assert logs[0].startswith('pawprint: pawprint failed to write.')
assert 'Table: None. Query: INSERT INTO None () VALUES ();' in logs[0]
assert "Query: INSERT INTO None (event) VALUES ('going_to_fail')" in logs[3
]
os.remove('pawprint.log')
<|reserved_special_token_0|>
def test_repr_and_str(pawprint_default_tracker_db):
"""Test the __repr__ and __str__."""
tracker = pawprint_default_tracker_db
expected_repr = "pawprint.Tracker on table '{}' and database '{}'".format(
tracker.table, tracker.db)
expected_str = 'pawprint Tracker object.\ndb : {}\ntable : {}'.format(
tracker.db, tracker.table)
assert tracker.__repr__() == expected_repr
assert tracker.__str__() == expected_str
def test_malicious_strings(pawprint_default_tracker_db_with_table):
"""Test that SQL injection strings are sanitized"""
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='armageddon', metadata={'shady business': {'with':
'the following string', 'of sql':
"50');INSERT INTO {table} (event, user_id) VALUES ('you got pwnd', '50"
.format(table=tracker.table)}})
assert len(tracker.read()) == 1
tracker.write(event='armageddon', metadata={'more shady business': {
'my shady sql': "' OR '1'='1;DROP TABLE {table};".format(table=
tracker.table)}})
assert len(tracker.read()) == 2
tracker.write(event="' OR '1'='1;", metadata={'foo':
"x'); DROP TABLE {table}; --".format(table=tracker.table)})
assert len(tracker.read()) == 3
def test_escaping_from_quotes(pawprint_default_tracker_db_with_table):
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='known crummy string', metadata={'foo': {'toState':
"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2",
'fromState': '#/app/dealdetails/2345', 'platform': 'iOS App'}})
assert len(tracker.read()) == 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_create_table_with_default_options(pawprint_default_tracker_db):
"""Ensure the table is correctly created with the default schema."""
tracker = pawprint_default_tracker_db
assert tracker.create_table() is None
with pytest.raises(ProgrammingError):
tracker.create_table()
assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table
), tracker.db).fetchall() == [(0,)]
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'"
.format(tracker.table), tracker.db).fetchall()
expected_schema = [(u'id', u'integer', None), (u'timestamp',
u'timestamp without time zone', None), (u'user_id', u'text', None),
(u'event', u'text', None), (u'metadata', u'jsonb', None)]
assert schema == expected_schema
def test_drop_table(pawprint_default_tracker_db_with_table):
"""Ensure that tables are deleted successfully."""
tracker = pawprint_default_tracker_db_with_table
with pytest.raises(ProgrammingError):
tracker.create_table()
tracker.drop_table()
with pytest.raises(ProgrammingError):
tracker.drop_table()
def test_instantiate_tracker_from_dot_file(drop_tracker_test_table):
"""Test instantiating a Tracker with a dotfile instead of using db and table strings."""
dotfile = {'db': 'postgresql:///little_bean_toes', 'json_field':
'such_fuzzy'}
with open('.pawprint', 'w') as f:
json.dump(dotfile, f)
tracker = pawprint.Tracker(dotfile='.pawprint', json_field='boop')
assert tracker.db == 'postgresql:///little_bean_toes'
assert tracker.table is None
assert tracker.json_field == 'boop'
os.remove('.pawprint')
def test_create_table_with_other_options(drop_tracker_test_table, db_string,
tracker_test_table_name):
"""Ensure the table is correctly created with an alternative schema."""
schema = OrderedDict([('pk', 'SERIAL PRIMARY KEY'), ('infofield', 'TEXT')])
tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,
schema=schema)
tracker.create_table()
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'"
.format(tracker.table), tracker.db).fetchall()
assert schema == [('pk', 'integer', None), ('infofield', 'text', None)]
<|reserved_special_token_0|>
def test_read(pawprint_default_tracker_db_with_table):
"""Test pulling the data into a dataframe according to various simple filters."""
tracker = pawprint_default_tracker_db_with_table
assert len(tracker.read()) == 0
tracker.write(user_id='Pawprint', event='Testing !')
tracker.write(user_id='Pawprint')
tracker.write(event='No user')
tracker.write(user_id='import this', event='very zen', metadata={
'better': 'forgiveness', 'worse': 'permission', 'ordered': [
'simple', 'complex', 'complicated']})
all_data = tracker.read()
pawprint_events = tracker.read(user_id='Pawprint')
id_gt_events = tracker.read(id__gt=10)
id_gte_lt_events = tracker.read(id__gte=1, id__lt=3)
field_events = tracker.read('event', id__lte=100, event='very zen')
contains_events = tracker.read(metadata__contains='better')
not_contains_events = tracker.read(metadata__contains='whisky')
assert len(all_data) == 4
assert len(pawprint_events) == 2
assert len(id_gt_events) == 0
assert len(id_gte_lt_events) == 2
assert len(field_events) == 1
assert len(contains_events) == 1
assert len(not_contains_events) == 0
assert set(all_data.columns) == set(['id', 'user_id', 'event',
'metadata', 'timestamp'])
assert set(field_events.columns) == set(['event'])
def test_counts(pawprint_default_tracker_db_with_table):
"""Test counting a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
query = (
"""
INSERT INTO {} (timestamp, user_id, event) VALUES
('2016-01-01 12:30', 'alice', 'logged_in'),
('2016-01-01 12:40', 'bob', 'logged_in'),
('2016-01-01 16:00', 'charlotte', 'logged_in'),
('2016-01-02 00:00', 'dan', 'logged_in'),
('2016-01-02 00:00', 'elizabeth', 'logged_in'),
('2016-01-05 00:00', 'frank', 'logged_in'),
('2016-01-10 00:00', 'gabrielle', 'logged_in'),
('2016-01-20 00:00', 'hans', 'logged_in'),
('2016-02-01 00:00', 'iris', 'logged_in'),
('2016-02-01 00:00', 'james', 'logged_in'),
('2016-03-01 00:00', 'kelly', 'logged_in'),
('2016-03-01 00:00', 'laura', 'logged_in'),
('2016-03-01 00:00', 'mike', 'not_logged_in')
"""
.format(tracker.table))
pd.io.sql.execute(query, tracker.db)
logins_hourly = tracker.count(event='logged_in', resolution='hour')
logins_daily = tracker.count(event='logged_in')
logins_weekly = tracker.count(event='logged_in', resolution='week')
logins_monthly = tracker.count(event='logged_in', resolution='month')
logins_weekly_left_range = tracker.count(event='logged_in', resolution=
'week', start=datetime(2016, 2, 1))
logins_weekly_right_range = tracker.count(event='logged_in', resolution
='week', end=datetime(2016, 2, 1))
logins_daily_full_range = tracker.count(event='logged_in', start=
datetime(2016, 1, 15), end=datetime(2016, 2, 15))
assert len(logins_hourly) == 8
assert np.all(logins_hourly['count'].values == [2, 1, 2, 1, 1, 1, 2, 2])
assert len(logins_daily) == 7
assert np.all(logins_daily['count'].values == [3, 2, 1, 1, 1, 2, 2])
assert len(logins_weekly) == 5
assert np.all(logins_weekly['count'].values == [5, 2, 1, 2, 2])
assert len(logins_monthly) == 3
assert len(logins_weekly_left_range) == 2
assert len(logins_weekly_right_range) == 4
assert len(logins_daily_full_range) == 2
def test_sum_and_average(pawprint_default_tracker_db_with_table):
"""Test aggregating a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
metadata = str('{"val": 1}').replace("'", '"')
query = (
"""
INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES
('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),
('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),
('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),
('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),
('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),
('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')
"""
.format(table=tracker.table, metadata=metadata))
pd.io.sql.execute(query, tracker.db)
x_sum_daily_all = tracker.sum('metadata__val')
x_sum_daily = tracker.sum('metadata__val', event='logged_in')
    x_avg_daily_all = tracker.average('metadata__val')
x_avg_daily = tracker.average('metadata__val', event='logged_in')
assert len(x_sum_daily) == 7
assert np.all(x_sum_daily_all['sum'].values == [3, 2, 1, 1, 1, 2, 3])
assert np.all(x_sum_daily['sum'].values == [3, 2, 1, 1, 1, 2, 2])
assert np.all(x_avg_daily_all['avg'].values == [1, 1, 1, 1, 1, 1, 1])
assert np.all(x_avg_daily['avg'] == x_avg_daily_all['avg'])
def test_parse_fields(pawprint_default_tracker_db):
"""Test args passed to read() and _aggregate() are parsed correctly."""
tracker = pawprint_default_tracker_db
args = ()
assert tracker._parse_fields(*args) == '*'
args = 'event',
assert tracker._parse_fields(*args) == 'event'
args = 'user_id', 'timestamp'
assert tracker._parse_fields(*args) == 'user_id, timestamp'
args = 'metadata__a__b',
assert tracker._parse_fields(*args) == "metadata #> '{a, b}' AS json_field"
def test_parse_values(pawprint_default_tracker_db):
"""Test parsing values for write()."""
tracker = pawprint_default_tracker_db
args = 'logged_in',
assert tracker._parse_values(*args) == "'logged_in'"
args = 'logged_in', 'hannah'
assert tracker._parse_values(*args) == "'logged_in', 'hannah'"
def test_parse_conditionals(pawprint_default_tracker_db):
"""Test kwargs passed to read() and _aggregate() are parsed correctly."""
tracker = pawprint_default_tracker_db
kwargs = {}
assert tracker._parse_conditionals(**kwargs) == ''
kwargs = {'user_id': 'Quentin'}
assert tracker._parse_conditionals(**kwargs) == "WHERE user_id = 'Quentin'"
kwargs = {'event': 'logged_in', 'user_id': 'Quentin'}
assert tracker._parse_conditionals(**kwargs) in (
"WHERE event = 'logged_in' AND user_id = 'Quentin'",
"WHERE user_id = 'Quentin' AND event = 'logged_in'")
kwargs = {'event__in': ['logged_in', 'logged_out']}
assert tracker._parse_conditionals(**kwargs
) == "WHERE event IN ('logged_in', 'logged_out')"
def test_accessing_json_fields(pawprint_default_tracker_db_with_table):
"""Test some structured data pulling."""
tracker = pawprint_default_tracker_db_with_table
simple = {'integral': 'derivative'}
medium = {'montecarlo': {'prior': 'likelihood'}}
difficult = {'deepnet': ['mlp', 'cnn', 'rnn'], 'ensembles': {'random':
'forest', 'always': {'cross_validate': ['kfold', 'stratified']}}}
tracker.write(event='maths', metadata=simple)
tracker.write(event='stats', metadata=medium)
tracker.write(event='ml', metadata=difficult)
maths_all = tracker.read('metadata__integral')
maths_condition = tracker.read('metadata__integral', event='maths')
assert len(maths_all) == 3
assert len(maths_condition) == 1
assert list(maths_all.json_field) == ['derivative', None, None]
stats = tracker.read('metadata__montecarlo__prior').dropna()
assert len(stats) == 1
assert stats.json_field.iloc[0] == 'likelihood'
types_of_nn = tracker.read('metadata__deepnet').dropna()
best_nn = tracker.read('metadata__deepnet__1').dropna()
full_depth = tracker.read('metadata__ensembles__always__cross_validate__0'
).dropna()
assert len(types_of_nn) == 1
assert len(best_nn) == 1
assert best_nn.json_field.iloc[0] == 'cnn'
assert len(full_depth) == 1
assert full_depth.json_field.iloc[0] == 'kfold'
def test_json_maths(pawprint_default_tracker_db_with_table):
"""More advanced operations on JSON subfields."""
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='whisky', metadata={'uigeadail': {'value': 123,
'lagavulin': [4, 2]}})
tracker.write(event='whisky', metadata={'uigeadail': {'value': 456,
'lagavulin': [5, 0]}})
tracker.write(event='whisky', metadata={'uigeadail': {'value': 758,
'lagavulin': [7, 10]}})
tracker.write(event='armagnac', metadata={'age': 'XO'})
tracker.write(event='armagnac', metadata={'age': 15})
assert len(tracker.read()) == 5
assert len(tracker.read(metadata__uigeadail__contains='lagavulin')) == 3
assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2
assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3
whiskies = tracker.sum('metadata__uigeadail__value')
assert len(whiskies) == 1
assert whiskies.iloc[0]['sum'] == 1337
assert len(tracker.read(metadata__contains='age')) == 2
assert len(tracker.read(metadata__age='XO')) == 1
def test_silent_write_errors():
"""When a failure occurs in event write, it should fail silently."""
tracker = pawprint.Tracker(db=None, table=None)
try:
tracker.write(event='This will fail silently.')
except Exception:
pytest.fail('Failed to fail silently.')
def test_nonsilent_write_errors(error_logger):
"""Test non-silent write errors that should output to the logger or raise exceptions."""
tracker = pawprint.Tracker(db='postgresql:///fail', logger=error_logger)
with pytest.raises(Exception):
tracker.write()
with pytest.raises(Exception):
tracker.write(event='going_to_fail')
with open('pawprint.log', mode='r') as f:
logs = f.readlines()
print(logs[3])
assert len(logs) == 6
assert logs[0].startswith('pawprint: pawprint failed to write.')
assert 'Table: None. Query: INSERT INTO None () VALUES ();' in logs[0]
assert "Query: INSERT INTO None (event) VALUES ('going_to_fail')" in logs[3
]
os.remove('pawprint.log')
def test_auto_timestamp(db_string):
"""Ensure that timestamps are autopopulated correctly if not passed."""
schema = {'event': 'TEXT', 'timestamp': 'TIMESTAMP'}
no_auto = pawprint.Tracker(db=db_string, table='no_auto',
auto_timestamp=False, schema=schema)
auto = pawprint.Tracker(db=db_string, table='auto', auto_timestamp=True,
schema=schema)
no_auto.create_table()
auto.create_table()
no_auto.write(event='foo')
auto.write(event='bar')
assert len(no_auto.read()) == 1
assert len(auto.read()) == 1
assert len(no_auto.read().dropna()) == 0
assert len(auto.read().dropna()) == 1
no_auto.drop_table()
auto.drop_table()
def test_repr_and_str(pawprint_default_tracker_db):
"""Test the __repr__ and __str__."""
tracker = pawprint_default_tracker_db
expected_repr = "pawprint.Tracker on table '{}' and database '{}'".format(
tracker.table, tracker.db)
expected_str = 'pawprint Tracker object.\ndb : {}\ntable : {}'.format(
tracker.db, tracker.table)
assert tracker.__repr__() == expected_repr
assert tracker.__str__() == expected_str
def test_malicious_strings(pawprint_default_tracker_db_with_table):
"""Test that SQL injection strings are sanitized"""
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='armageddon', metadata={'shady business': {'with':
'the following string', 'of sql':
"50');INSERT INTO {table} (event, user_id) VALUES ('you got pwnd', '50"
.format(table=tracker.table)}})
assert len(tracker.read()) == 1
tracker.write(event='armageddon', metadata={'more shady business': {
'my shady sql': "' OR '1'='1;DROP TABLE {table};".format(table=
tracker.table)}})
assert len(tracker.read()) == 2
tracker.write(event="' OR '1'='1;", metadata={'foo':
"x'); DROP TABLE {table}; --".format(table=tracker.table)})
assert len(tracker.read()) == 3
def test_escaping_from_quotes(pawprint_default_tracker_db_with_table):
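    """Ensure metadata strings containing single quotes are escaped and written correctly."""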
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='known crummy string', metadata={'foo': {'toState':
"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2",
'fromState': '#/app/dealdetails/2345', 'platform': 'iOS App'}})
assert len(tracker.read()) == 1
<|reserved_special_token_1|>
import os
import json
import pytest
from datetime import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy.exc import ProgrammingError
import pawprint
def test_create_table_with_default_options(pawprint_default_tracker_db):
"""Ensure the table is correctly created with the default schema."""
tracker = pawprint_default_tracker_db
assert tracker.create_table() is None
with pytest.raises(ProgrammingError):
tracker.create_table()
assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table
), tracker.db).fetchall() == [(0,)]
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'"
.format(tracker.table), tracker.db).fetchall()
expected_schema = [(u'id', u'integer', None), (u'timestamp',
u'timestamp without time zone', None), (u'user_id', u'text', None),
(u'event', u'text', None), (u'metadata', u'jsonb', None)]
assert schema == expected_schema
def test_drop_table(pawprint_default_tracker_db_with_table):
"""Ensure that tables are deleted successfully."""
tracker = pawprint_default_tracker_db_with_table
with pytest.raises(ProgrammingError):
tracker.create_table()
tracker.drop_table()
with pytest.raises(ProgrammingError):
tracker.drop_table()
def test_instantiate_tracker_from_dot_file(drop_tracker_test_table):
"""Test instantiating a Tracker with a dotfile instead of using db and table strings."""
dotfile = {'db': 'postgresql:///little_bean_toes', 'json_field':
'such_fuzzy'}
with open('.pawprint', 'w') as f:
json.dump(dotfile, f)
tracker = pawprint.Tracker(dotfile='.pawprint', json_field='boop')
assert tracker.db == 'postgresql:///little_bean_toes'
assert tracker.table is None
assert tracker.json_field == 'boop'
os.remove('.pawprint')
def test_create_table_with_other_options(drop_tracker_test_table, db_string,
tracker_test_table_name):
"""Ensure the table is correctly created with an alternative schema."""
schema = OrderedDict([('pk', 'SERIAL PRIMARY KEY'), ('infofield', 'TEXT')])
tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,
schema=schema)
tracker.create_table()
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'"
.format(tracker.table), tracker.db).fetchall()
assert schema == [('pk', 'integer', None), ('infofield', 'text', None)]
def test_write(drop_tracker_test_table, db_string, tracker_test_table_name):
"""Test the tracking of an event."""
tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,
schema={'id': 'INT'})
tracker.create_table()
assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table
), tracker.db).fetchall() == [(0,)]
tracker.write(id=1337)
assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table
), tracker.db).fetchall() == [(1,)]
data = pd.read_sql('SELECT * FROM {}'.format(tracker.table), tracker.db)
assert isinstance(data, pd.DataFrame)
assert len(data.columns) == 1
assert data.columns[0] == 'id'
assert data.id[0] == 1337
def test_read(pawprint_default_tracker_db_with_table):
"""Test pulling the data into a dataframe according to various simple filters."""
tracker = pawprint_default_tracker_db_with_table
assert len(tracker.read()) == 0
tracker.write(user_id='Pawprint', event='Testing !')
tracker.write(user_id='Pawprint')
tracker.write(event='No user')
tracker.write(user_id='import this', event='very zen', metadata={
'better': 'forgiveness', 'worse': 'permission', 'ordered': [
'simple', 'complex', 'complicated']})
all_data = tracker.read()
pawprint_events = tracker.read(user_id='Pawprint')
id_gt_events = tracker.read(id__gt=10)
id_gte_lt_events = tracker.read(id__gte=1, id__lt=3)
field_events = tracker.read('event', id__lte=100, event='very zen')
contains_events = tracker.read(metadata__contains='better')
not_contains_events = tracker.read(metadata__contains='whisky')
assert len(all_data) == 4
assert len(pawprint_events) == 2
assert len(id_gt_events) == 0
assert len(id_gte_lt_events) == 2
assert len(field_events) == 1
assert len(contains_events) == 1
assert len(not_contains_events) == 0
assert set(all_data.columns) == set(['id', 'user_id', 'event',
'metadata', 'timestamp'])
assert set(field_events.columns) == set(['event'])
def test_counts(pawprint_default_tracker_db_with_table):
"""Test counting a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
query = (
"""
INSERT INTO {} (timestamp, user_id, event) VALUES
('2016-01-01 12:30', 'alice', 'logged_in'),
('2016-01-01 12:40', 'bob', 'logged_in'),
('2016-01-01 16:00', 'charlotte', 'logged_in'),
('2016-01-02 00:00', 'dan', 'logged_in'),
('2016-01-02 00:00', 'elizabeth', 'logged_in'),
('2016-01-05 00:00', 'frank', 'logged_in'),
('2016-01-10 00:00', 'gabrielle', 'logged_in'),
('2016-01-20 00:00', 'hans', 'logged_in'),
('2016-02-01 00:00', 'iris', 'logged_in'),
('2016-02-01 00:00', 'james', 'logged_in'),
('2016-03-01 00:00', 'kelly', 'logged_in'),
('2016-03-01 00:00', 'laura', 'logged_in'),
('2016-03-01 00:00', 'mike', 'not_logged_in')
"""
.format(tracker.table))
pd.io.sql.execute(query, tracker.db)
logins_hourly = tracker.count(event='logged_in', resolution='hour')
logins_daily = tracker.count(event='logged_in')
logins_weekly = tracker.count(event='logged_in', resolution='week')
logins_monthly = tracker.count(event='logged_in', resolution='month')
logins_weekly_left_range = tracker.count(event='logged_in', resolution=
'week', start=datetime(2016, 2, 1))
logins_weekly_right_range = tracker.count(event='logged_in', resolution
='week', end=datetime(2016, 2, 1))
logins_daily_full_range = tracker.count(event='logged_in', start=
datetime(2016, 1, 15), end=datetime(2016, 2, 15))
assert len(logins_hourly) == 8
assert np.all(logins_hourly['count'].values == [2, 1, 2, 1, 1, 1, 2, 2])
assert len(logins_daily) == 7
assert np.all(logins_daily['count'].values == [3, 2, 1, 1, 1, 2, 2])
assert len(logins_weekly) == 5
assert np.all(logins_weekly['count'].values == [5, 2, 1, 2, 2])
assert len(logins_monthly) == 3
assert len(logins_weekly_left_range) == 2
assert len(logins_weekly_right_range) == 4
assert len(logins_daily_full_range) == 2
def test_sum_and_average(pawprint_default_tracker_db_with_table):
"""Test aggregating a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
metadata = str('{"val": 1}').replace("'", '"')
query = (
"""
INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES
('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),
('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),
('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),
('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),
('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),
('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')
"""
.format(table=tracker.table, metadata=metadata))
pd.io.sql.execute(query, tracker.db)
x_sum_daily_all = tracker.sum('metadata__val')
x_sum_daily = tracker.sum('metadata__val', event='logged_in')
    x_avg_daily_all = tracker.average('metadata__val')
x_avg_daily = tracker.average('metadata__val', event='logged_in')
assert len(x_sum_daily) == 7
assert np.all(x_sum_daily_all['sum'].values == [3, 2, 1, 1, 1, 2, 3])
assert np.all(x_sum_daily['sum'].values == [3, 2, 1, 1, 1, 2, 2])
assert np.all(x_avg_daily_all['avg'].values == [1, 1, 1, 1, 1, 1, 1])
assert np.all(x_avg_daily['avg'] == x_avg_daily_all['avg'])
def test_parse_fields(pawprint_default_tracker_db):
"""Test args passed to read() and _aggregate() are parsed correctly."""
tracker = pawprint_default_tracker_db
args = ()
assert tracker._parse_fields(*args) == '*'
args = 'event',
assert tracker._parse_fields(*args) == 'event'
args = 'user_id', 'timestamp'
assert tracker._parse_fields(*args) == 'user_id, timestamp'
args = 'metadata__a__b',
assert tracker._parse_fields(*args) == "metadata #> '{a, b}' AS json_field"
def test_parse_values(pawprint_default_tracker_db):
"""Test parsing values for write()."""
tracker = pawprint_default_tracker_db
args = 'logged_in',
assert tracker._parse_values(*args) == "'logged_in'"
args = 'logged_in', 'hannah'
assert tracker._parse_values(*args) == "'logged_in', 'hannah'"
def test_parse_conditionals(pawprint_default_tracker_db):
"""Test kwargs passed to read() and _aggregate() are parsed correctly."""
tracker = pawprint_default_tracker_db
kwargs = {}
assert tracker._parse_conditionals(**kwargs) == ''
kwargs = {'user_id': 'Quentin'}
assert tracker._parse_conditionals(**kwargs) == "WHERE user_id = 'Quentin'"
kwargs = {'event': 'logged_in', 'user_id': 'Quentin'}
assert tracker._parse_conditionals(**kwargs) in (
"WHERE event = 'logged_in' AND user_id = 'Quentin'",
"WHERE user_id = 'Quentin' AND event = 'logged_in'")
kwargs = {'event__in': ['logged_in', 'logged_out']}
assert tracker._parse_conditionals(**kwargs
) == "WHERE event IN ('logged_in', 'logged_out')"
def test_accessing_json_fields(pawprint_default_tracker_db_with_table):
"""Test some structured data pulling."""
tracker = pawprint_default_tracker_db_with_table
simple = {'integral': 'derivative'}
medium = {'montecarlo': {'prior': 'likelihood'}}
difficult = {'deepnet': ['mlp', 'cnn', 'rnn'], 'ensembles': {'random':
'forest', 'always': {'cross_validate': ['kfold', 'stratified']}}}
tracker.write(event='maths', metadata=simple)
tracker.write(event='stats', metadata=medium)
tracker.write(event='ml', metadata=difficult)
maths_all = tracker.read('metadata__integral')
maths_condition = tracker.read('metadata__integral', event='maths')
assert len(maths_all) == 3
assert len(maths_condition) == 1
assert list(maths_all.json_field) == ['derivative', None, None]
stats = tracker.read('metadata__montecarlo__prior').dropna()
assert len(stats) == 1
assert stats.json_field.iloc[0] == 'likelihood'
types_of_nn = tracker.read('metadata__deepnet').dropna()
best_nn = tracker.read('metadata__deepnet__1').dropna()
full_depth = tracker.read('metadata__ensembles__always__cross_validate__0'
).dropna()
assert len(types_of_nn) == 1
assert len(best_nn) == 1
assert best_nn.json_field.iloc[0] == 'cnn'
assert len(full_depth) == 1
assert full_depth.json_field.iloc[0] == 'kfold'
def test_json_maths(pawprint_default_tracker_db_with_table):
"""More advanced operations on JSON subfields."""
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='whisky', metadata={'uigeadail': {'value': 123,
'lagavulin': [4, 2]}})
tracker.write(event='whisky', metadata={'uigeadail': {'value': 456,
'lagavulin': [5, 0]}})
tracker.write(event='whisky', metadata={'uigeadail': {'value': 758,
'lagavulin': [7, 10]}})
tracker.write(event='armagnac', metadata={'age': 'XO'})
tracker.write(event='armagnac', metadata={'age': 15})
assert len(tracker.read()) == 5
assert len(tracker.read(metadata__uigeadail__contains='lagavulin')) == 3
assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2
assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3
whiskies = tracker.sum('metadata__uigeadail__value')
assert len(whiskies) == 1
assert whiskies.iloc[0]['sum'] == 1337
assert len(tracker.read(metadata__contains='age')) == 2
assert len(tracker.read(metadata__age='XO')) == 1
def test_silent_write_errors():
"""When a failure occurs in event write, it should fail silently."""
tracker = pawprint.Tracker(db=None, table=None)
try:
tracker.write(event='This will fail silently.')
except Exception:
pytest.fail('Failed to fail silently.')
def test_nonsilent_write_errors(error_logger):
"""Test non-silent write errors that should output to the logger or raise exceptions."""
tracker = pawprint.Tracker(db='postgresql:///fail', logger=error_logger)
with pytest.raises(Exception):
tracker.write()
with pytest.raises(Exception):
tracker.write(event='going_to_fail')
with open('pawprint.log', mode='r') as f:
logs = f.readlines()
print(logs[3])
assert len(logs) == 6
assert logs[0].startswith('pawprint: pawprint failed to write.')
assert 'Table: None. Query: INSERT INTO None () VALUES ();' in logs[0]
assert "Query: INSERT INTO None (event) VALUES ('going_to_fail')" in logs[3
]
os.remove('pawprint.log')
def test_auto_timestamp(db_string):
"""Ensure that timestamps are autopopulated correctly if not passed."""
schema = {'event': 'TEXT', 'timestamp': 'TIMESTAMP'}
no_auto = pawprint.Tracker(db=db_string, table='no_auto',
auto_timestamp=False, schema=schema)
auto = pawprint.Tracker(db=db_string, table='auto', auto_timestamp=True,
schema=schema)
no_auto.create_table()
auto.create_table()
no_auto.write(event='foo')
auto.write(event='bar')
assert len(no_auto.read()) == 1
assert len(auto.read()) == 1
assert len(no_auto.read().dropna()) == 0
assert len(auto.read().dropna()) == 1
no_auto.drop_table()
auto.drop_table()
def test_repr_and_str(pawprint_default_tracker_db):
"""Test the __repr__ and __str__."""
tracker = pawprint_default_tracker_db
expected_repr = "pawprint.Tracker on table '{}' and database '{}'".format(
tracker.table, tracker.db)
expected_str = 'pawprint Tracker object.\ndb : {}\ntable : {}'.format(
tracker.db, tracker.table)
assert tracker.__repr__() == expected_repr
assert tracker.__str__() == expected_str
def test_malicious_strings(pawprint_default_tracker_db_with_table):
"""Test that SQL injection strings are sanitized"""
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='armageddon', metadata={'shady business': {'with':
'the following string', 'of sql':
"50');INSERT INTO {table} (event, user_id) VALUES ('you got pwnd', '50"
.format(table=tracker.table)}})
assert len(tracker.read()) == 1
tracker.write(event='armageddon', metadata={'more shady business': {
'my shady sql': "' OR '1'='1;DROP TABLE {table};".format(table=
tracker.table)}})
assert len(tracker.read()) == 2
tracker.write(event="' OR '1'='1;", metadata={'foo':
"x'); DROP TABLE {table}; --".format(table=tracker.table)})
assert len(tracker.read()) == 3
def test_escaping_from_quotes(pawprint_default_tracker_db_with_table):
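    """Ensure metadata strings containing single quotes are escaped and written correctly."""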
tracker = pawprint_default_tracker_db_with_table
tracker.write(event='known crummy string', metadata={'foo': {'toState':
"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2",
'fromState': '#/app/dealdetails/2345', 'platform': 'iOS App'}})
assert len(tracker.read()) == 1
<|reserved_special_token_1|>
import os
import json
import pytest
from datetime import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy.exc import ProgrammingError
import pawprint
def test_create_table_with_default_options(pawprint_default_tracker_db):
"""Ensure the table is correctly created with the default schema."""
tracker = pawprint_default_tracker_db
    # The table shouldn't exist yet. Assert it's correctly created.
assert tracker.create_table() is None
# Try creating it again. This should raise an error.
with pytest.raises(ProgrammingError):
tracker.create_table()
# Assert the table is empty when created
assert pd.io.sql.execute(
"SELECT COUNT(*) FROM {}".format(tracker.table), tracker.db
).fetchall() == [(0,)]
# Ensure its schema is correct
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length "
"FROM INFORMATION_SCHEMA.COLUMNS "
"WHERE table_name = '{}'".format(tracker.table),
tracker.db,
).fetchall()
expected_schema = [
(u"id", u"integer", None),
(u"timestamp", u"timestamp without time zone", None),
(u"user_id", u"text", None),
(u"event", u"text", None),
(u"metadata", u"jsonb", None),
]
assert schema == expected_schema
def test_drop_table(pawprint_default_tracker_db_with_table):
"""Ensure that tables are deleted successfully."""
tracker = pawprint_default_tracker_db_with_table
    # Make sure the table already exists before dropping it
with pytest.raises(ProgrammingError):
tracker.create_table()
tracker.drop_table()
with pytest.raises(ProgrammingError):
tracker.drop_table()
def test_instantiate_tracker_from_dot_file(drop_tracker_test_table):
"""Test instantiating a Tracker with a dotfile instead of using db and table strings."""
# Write a dotfile to disk
dotfile = {
"db": "postgresql:///little_bean_toes",
"json_field": "such_fuzzy",
}
with open(".pawprint", "w") as f:
json.dump(dotfile, f)
# Create a tracker from this dotfile
tracker = pawprint.Tracker(dotfile=".pawprint", json_field="boop")
# Ensure all the entries are as they should be
assert tracker.db == "postgresql:///little_bean_toes"
assert tracker.table is None
# assert tracker.logger is None
assert tracker.json_field == "boop" # field present in dotfile but overwritten in init
os.remove(".pawprint")
def test_create_table_with_other_options(
drop_tracker_test_table, db_string, tracker_test_table_name
):
"""Ensure the table is correctly created with an alternative schema."""
schema = OrderedDict([("pk", "SERIAL PRIMARY KEY"), ("infofield", "TEXT")])
tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name, schema=schema)
tracker.create_table()
# Ensure its schema is correct
schema = pd.io.sql.execute(
"SELECT column_name, data_type, character_maximum_length "
"FROM INFORMATION_SCHEMA.COLUMNS "
"WHERE table_name = '{}'".format(tracker.table),
tracker.db,
).fetchall()
assert schema == [("pk", "integer", None), ("infofield", "text", None)]
def test_write(drop_tracker_test_table, db_string, tracker_test_table_name):
"""Test the tracking of an event."""
tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name, schema={"id": "INT"})
tracker.create_table()
# Check the table's empty
assert pd.io.sql.execute(
"SELECT COUNT(*) FROM {}".format(tracker.table), tracker.db
).fetchall() == [(0,)]
# Add some data and check if the row count increases by one
tracker.write(id=1337)
assert pd.io.sql.execute(
"SELECT COUNT(*) FROM {}".format(tracker.table), tracker.db
).fetchall() == [(1,)]
# Pull the data and ensure it's correct
data = pd.read_sql("SELECT * FROM {}".format(tracker.table), tracker.db)
assert isinstance(data, pd.DataFrame)
assert len(data.columns) == 1
assert data.columns[0] == "id"
assert data.id[0] == 1337
def test_read(pawprint_default_tracker_db_with_table):
"""Test pulling the data into a dataframe according to various simple filters."""
tracker = pawprint_default_tracker_db_with_table
# Ensure the table is empty to begin with
assert len(tracker.read()) == 0
# Add some data
tracker.write(user_id="Pawprint", event="Testing !")
tracker.write(user_id="Pawprint")
tracker.write(event="No user")
tracker.write(
user_id="import this",
event="very zen",
metadata={
"better": "forgiveness",
"worse": "permission",
"ordered": ["simple", "complex", "complicated"],
},
)
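    # Pull the data back through a variety of field selections and filters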
all_data = tracker.read()
pawprint_events = tracker.read(user_id="Pawprint")
id_gt_events = tracker.read(id__gt=10)
id_gte_lt_events = tracker.read(id__gte=1, id__lt=3)
field_events = tracker.read("event", id__lte=100, event="very zen")
contains_events = tracker.read(metadata__contains="better")
not_contains_events = tracker.read(metadata__contains="whisky")
assert len(all_data) == 4
assert len(pawprint_events) == 2
assert len(id_gt_events) == 0
assert len(id_gte_lt_events) == 2
assert len(field_events) == 1
assert len(contains_events) == 1
assert len(not_contains_events) == 0
assert set(all_data.columns) == set(["id", "user_id", "event", "metadata", "timestamp"])
assert set(field_events.columns) == set(["event"])
def test_counts(pawprint_default_tracker_db_with_table):
"""Test counting a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
# Add a bunch of events
query = (
"""
INSERT INTO {} (timestamp, user_id, event) VALUES
('2016-01-01 12:30', 'alice', 'logged_in'),
('2016-01-01 12:40', 'bob', 'logged_in'),
('2016-01-01 16:00', 'charlotte', 'logged_in'),
('2016-01-02 00:00', 'dan', 'logged_in'),
('2016-01-02 00:00', 'elizabeth', 'logged_in'),
('2016-01-05 00:00', 'frank', 'logged_in'),
('2016-01-10 00:00', 'gabrielle', 'logged_in'),
('2016-01-20 00:00', 'hans', 'logged_in'),
('2016-02-01 00:00', 'iris', 'logged_in'),
('2016-02-01 00:00', 'james', 'logged_in'),
('2016-03-01 00:00', 'kelly', 'logged_in'),
('2016-03-01 00:00', 'laura', 'logged_in'),
('2016-03-01 00:00', 'mike', 'not_logged_in')
"""
).format(tracker.table)
pd.io.sql.execute(query, tracker.db)
logins_hourly = tracker.count(event="logged_in", resolution="hour")
logins_daily = tracker.count(event="logged_in")
logins_weekly = tracker.count(event="logged_in", resolution="week")
logins_monthly = tracker.count(event="logged_in", resolution="month")
logins_weekly_left_range = tracker.count(
event="logged_in", resolution="week", start=datetime(2016, 2, 1)
)
logins_weekly_right_range = tracker.count(
event="logged_in", resolution="week", end=datetime(2016, 2, 1)
)
logins_daily_full_range = tracker.count(
event="logged_in", start=datetime(2016, 1, 15), end=datetime(2016, 2, 15)
)
# Hourly
assert len(logins_hourly) == 8
assert np.all(logins_hourly["count"].values == [2, 1, 2, 1, 1, 1, 2, 2])
# Daily
assert len(logins_daily) == 7
assert np.all(logins_daily["count"].values == [3, 2, 1, 1, 1, 2, 2])
# Weekly
assert len(logins_weekly) == 5
assert np.all(logins_weekly["count"].values == [5, 2, 1, 2, 2])
# Others
assert len(logins_monthly) == 3
assert len(logins_weekly_left_range) == 2 # weeks start on Monday
assert len(logins_weekly_right_range) == 4 # and not at the start / end dates provided
assert len(logins_daily_full_range) == 2
def test_sum_and_average(pawprint_default_tracker_db_with_table):
"""Test aggregating a specific event, with date ranges and time resolutions."""
tracker = pawprint_default_tracker_db_with_table
metadata = str('{"val": 1}').replace("'", '"')
# Add a bunch of events
query = (
"""
INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES
('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),
('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),
('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),
('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),
('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),
('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),
('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),
('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),
('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')
"""
).format(table=tracker.table, metadata=metadata)
pd.io.sql.execute(query, tracker.db)
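    # Aggregate the JSON "val" field daily, both across all events and filtered to logins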
x_sum_daily_all = tracker.sum("metadata__val")
x_sum_daily = tracker.sum("metadata__val", event="logged_in")
x_avg_daily_all = tracker.average("metadata__val", event="logged_in")
x_avg_daily = tracker.average("metadata__val", event="logged_in")
assert len(x_sum_daily) == 7
assert np.all(x_sum_daily_all["sum"].values == [3, 2, 1, 1, 1, 2, 3])
assert np.all(x_sum_daily["sum"].values == [3, 2, 1, 1, 1, 2, 2])
assert np.all(x_avg_daily_all["avg"].values == [1, 1, 1, 1, 1, 1, 1])
assert np.all(x_avg_daily["avg"] == x_avg_daily_all["avg"])
def test_parse_fields(pawprint_default_tracker_db):
"""Test args passed to read() and _aggregate() are parsed correctly."""
tracker = pawprint_default_tracker_db
# SELECT * FROM table
args = ()
assert tracker._parse_fields(*args) == "*"
# SELECT event FROM table
args = ("event",)
assert tracker._parse_fields(*args) == "event"
# SELECT user_id, timestamp FROM table
args = ("user_id", "timestamp")
assert tracker._parse_fields(*args) == "user_id, timestamp"
    # SELECT metadata #> '{a, b}' FROM table
args = ("metadata__a__b",)
assert tracker._parse_fields(*args) == "metadata #> '{a, b}' AS json_field"
def test_parse_values(pawprint_default_tracker_db):
"""Test parsing values for write()."""
tracker = pawprint_default_tracker_db
# INSERT INTO table (event) VALUES ('logged_in')
args = ("logged_in",)
assert tracker._parse_values(*args) == "'logged_in'"
# INSERT INTO table (event, user_id) VALUES ('logged_in', 'hannah')
args = ("logged_in", "hannah")
assert tracker._parse_values(*args) == "'logged_in', 'hannah'"
def test_parse_conditionals(pawprint_default_tracker_db):
"""Test kwargs passed to read() and _aggregate() are parsed correctly."""
tracker = pawprint_default_tracker_db
# SELECT * FROM table
kwargs = {}
assert tracker._parse_conditionals(**kwargs) == ""
# SELECT * FROM table WHERE user_id = 'Quentin'
kwargs = {"user_id": "Quentin"}
assert tracker._parse_conditionals(**kwargs) == "WHERE user_id = 'Quentin'"
# SELECT * FROM table WHERE event = 'logged_in' AND user_id = 'Quentin'
kwargs = {"event": "logged_in", "user_id": "Quentin"}
assert tracker._parse_conditionals(**kwargs) in (
"WHERE event = 'logged_in' AND user_id = 'Quentin'",
"WHERE user_id = 'Quentin' AND event = 'logged_in'",
)
# SELECT * FROM table WHERE event IN ('logged_in', 'logged_out')
kwargs = {"event__in": ["logged_in", "logged_out"]}
assert tracker._parse_conditionals(**kwargs) == "WHERE event IN ('logged_in', 'logged_out')"
def test_accessing_json_fields(pawprint_default_tracker_db_with_table):
"""Test some structured data pulling."""
tracker = pawprint_default_tracker_db_with_table
# JSON objects in our tracking database
simple = {"integral": "derivative"}
medium = {"montecarlo": {"prior": "likelihood"}}
difficult = {
"deepnet": ["mlp", "cnn", "rnn"],
"ensembles": {"random": "forest", "always": {"cross_validate": ["kfold", "stratified"]}},
}
tracker.write(event="maths", metadata=simple)
tracker.write(event="stats", metadata=medium)
tracker.write(event="ml", metadata=difficult)
maths_all = tracker.read("metadata__integral")
maths_condition = tracker.read("metadata__integral", event="maths")
assert len(maths_all) == 3
assert len(maths_condition) == 1
assert list(maths_all.json_field) == ["derivative", None, None]
stats = tracker.read("metadata__montecarlo__prior").dropna()
assert len(stats) == 1
assert stats.json_field.iloc[0] == "likelihood"
types_of_nn = tracker.read("metadata__deepnet").dropna()
best_nn = tracker.read("metadata__deepnet__1").dropna()
full_depth = tracker.read("metadata__ensembles__always__cross_validate__0").dropna()
assert len(types_of_nn) == 1
assert len(best_nn) == 1
assert best_nn.json_field.iloc[0] == "cnn"
assert len(full_depth) == 1
assert full_depth.json_field.iloc[0] == "kfold"
def test_json_maths(pawprint_default_tracker_db_with_table):
"""More advanced operations on JSON subfields."""
tracker = pawprint_default_tracker_db_with_table
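    # Nested metadata exercises JSON containment checks and numeric comparisons on subfields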
tracker.write(event="whisky", metadata={"uigeadail": {"value": 123, "lagavulin": [4, 2]}})
tracker.write(event="whisky", metadata={"uigeadail": {"value": 456, "lagavulin": [5, 0]}})
tracker.write(event="whisky", metadata={"uigeadail": {"value": 758, "lagavulin": [7, 10]}})
tracker.write(event="armagnac", metadata={"age": "XO"})
tracker.write(event="armagnac", metadata={"age": 15})
assert len(tracker.read()) == 5
assert len(tracker.read(metadata__uigeadail__contains="lagavulin")) == 3
assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2
assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3
whiskies = tracker.sum("metadata__uigeadail__value")
assert len(whiskies) == 1
assert whiskies.iloc[0]["sum"] == 1337
assert len(tracker.read(metadata__contains="age")) == 2
assert len(tracker.read(metadata__age="XO")) == 1
def test_silent_write_errors():
"""When a failure occurs in event write, it should fail silently."""
tracker = pawprint.Tracker(db=None, table=None)
try:
tracker.write(event="This will fail silently.")
except Exception:
pytest.fail("Failed to fail silently.")
def test_nonsilent_write_errors(error_logger):
"""Test non-silent write errors that should output to the logger or raise exceptions."""
tracker = pawprint.Tracker(db="postgresql:///fail", logger=error_logger)
with pytest.raises(Exception):
tracker.write()
with pytest.raises(Exception):
tracker.write(event="going_to_fail")
with open("pawprint.log", mode="r") as f:
logs = f.readlines()
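    # Each failed write produces three log lines, with the query in the first line of each group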
print(logs[3])
assert len(logs) == 6
assert logs[0].startswith("pawprint: pawprint failed to write.")
assert "Table: None. Query: INSERT INTO None () VALUES ();" in logs[0]
assert "Query: INSERT INTO None (event) VALUES ('going_to_fail')" in logs[3]
os.remove("pawprint.log")
def test_auto_timestamp(db_string):
"""Ensure that timestamps are autopopulated correctly if not passed."""
# Define a schema where the timestamp doesn't automatically populate through the database
schema = {"event": "TEXT", "timestamp": "TIMESTAMP"}
    # Put together two trackers: one that autopopulates the timestamp, and one that doesn't
no_auto = pawprint.Tracker(db=db_string, table="no_auto", auto_timestamp=False, schema=schema)
auto = pawprint.Tracker(db=db_string, table="auto", auto_timestamp=True, schema=schema)
# Create clean tables
no_auto.create_table()
auto.create_table()
# Write events with no timestamp
no_auto.write(event="foo")
auto.write(event="bar")
assert len(no_auto.read()) == 1
assert len(auto.read()) == 1
assert len(no_auto.read().dropna()) == 0
assert len(auto.read().dropna()) == 1
# Drop tables at the end
no_auto.drop_table()
auto.drop_table()
def test_repr_and_str(pawprint_default_tracker_db):
"""Test the __repr__ and __str__."""
tracker = pawprint_default_tracker_db
expected_repr = "pawprint.Tracker on table '{}' and database '{}'".format(
tracker.table, tracker.db
)
expected_str = "pawprint Tracker object.\ndb : {}\ntable : {}".format(tracker.db, tracker.table)
assert tracker.__repr__() == expected_repr
assert tracker.__str__() == expected_str
def test_malicious_strings(pawprint_default_tracker_db_with_table):
"""Test that SQL injection strings are sanitized"""
tracker = pawprint_default_tracker_db_with_table
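    # First attempt: smuggle an INSERT statement through a metadata string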
tracker.write(
event="armageddon",
metadata={
"shady business": {
"with": "the following string",
"of sql": "50');INSERT INTO {table} (event, user_id) VALUES "
"('you got pwnd', '50".format(table=tracker.table),
}
},
)
assert len(tracker.read()) == 1
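    # Second attempt: a quote-escape plus DROP TABLE inside the metadata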
tracker.write(
event="armageddon",
metadata={
"more shady business": {
"my shady sql": "' OR '1'='1;DROP TABLE {table};".format(table=tracker.table)
}
},
)
assert len(tracker.read()) == 2
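    # Third attempt: injection in both the event name and the metadata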
tracker.write(
event="' OR '1'='1;",
metadata={"foo": "x'); DROP TABLE {table}; --".format(table=tracker.table)},
)
assert len(tracker.read()) == 3
def test_escaping_from_quotes(pawprint_default_tracker_db_with_table):
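    """Ensure metadata strings containing single quotes are escaped and written correctly."""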
tracker = pawprint_default_tracker_db_with_table
tracker.write(
event="known crummy string",
metadata={
"foo": {
"toState": "#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2",
"fromState": "#/app/dealdetails/2345",
"platform": "iOS App",
}
},
)
assert len(tracker.read()) == 1
|
flexible
|
{
"blob_id": "89a75ae980b7b48d33d0e8aa53ec92296dbfbc8e",
"index": 2843,
"step-1": "<mask token>\n\n\ndef test_create_table_with_default_options(pawprint_default_tracker_db):\n \"\"\"Ensure the table is correctly created with the default schema.\"\"\"\n tracker = pawprint_default_tracker_db\n assert tracker.create_table() is None\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table\n ), tracker.db).fetchall() == [(0,)]\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'\"\n .format(tracker.table), tracker.db).fetchall()\n expected_schema = [(u'id', u'integer', None), (u'timestamp',\n u'timestamp without time zone', None), (u'user_id', u'text', None),\n (u'event', u'text', None), (u'metadata', u'jsonb', None)]\n assert schema == expected_schema\n\n\ndef test_drop_table(pawprint_default_tracker_db_with_table):\n \"\"\"Ensure that tables are deleted successfully.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n tracker.drop_table()\n with pytest.raises(ProgrammingError):\n tracker.drop_table()\n\n\n<mask token>\n\n\ndef test_create_table_with_other_options(drop_tracker_test_table, db_string,\n tracker_test_table_name):\n \"\"\"Ensure the table is correctly created with an alternative schema.\"\"\"\n schema = OrderedDict([('pk', 'SERIAL PRIMARY KEY'), ('infofield', 'TEXT')])\n tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,\n schema=schema)\n tracker.create_table()\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'\"\n .format(tracker.table), tracker.db).fetchall()\n assert schema == [('pk', 'integer', None), ('infofield', 'text', None)]\n\n\n<mask token>\n\n\ndef test_counts(pawprint_default_tracker_db_with_table):\n \"\"\"Test counting a specific event, with date ranges and time resolutions.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n .format(tracker.table))\n pd.io.sql.execute(query, tracker.db)\n logins_hourly = tracker.count(event='logged_in', resolution='hour')\n logins_daily = tracker.count(event='logged_in')\n logins_weekly = tracker.count(event='logged_in', resolution='week')\n logins_monthly = tracker.count(event='logged_in', resolution='month')\n logins_weekly_left_range = tracker.count(event='logged_in', resolution=\n 'week', start=datetime(2016, 2, 1))\n logins_weekly_right_range = tracker.count(event='logged_in', resolution\n ='week', end=datetime(2016, 2, 1))\n logins_daily_full_range = tracker.count(event='logged_in', start=\n datetime(2016, 1, 15), end=datetime(2016, 2, 15))\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly['count'].values == [2, 1, 2, 1, 1, 1, 2, 2])\n assert 
len(logins_daily) == 7\n assert np.all(logins_daily['count'].values == [3, 2, 1, 1, 1, 2, 2])\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly['count'].values == [5, 2, 1, 2, 2])\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2\n assert len(logins_weekly_right_range) == 4\n assert len(logins_daily_full_range) == 2\n\n\ndef test_sum_and_average(pawprint_default_tracker_db_with_table):\n \"\"\"Test aggregating a specific event, with date ranges and time resolutions.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n metadata = str('{\"val\": 1}').replace(\"'\", '\"')\n query = (\n \"\"\"\n INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),\n ('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),\n ('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),\n ('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')\n \"\"\"\n .format(table=tracker.table, metadata=metadata))\n pd.io.sql.execute(query, tracker.db)\n x_sum_daily_all = tracker.sum('metadata__val')\n x_sum_daily = tracker.sum('metadata__val', event='logged_in')\n x_avg_daily_all = tracker.average('metadata__val', event='logged_in')\n x_avg_daily = tracker.average('metadata__val', event='logged_in')\n assert len(x_sum_daily) == 7\n assert np.all(x_sum_daily_all['sum'].values == [3, 2, 1, 1, 1, 2, 3])\n assert np.all(x_sum_daily['sum'].values == [3, 2, 1, 1, 1, 2, 2])\n assert np.all(x_avg_daily_all['avg'].values == [1, 1, 1, 1, 1, 1, 1])\n assert np.all(x_avg_daily['avg'] == x_avg_daily_all['avg'])\n\n\n<mask token>\n\n\ndef test_silent_write_errors():\n \"\"\"When a failure occurs in event write, it should fail silently.\"\"\"\n tracker = pawprint.Tracker(db=None, table=None)\n try:\n tracker.write(event='This will fail silently.')\n except Exception:\n pytest.fail('Failed to fail silently.')\n\n\n<mask token>\n\n\ndef test_malicious_strings(pawprint_default_tracker_db_with_table):\n \"\"\"Test that SQL injection strings are sanitized\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='armageddon', metadata={'shady business': {'with':\n 'the following string', 'of sql':\n \"50');INSERT INTO {table} (event, user_id) VALUES ('you got pwnd', '50\"\n .format(table=tracker.table)}})\n assert len(tracker.read()) == 1\n tracker.write(event='armageddon', metadata={'more shady business': {\n 'my shady sql': \"' OR '1'='1;DROP TABLE {table};\".format(table=\n tracker.table)}})\n assert len(tracker.read()) == 2\n tracker.write(event=\"' OR '1'='1;\", metadata={'foo':\n \"x'); DROP TABLE {table}; --\".format(table=tracker.table)})\n assert len(tracker.read()) == 3\n\n\ndef test_escaping_from_quotes(pawprint_default_tracker_db_with_table):\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='known crummy string', metadata={'foo': {'toState':\n \"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2\",\n 'fromState': 
'#/app/dealdetails/2345', 'platform': 'iOS App'}})\n assert len(tracker.read()) == 1\n",
"step-2": "<mask token>\n\n\ndef test_create_table_with_default_options(pawprint_default_tracker_db):\n \"\"\"Ensure the table is correctly created with the default schema.\"\"\"\n tracker = pawprint_default_tracker_db\n assert tracker.create_table() is None\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table\n ), tracker.db).fetchall() == [(0,)]\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'\"\n .format(tracker.table), tracker.db).fetchall()\n expected_schema = [(u'id', u'integer', None), (u'timestamp',\n u'timestamp without time zone', None), (u'user_id', u'text', None),\n (u'event', u'text', None), (u'metadata', u'jsonb', None)]\n assert schema == expected_schema\n\n\ndef test_drop_table(pawprint_default_tracker_db_with_table):\n \"\"\"Ensure that tables are deleted successfully.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n tracker.drop_table()\n with pytest.raises(ProgrammingError):\n tracker.drop_table()\n\n\n<mask token>\n\n\ndef test_create_table_with_other_options(drop_tracker_test_table, db_string,\n tracker_test_table_name):\n \"\"\"Ensure the table is correctly created with an alternative schema.\"\"\"\n schema = OrderedDict([('pk', 'SERIAL PRIMARY KEY'), ('infofield', 'TEXT')])\n tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,\n schema=schema)\n tracker.create_table()\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'\"\n .format(tracker.table), tracker.db).fetchall()\n assert schema == [('pk', 'integer', None), ('infofield', 'text', None)]\n\n\n<mask token>\n\n\ndef test_counts(pawprint_default_tracker_db_with_table):\n \"\"\"Test counting a specific event, with date ranges and time resolutions.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n .format(tracker.table))\n pd.io.sql.execute(query, tracker.db)\n logins_hourly = tracker.count(event='logged_in', resolution='hour')\n logins_daily = tracker.count(event='logged_in')\n logins_weekly = tracker.count(event='logged_in', resolution='week')\n logins_monthly = tracker.count(event='logged_in', resolution='month')\n logins_weekly_left_range = tracker.count(event='logged_in', resolution=\n 'week', start=datetime(2016, 2, 1))\n logins_weekly_right_range = tracker.count(event='logged_in', resolution\n ='week', end=datetime(2016, 2, 1))\n logins_daily_full_range = tracker.count(event='logged_in', start=\n datetime(2016, 1, 15), end=datetime(2016, 2, 15))\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly['count'].values == [2, 1, 2, 1, 1, 1, 2, 2])\n assert 
len(logins_daily) == 7\n assert np.all(logins_daily['count'].values == [3, 2, 1, 1, 1, 2, 2])\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly['count'].values == [5, 2, 1, 2, 2])\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2\n assert len(logins_weekly_right_range) == 4\n assert len(logins_daily_full_range) == 2\n\n\ndef test_sum_and_average(pawprint_default_tracker_db_with_table):\n \"\"\"Test aggregating a specific event, with date ranges and time resolutions.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n metadata = str('{\"val\": 1}').replace(\"'\", '\"')\n query = (\n \"\"\"\n INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),\n ('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),\n ('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),\n ('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')\n \"\"\"\n .format(table=tracker.table, metadata=metadata))\n pd.io.sql.execute(query, tracker.db)\n x_sum_daily_all = tracker.sum('metadata__val')\n x_sum_daily = tracker.sum('metadata__val', event='logged_in')\n x_avg_daily_all = tracker.average('metadata__val', event='logged_in')\n x_avg_daily = tracker.average('metadata__val', event='logged_in')\n assert len(x_sum_daily) == 7\n assert np.all(x_sum_daily_all['sum'].values == [3, 2, 1, 1, 1, 2, 3])\n assert np.all(x_sum_daily['sum'].values == [3, 2, 1, 1, 1, 2, 2])\n assert np.all(x_avg_daily_all['avg'].values == [1, 1, 1, 1, 1, 1, 1])\n assert np.all(x_avg_daily['avg'] == x_avg_daily_all['avg'])\n\n\ndef test_parse_fields(pawprint_default_tracker_db):\n \"\"\"Test args passed to read() and _aggregate() are parsed correctly.\"\"\"\n tracker = pawprint_default_tracker_db\n args = ()\n assert tracker._parse_fields(*args) == '*'\n args = 'event',\n assert tracker._parse_fields(*args) == 'event'\n args = 'user_id', 'timestamp'\n assert tracker._parse_fields(*args) == 'user_id, timestamp'\n args = 'metadata__a__b',\n assert tracker._parse_fields(*args) == \"metadata #> '{a, b}' AS json_field\"\n\n\ndef test_parse_values(pawprint_default_tracker_db):\n \"\"\"Test parsing values for write().\"\"\"\n tracker = pawprint_default_tracker_db\n args = 'logged_in',\n assert tracker._parse_values(*args) == \"'logged_in'\"\n args = 'logged_in', 'hannah'\n assert tracker._parse_values(*args) == \"'logged_in', 'hannah'\"\n\n\ndef test_parse_conditionals(pawprint_default_tracker_db):\n \"\"\"Test kwargs passed to read() and _aggregate() are parsed correctly.\"\"\"\n tracker = pawprint_default_tracker_db\n kwargs = {}\n assert tracker._parse_conditionals(**kwargs) == ''\n kwargs = {'user_id': 'Quentin'}\n assert tracker._parse_conditionals(**kwargs) == \"WHERE user_id = 'Quentin'\"\n kwargs = {'event': 'logged_in', 'user_id': 'Quentin'}\n assert tracker._parse_conditionals(**kwargs) in (\n \"WHERE event = 'logged_in' AND user_id = 'Quentin'\",\n \"WHERE user_id = 'Quentin' AND event = 
'logged_in'\")\n kwargs = {'event__in': ['logged_in', 'logged_out']}\n assert tracker._parse_conditionals(**kwargs\n ) == \"WHERE event IN ('logged_in', 'logged_out')\"\n\n\ndef test_accessing_json_fields(pawprint_default_tracker_db_with_table):\n \"\"\"Test some structured data pulling.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n simple = {'integral': 'derivative'}\n medium = {'montecarlo': {'prior': 'likelihood'}}\n difficult = {'deepnet': ['mlp', 'cnn', 'rnn'], 'ensembles': {'random':\n 'forest', 'always': {'cross_validate': ['kfold', 'stratified']}}}\n tracker.write(event='maths', metadata=simple)\n tracker.write(event='stats', metadata=medium)\n tracker.write(event='ml', metadata=difficult)\n maths_all = tracker.read('metadata__integral')\n maths_condition = tracker.read('metadata__integral', event='maths')\n assert len(maths_all) == 3\n assert len(maths_condition) == 1\n assert list(maths_all.json_field) == ['derivative', None, None]\n stats = tracker.read('metadata__montecarlo__prior').dropna()\n assert len(stats) == 1\n assert stats.json_field.iloc[0] == 'likelihood'\n types_of_nn = tracker.read('metadata__deepnet').dropna()\n best_nn = tracker.read('metadata__deepnet__1').dropna()\n full_depth = tracker.read('metadata__ensembles__always__cross_validate__0'\n ).dropna()\n assert len(types_of_nn) == 1\n assert len(best_nn) == 1\n assert best_nn.json_field.iloc[0] == 'cnn'\n assert len(full_depth) == 1\n assert full_depth.json_field.iloc[0] == 'kfold'\n\n\ndef test_json_maths(pawprint_default_tracker_db_with_table):\n \"\"\"More advanced operations on JSON subfields.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='whisky', metadata={'uigeadail': {'value': 123,\n 'lagavulin': [4, 2]}})\n tracker.write(event='whisky', metadata={'uigeadail': {'value': 456,\n 'lagavulin': [5, 0]}})\n tracker.write(event='whisky', metadata={'uigeadail': {'value': 758,\n 'lagavulin': [7, 10]}})\n tracker.write(event='armagnac', metadata={'age': 'XO'})\n tracker.write(event='armagnac', metadata={'age': 15})\n assert len(tracker.read()) == 5\n assert len(tracker.read(metadata__uigeadail__contains='lagavulin')) == 3\n assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2\n assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3\n whiskies = tracker.sum('metadata__uigeadail__value')\n assert len(whiskies) == 1\n assert whiskies.iloc[0]['sum'] == 1337\n assert len(tracker.read(metadata__contains='age')) == 2\n assert len(tracker.read(metadata__age='XO')) == 1\n\n\ndef test_silent_write_errors():\n \"\"\"When a failure occurs in event write, it should fail silently.\"\"\"\n tracker = pawprint.Tracker(db=None, table=None)\n try:\n tracker.write(event='This will fail silently.')\n except Exception:\n pytest.fail('Failed to fail silently.')\n\n\ndef test_nonsilent_write_errors(error_logger):\n \"\"\"Test non-silent write errors that should output to the logger or raise exceptions.\"\"\"\n tracker = pawprint.Tracker(db='postgresql:///fail', logger=error_logger)\n with pytest.raises(Exception):\n tracker.write()\n with pytest.raises(Exception):\n tracker.write(event='going_to_fail')\n with open('pawprint.log', mode='r') as f:\n logs = f.readlines()\n print(logs[3])\n assert len(logs) == 6\n assert logs[0].startswith('pawprint: pawprint failed to write.')\n assert 'Table: None. 
Query: INSERT INTO None () VALUES ();' in logs[0]\n assert \"Query: INSERT INTO None (event) VALUES ('going_to_fail')\" in logs[3\n ]\n os.remove('pawprint.log')\n\n\n<mask token>\n\n\ndef test_repr_and_str(pawprint_default_tracker_db):\n \"\"\"Test the __repr__ and __str__.\"\"\"\n tracker = pawprint_default_tracker_db\n expected_repr = \"pawprint.Tracker on table '{}' and database '{}'\".format(\n tracker.table, tracker.db)\n expected_str = 'pawprint Tracker object.\\ndb : {}\\ntable : {}'.format(\n tracker.db, tracker.table)\n assert tracker.__repr__() == expected_repr\n assert tracker.__str__() == expected_str\n\n\ndef test_malicious_strings(pawprint_default_tracker_db_with_table):\n \"\"\"Test that SQL injection strings are sanitized\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='armageddon', metadata={'shady business': {'with':\n 'the following string', 'of sql':\n \"50');INSERT INTO {table} (event, user_id) VALUES ('you got pwnd', '50\"\n .format(table=tracker.table)}})\n assert len(tracker.read()) == 1\n tracker.write(event='armageddon', metadata={'more shady business': {\n 'my shady sql': \"' OR '1'='1;DROP TABLE {table};\".format(table=\n tracker.table)}})\n assert len(tracker.read()) == 2\n tracker.write(event=\"' OR '1'='1;\", metadata={'foo':\n \"x'); DROP TABLE {table}; --\".format(table=tracker.table)})\n assert len(tracker.read()) == 3\n\n\ndef test_escaping_from_quotes(pawprint_default_tracker_db_with_table):\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='known crummy string', metadata={'foo': {'toState':\n \"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2\",\n 'fromState': '#/app/dealdetails/2345', 'platform': 'iOS App'}})\n assert len(tracker.read()) == 1\n",
"step-3": "<mask token>\n\n\ndef test_create_table_with_default_options(pawprint_default_tracker_db):\n \"\"\"Ensure the table is correctly created with the default schema.\"\"\"\n tracker = pawprint_default_tracker_db\n assert tracker.create_table() is None\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table\n ), tracker.db).fetchall() == [(0,)]\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'\"\n .format(tracker.table), tracker.db).fetchall()\n expected_schema = [(u'id', u'integer', None), (u'timestamp',\n u'timestamp without time zone', None), (u'user_id', u'text', None),\n (u'event', u'text', None), (u'metadata', u'jsonb', None)]\n assert schema == expected_schema\n\n\ndef test_drop_table(pawprint_default_tracker_db_with_table):\n \"\"\"Ensure that tables are deleted successfully.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n tracker.drop_table()\n with pytest.raises(ProgrammingError):\n tracker.drop_table()\n\n\ndef test_instantiate_tracker_from_dot_file(drop_tracker_test_table):\n \"\"\"Test instantiating a Tracker with a dotfile instead of using db and table strings.\"\"\"\n dotfile = {'db': 'postgresql:///little_bean_toes', 'json_field':\n 'such_fuzzy'}\n with open('.pawprint', 'w') as f:\n json.dump(dotfile, f)\n tracker = pawprint.Tracker(dotfile='.pawprint', json_field='boop')\n assert tracker.db == 'postgresql:///little_bean_toes'\n assert tracker.table is None\n assert tracker.json_field == 'boop'\n os.remove('.pawprint')\n\n\ndef test_create_table_with_other_options(drop_tracker_test_table, db_string,\n tracker_test_table_name):\n \"\"\"Ensure the table is correctly created with an alternative schema.\"\"\"\n schema = OrderedDict([('pk', 'SERIAL PRIMARY KEY'), ('infofield', 'TEXT')])\n tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,\n schema=schema)\n tracker.create_table()\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'\"\n .format(tracker.table), tracker.db).fetchall()\n assert schema == [('pk', 'integer', None), ('infofield', 'text', None)]\n\n\n<mask token>\n\n\ndef test_read(pawprint_default_tracker_db_with_table):\n \"\"\"Test pulling the data into a dataframe according to various simple filters.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n assert len(tracker.read()) == 0\n tracker.write(user_id='Pawprint', event='Testing !')\n tracker.write(user_id='Pawprint')\n tracker.write(event='No user')\n tracker.write(user_id='import this', event='very zen', metadata={\n 'better': 'forgiveness', 'worse': 'permission', 'ordered': [\n 'simple', 'complex', 'complicated']})\n all_data = tracker.read()\n pawprint_events = tracker.read(user_id='Pawprint')\n id_gt_events = tracker.read(id__gt=10)\n id_gte_lt_events = tracker.read(id__gte=1, id__lt=3)\n field_events = tracker.read('event', id__lte=100, event='very zen')\n contains_events = tracker.read(metadata__contains='better')\n not_contains_events = tracker.read(metadata__contains='whisky')\n assert len(all_data) == 4\n assert len(pawprint_events) == 2\n assert len(id_gt_events) == 0\n assert len(id_gte_lt_events) == 2\n assert len(field_events) == 1\n assert len(contains_events) == 1\n assert len(not_contains_events) == 0\n assert 
set(all_data.columns) == set(['id', 'user_id', 'event',\n 'metadata', 'timestamp'])\n assert set(field_events.columns) == set(['event'])\n\n\ndef test_counts(pawprint_default_tracker_db_with_table):\n \"\"\"Test counting a specific event, with date ranges and time resolutions.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n .format(tracker.table))\n pd.io.sql.execute(query, tracker.db)\n logins_hourly = tracker.count(event='logged_in', resolution='hour')\n logins_daily = tracker.count(event='logged_in')\n logins_weekly = tracker.count(event='logged_in', resolution='week')\n logins_monthly = tracker.count(event='logged_in', resolution='month')\n logins_weekly_left_range = tracker.count(event='logged_in', resolution=\n 'week', start=datetime(2016, 2, 1))\n logins_weekly_right_range = tracker.count(event='logged_in', resolution\n ='week', end=datetime(2016, 2, 1))\n logins_daily_full_range = tracker.count(event='logged_in', start=\n datetime(2016, 1, 15), end=datetime(2016, 2, 15))\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly['count'].values == [2, 1, 2, 1, 1, 1, 2, 2])\n assert len(logins_daily) == 7\n assert np.all(logins_daily['count'].values == [3, 2, 1, 1, 1, 2, 2])\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly['count'].values == [5, 2, 1, 2, 2])\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2\n assert len(logins_weekly_right_range) == 4\n assert len(logins_daily_full_range) == 2\n\n\ndef test_sum_and_average(pawprint_default_tracker_db_with_table):\n \"\"\"Test aggregating a specific event, with date ranges and time resolutions.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n metadata = str('{\"val\": 1}').replace(\"'\", '\"')\n query = (\n \"\"\"\n INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),\n ('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),\n ('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),\n ('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')\n \"\"\"\n .format(table=tracker.table, metadata=metadata))\n pd.io.sql.execute(query, tracker.db)\n x_sum_daily_all = tracker.sum('metadata__val')\n x_sum_daily = tracker.sum('metadata__val', event='logged_in')\n x_avg_daily_all = 
tracker.average('metadata__val', event='logged_in')\n x_avg_daily = tracker.average('metadata__val', event='logged_in')\n assert len(x_sum_daily) == 7\n assert np.all(x_sum_daily_all['sum'].values == [3, 2, 1, 1, 1, 2, 3])\n assert np.all(x_sum_daily['sum'].values == [3, 2, 1, 1, 1, 2, 2])\n assert np.all(x_avg_daily_all['avg'].values == [1, 1, 1, 1, 1, 1, 1])\n assert np.all(x_avg_daily['avg'] == x_avg_daily_all['avg'])\n\n\ndef test_parse_fields(pawprint_default_tracker_db):\n \"\"\"Test args passed to read() and _aggregate() are parsed correctly.\"\"\"\n tracker = pawprint_default_tracker_db\n args = ()\n assert tracker._parse_fields(*args) == '*'\n args = 'event',\n assert tracker._parse_fields(*args) == 'event'\n args = 'user_id', 'timestamp'\n assert tracker._parse_fields(*args) == 'user_id, timestamp'\n args = 'metadata__a__b',\n assert tracker._parse_fields(*args) == \"metadata #> '{a, b}' AS json_field\"\n\n\ndef test_parse_values(pawprint_default_tracker_db):\n \"\"\"Test parsing values for write().\"\"\"\n tracker = pawprint_default_tracker_db\n args = 'logged_in',\n assert tracker._parse_values(*args) == \"'logged_in'\"\n args = 'logged_in', 'hannah'\n assert tracker._parse_values(*args) == \"'logged_in', 'hannah'\"\n\n\ndef test_parse_conditionals(pawprint_default_tracker_db):\n \"\"\"Test kwargs passed to read() and _aggregate() are parsed correctly.\"\"\"\n tracker = pawprint_default_tracker_db\n kwargs = {}\n assert tracker._parse_conditionals(**kwargs) == ''\n kwargs = {'user_id': 'Quentin'}\n assert tracker._parse_conditionals(**kwargs) == \"WHERE user_id = 'Quentin'\"\n kwargs = {'event': 'logged_in', 'user_id': 'Quentin'}\n assert tracker._parse_conditionals(**kwargs) in (\n \"WHERE event = 'logged_in' AND user_id = 'Quentin'\",\n \"WHERE user_id = 'Quentin' AND event = 'logged_in'\")\n kwargs = {'event__in': ['logged_in', 'logged_out']}\n assert tracker._parse_conditionals(**kwargs\n ) == \"WHERE event IN ('logged_in', 'logged_out')\"\n\n\ndef test_accessing_json_fields(pawprint_default_tracker_db_with_table):\n \"\"\"Test some structured data pulling.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n simple = {'integral': 'derivative'}\n medium = {'montecarlo': {'prior': 'likelihood'}}\n difficult = {'deepnet': ['mlp', 'cnn', 'rnn'], 'ensembles': {'random':\n 'forest', 'always': {'cross_validate': ['kfold', 'stratified']}}}\n tracker.write(event='maths', metadata=simple)\n tracker.write(event='stats', metadata=medium)\n tracker.write(event='ml', metadata=difficult)\n maths_all = tracker.read('metadata__integral')\n maths_condition = tracker.read('metadata__integral', event='maths')\n assert len(maths_all) == 3\n assert len(maths_condition) == 1\n assert list(maths_all.json_field) == ['derivative', None, None]\n stats = tracker.read('metadata__montecarlo__prior').dropna()\n assert len(stats) == 1\n assert stats.json_field.iloc[0] == 'likelihood'\n types_of_nn = tracker.read('metadata__deepnet').dropna()\n best_nn = tracker.read('metadata__deepnet__1').dropna()\n full_depth = tracker.read('metadata__ensembles__always__cross_validate__0'\n ).dropna()\n assert len(types_of_nn) == 1\n assert len(best_nn) == 1\n assert best_nn.json_field.iloc[0] == 'cnn'\n assert len(full_depth) == 1\n assert full_depth.json_field.iloc[0] == 'kfold'\n\n\ndef test_json_maths(pawprint_default_tracker_db_with_table):\n \"\"\"More advanced operations on JSON subfields.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='whisky', metadata={'uigeadail': 
{'value': 123,\n 'lagavulin': [4, 2]}})\n tracker.write(event='whisky', metadata={'uigeadail': {'value': 456,\n 'lagavulin': [5, 0]}})\n tracker.write(event='whisky', metadata={'uigeadail': {'value': 758,\n 'lagavulin': [7, 10]}})\n tracker.write(event='armagnac', metadata={'age': 'XO'})\n tracker.write(event='armagnac', metadata={'age': 15})\n assert len(tracker.read()) == 5\n assert len(tracker.read(metadata__uigeadail__contains='lagavulin')) == 3\n assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2\n assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3\n whiskies = tracker.sum('metadata__uigeadail__value')\n assert len(whiskies) == 1\n assert whiskies.iloc[0]['sum'] == 1337\n assert len(tracker.read(metadata__contains='age')) == 2\n assert len(tracker.read(metadata__age='XO')) == 1\n\n\ndef test_silent_write_errors():\n \"\"\"When a failure occurs in event write, it should fail silently.\"\"\"\n tracker = pawprint.Tracker(db=None, table=None)\n try:\n tracker.write(event='This will fail silently.')\n except Exception:\n pytest.fail('Failed to fail silently.')\n\n\ndef test_nonsilent_write_errors(error_logger):\n \"\"\"Test non-silent write errors that should output to the logger or raise exceptions.\"\"\"\n tracker = pawprint.Tracker(db='postgresql:///fail', logger=error_logger)\n with pytest.raises(Exception):\n tracker.write()\n with pytest.raises(Exception):\n tracker.write(event='going_to_fail')\n with open('pawprint.log', mode='r') as f:\n logs = f.readlines()\n print(logs[3])\n assert len(logs) == 6\n assert logs[0].startswith('pawprint: pawprint failed to write.')\n assert 'Table: None. Query: INSERT INTO None () VALUES ();' in logs[0]\n assert \"Query: INSERT INTO None (event) VALUES ('going_to_fail')\" in logs[3\n ]\n os.remove('pawprint.log')\n\n\ndef test_auto_timestamp(db_string):\n \"\"\"Ensure that timestamps are autopopulated correctly if not passed.\"\"\"\n schema = {'event': 'TEXT', 'timestamp': 'TIMESTAMP'}\n no_auto = pawprint.Tracker(db=db_string, table='no_auto',\n auto_timestamp=False, schema=schema)\n auto = pawprint.Tracker(db=db_string, table='auto', auto_timestamp=True,\n schema=schema)\n no_auto.create_table()\n auto.create_table()\n no_auto.write(event='foo')\n auto.write(event='bar')\n assert len(no_auto.read()) == 1\n assert len(auto.read()) == 1\n assert len(no_auto.read().dropna()) == 0\n assert len(auto.read().dropna()) == 1\n no_auto.drop_table()\n auto.drop_table()\n\n\ndef test_repr_and_str(pawprint_default_tracker_db):\n \"\"\"Test the __repr__ and __str__.\"\"\"\n tracker = pawprint_default_tracker_db\n expected_repr = \"pawprint.Tracker on table '{}' and database '{}'\".format(\n tracker.table, tracker.db)\n expected_str = 'pawprint Tracker object.\\ndb : {}\\ntable : {}'.format(\n tracker.db, tracker.table)\n assert tracker.__repr__() == expected_repr\n assert tracker.__str__() == expected_str\n\n\ndef test_malicious_strings(pawprint_default_tracker_db_with_table):\n \"\"\"Test that SQL injection strings are sanitized\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='armageddon', metadata={'shady business': {'with':\n 'the following string', 'of sql':\n \"50');INSERT INTO {table} (event, user_id) VALUES ('you got pwnd', '50\"\n .format(table=tracker.table)}})\n assert len(tracker.read()) == 1\n tracker.write(event='armageddon', metadata={'more shady business': {\n 'my shady sql': \"' OR '1'='1;DROP TABLE {table};\".format(table=\n tracker.table)}})\n assert len(tracker.read()) == 2\n 
tracker.write(event=\"' OR '1'='1;\", metadata={'foo':\n \"x'); DROP TABLE {table}; --\".format(table=tracker.table)})\n assert len(tracker.read()) == 3\n\n\ndef test_escaping_from_quotes(pawprint_default_tracker_db_with_table):\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='known crummy string', metadata={'foo': {'toState':\n \"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2\",\n 'fromState': '#/app/dealdetails/2345', 'platform': 'iOS App'}})\n assert len(tracker.read()) == 1\n",
"step-4": "import os\nimport json\nimport pytest\nfrom datetime import datetime\nfrom collections import OrderedDict\nimport numpy as np\nimport pandas as pd\nfrom sqlalchemy.exc import ProgrammingError\nimport pawprint\n\n\ndef test_create_table_with_default_options(pawprint_default_tracker_db):\n \"\"\"Ensure the table is correctly created with the default schema.\"\"\"\n tracker = pawprint_default_tracker_db\n assert tracker.create_table() is None\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table\n ), tracker.db).fetchall() == [(0,)]\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'\"\n .format(tracker.table), tracker.db).fetchall()\n expected_schema = [(u'id', u'integer', None), (u'timestamp',\n u'timestamp without time zone', None), (u'user_id', u'text', None),\n (u'event', u'text', None), (u'metadata', u'jsonb', None)]\n assert schema == expected_schema\n\n\ndef test_drop_table(pawprint_default_tracker_db_with_table):\n \"\"\"Ensure that tables are deleted successfully.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n tracker.drop_table()\n with pytest.raises(ProgrammingError):\n tracker.drop_table()\n\n\ndef test_instantiate_tracker_from_dot_file(drop_tracker_test_table):\n \"\"\"Test instantiating a Tracker with a dotfile instead of using db and table strings.\"\"\"\n dotfile = {'db': 'postgresql:///little_bean_toes', 'json_field':\n 'such_fuzzy'}\n with open('.pawprint', 'w') as f:\n json.dump(dotfile, f)\n tracker = pawprint.Tracker(dotfile='.pawprint', json_field='boop')\n assert tracker.db == 'postgresql:///little_bean_toes'\n assert tracker.table is None\n assert tracker.json_field == 'boop'\n os.remove('.pawprint')\n\n\ndef test_create_table_with_other_options(drop_tracker_test_table, db_string,\n tracker_test_table_name):\n \"\"\"Ensure the table is correctly created with an alternative schema.\"\"\"\n schema = OrderedDict([('pk', 'SERIAL PRIMARY KEY'), ('infofield', 'TEXT')])\n tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,\n schema=schema)\n tracker.create_table()\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '{}'\"\n .format(tracker.table), tracker.db).fetchall()\n assert schema == [('pk', 'integer', None), ('infofield', 'text', None)]\n\n\ndef test_write(drop_tracker_test_table, db_string, tracker_test_table_name):\n \"\"\"Test the tracking of an event.\"\"\"\n tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name,\n schema={'id': 'INT'})\n tracker.create_table()\n assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table\n ), tracker.db).fetchall() == [(0,)]\n tracker.write(id=1337)\n assert pd.io.sql.execute('SELECT COUNT(*) FROM {}'.format(tracker.table\n ), tracker.db).fetchall() == [(1,)]\n data = pd.read_sql('SELECT * FROM {}'.format(tracker.table), tracker.db)\n assert isinstance(data, pd.DataFrame)\n assert len(data.columns) == 1\n assert data.columns[0] == 'id'\n assert data.id[0] == 1337\n\n\ndef test_read(pawprint_default_tracker_db_with_table):\n \"\"\"Test pulling the data into a dataframe according to various simple filters.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n assert len(tracker.read()) == 0\n tracker.write(user_id='Pawprint', 
event='Testing !')\n tracker.write(user_id='Pawprint')\n tracker.write(event='No user')\n tracker.write(user_id='import this', event='very zen', metadata={\n 'better': 'forgiveness', 'worse': 'permission', 'ordered': [\n 'simple', 'complex', 'complicated']})\n all_data = tracker.read()\n pawprint_events = tracker.read(user_id='Pawprint')\n id_gt_events = tracker.read(id__gt=10)\n id_gte_lt_events = tracker.read(id__gte=1, id__lt=3)\n field_events = tracker.read('event', id__lte=100, event='very zen')\n contains_events = tracker.read(metadata__contains='better')\n not_contains_events = tracker.read(metadata__contains='whisky')\n assert len(all_data) == 4\n assert len(pawprint_events) == 2\n assert len(id_gt_events) == 0\n assert len(id_gte_lt_events) == 2\n assert len(field_events) == 1\n assert len(contains_events) == 1\n assert len(not_contains_events) == 0\n assert set(all_data.columns) == set(['id', 'user_id', 'event',\n 'metadata', 'timestamp'])\n assert set(field_events.columns) == set(['event'])\n\n\ndef test_counts(pawprint_default_tracker_db_with_table):\n \"\"\"Test counting a specific event, with date ranges and time resolutions.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n .format(tracker.table))\n pd.io.sql.execute(query, tracker.db)\n logins_hourly = tracker.count(event='logged_in', resolution='hour')\n logins_daily = tracker.count(event='logged_in')\n logins_weekly = tracker.count(event='logged_in', resolution='week')\n logins_monthly = tracker.count(event='logged_in', resolution='month')\n logins_weekly_left_range = tracker.count(event='logged_in', resolution=\n 'week', start=datetime(2016, 2, 1))\n logins_weekly_right_range = tracker.count(event='logged_in', resolution\n ='week', end=datetime(2016, 2, 1))\n logins_daily_full_range = tracker.count(event='logged_in', start=\n datetime(2016, 1, 15), end=datetime(2016, 2, 15))\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly['count'].values == [2, 1, 2, 1, 1, 1, 2, 2])\n assert len(logins_daily) == 7\n assert np.all(logins_daily['count'].values == [3, 2, 1, 1, 1, 2, 2])\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly['count'].values == [5, 2, 1, 2, 2])\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2\n assert len(logins_weekly_right_range) == 4\n assert len(logins_daily_full_range) == 2\n\n\ndef test_sum_and_average(pawprint_default_tracker_db_with_table):\n \"\"\"Test aggregating a specific event, with date ranges and time resolutions.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n metadata = str('{\"val\": 1}').replace(\"'\", '\"')\n query = (\n \"\"\"\n INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),\n ('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),\n ('2016-01-01 16:00', 'charlotte', 
'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),\n ('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),\n ('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')\n \"\"\"\n .format(table=tracker.table, metadata=metadata))\n pd.io.sql.execute(query, tracker.db)\n x_sum_daily_all = tracker.sum('metadata__val')\n x_sum_daily = tracker.sum('metadata__val', event='logged_in')\n x_avg_daily_all = tracker.average('metadata__val', event='logged_in')\n x_avg_daily = tracker.average('metadata__val', event='logged_in')\n assert len(x_sum_daily) == 7\n assert np.all(x_sum_daily_all['sum'].values == [3, 2, 1, 1, 1, 2, 3])\n assert np.all(x_sum_daily['sum'].values == [3, 2, 1, 1, 1, 2, 2])\n assert np.all(x_avg_daily_all['avg'].values == [1, 1, 1, 1, 1, 1, 1])\n assert np.all(x_avg_daily['avg'] == x_avg_daily_all['avg'])\n\n\ndef test_parse_fields(pawprint_default_tracker_db):\n \"\"\"Test args passed to read() and _aggregate() are parsed correctly.\"\"\"\n tracker = pawprint_default_tracker_db\n args = ()\n assert tracker._parse_fields(*args) == '*'\n args = 'event',\n assert tracker._parse_fields(*args) == 'event'\n args = 'user_id', 'timestamp'\n assert tracker._parse_fields(*args) == 'user_id, timestamp'\n args = 'metadata__a__b',\n assert tracker._parse_fields(*args) == \"metadata #> '{a, b}' AS json_field\"\n\n\ndef test_parse_values(pawprint_default_tracker_db):\n \"\"\"Test parsing values for write().\"\"\"\n tracker = pawprint_default_tracker_db\n args = 'logged_in',\n assert tracker._parse_values(*args) == \"'logged_in'\"\n args = 'logged_in', 'hannah'\n assert tracker._parse_values(*args) == \"'logged_in', 'hannah'\"\n\n\ndef test_parse_conditionals(pawprint_default_tracker_db):\n \"\"\"Test kwargs passed to read() and _aggregate() are parsed correctly.\"\"\"\n tracker = pawprint_default_tracker_db\n kwargs = {}\n assert tracker._parse_conditionals(**kwargs) == ''\n kwargs = {'user_id': 'Quentin'}\n assert tracker._parse_conditionals(**kwargs) == \"WHERE user_id = 'Quentin'\"\n kwargs = {'event': 'logged_in', 'user_id': 'Quentin'}\n assert tracker._parse_conditionals(**kwargs) in (\n \"WHERE event = 'logged_in' AND user_id = 'Quentin'\",\n \"WHERE user_id = 'Quentin' AND event = 'logged_in'\")\n kwargs = {'event__in': ['logged_in', 'logged_out']}\n assert tracker._parse_conditionals(**kwargs\n ) == \"WHERE event IN ('logged_in', 'logged_out')\"\n\n\ndef test_accessing_json_fields(pawprint_default_tracker_db_with_table):\n \"\"\"Test some structured data pulling.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n simple = {'integral': 'derivative'}\n medium = {'montecarlo': {'prior': 'likelihood'}}\n difficult = {'deepnet': ['mlp', 'cnn', 'rnn'], 'ensembles': {'random':\n 'forest', 'always': {'cross_validate': ['kfold', 'stratified']}}}\n tracker.write(event='maths', metadata=simple)\n tracker.write(event='stats', metadata=medium)\n tracker.write(event='ml', metadata=difficult)\n maths_all = tracker.read('metadata__integral')\n maths_condition = tracker.read('metadata__integral', event='maths')\n assert len(maths_all) == 
3\n assert len(maths_condition) == 1\n assert list(maths_all.json_field) == ['derivative', None, None]\n stats = tracker.read('metadata__montecarlo__prior').dropna()\n assert len(stats) == 1\n assert stats.json_field.iloc[0] == 'likelihood'\n types_of_nn = tracker.read('metadata__deepnet').dropna()\n best_nn = tracker.read('metadata__deepnet__1').dropna()\n full_depth = tracker.read('metadata__ensembles__always__cross_validate__0'\n ).dropna()\n assert len(types_of_nn) == 1\n assert len(best_nn) == 1\n assert best_nn.json_field.iloc[0] == 'cnn'\n assert len(full_depth) == 1\n assert full_depth.json_field.iloc[0] == 'kfold'\n\n\ndef test_json_maths(pawprint_default_tracker_db_with_table):\n \"\"\"More advanced operations on JSON subfields.\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='whisky', metadata={'uigeadail': {'value': 123,\n 'lagavulin': [4, 2]}})\n tracker.write(event='whisky', metadata={'uigeadail': {'value': 456,\n 'lagavulin': [5, 0]}})\n tracker.write(event='whisky', metadata={'uigeadail': {'value': 758,\n 'lagavulin': [7, 10]}})\n tracker.write(event='armagnac', metadata={'age': 'XO'})\n tracker.write(event='armagnac', metadata={'age': 15})\n assert len(tracker.read()) == 5\n assert len(tracker.read(metadata__uigeadail__contains='lagavulin')) == 3\n assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2\n assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3\n whiskies = tracker.sum('metadata__uigeadail__value')\n assert len(whiskies) == 1\n assert whiskies.iloc[0]['sum'] == 1337\n assert len(tracker.read(metadata__contains='age')) == 2\n assert len(tracker.read(metadata__age='XO')) == 1\n\n\ndef test_silent_write_errors():\n \"\"\"When a failure occurs in event write, it should fail silently.\"\"\"\n tracker = pawprint.Tracker(db=None, table=None)\n try:\n tracker.write(event='This will fail silently.')\n except Exception:\n pytest.fail('Failed to fail silently.')\n\n\ndef test_nonsilent_write_errors(error_logger):\n \"\"\"Test non-silent write errors that should output to the logger or raise exceptions.\"\"\"\n tracker = pawprint.Tracker(db='postgresql:///fail', logger=error_logger)\n with pytest.raises(Exception):\n tracker.write()\n with pytest.raises(Exception):\n tracker.write(event='going_to_fail')\n with open('pawprint.log', mode='r') as f:\n logs = f.readlines()\n print(logs[3])\n assert len(logs) == 6\n assert logs[0].startswith('pawprint: pawprint failed to write.')\n assert 'Table: None. 
Query: INSERT INTO None () VALUES ();' in logs[0]\n assert \"Query: INSERT INTO None (event) VALUES ('going_to_fail')\" in logs[3\n ]\n os.remove('pawprint.log')\n\n\ndef test_auto_timestamp(db_string):\n \"\"\"Ensure that timestamps are autopopulated correctly if not passed.\"\"\"\n schema = {'event': 'TEXT', 'timestamp': 'TIMESTAMP'}\n no_auto = pawprint.Tracker(db=db_string, table='no_auto',\n auto_timestamp=False, schema=schema)\n auto = pawprint.Tracker(db=db_string, table='auto', auto_timestamp=True,\n schema=schema)\n no_auto.create_table()\n auto.create_table()\n no_auto.write(event='foo')\n auto.write(event='bar')\n assert len(no_auto.read()) == 1\n assert len(auto.read()) == 1\n assert len(no_auto.read().dropna()) == 0\n assert len(auto.read().dropna()) == 1\n no_auto.drop_table()\n auto.drop_table()\n\n\ndef test_repr_and_str(pawprint_default_tracker_db):\n \"\"\"Test the __repr__ and __str__.\"\"\"\n tracker = pawprint_default_tracker_db\n expected_repr = \"pawprint.Tracker on table '{}' and database '{}'\".format(\n tracker.table, tracker.db)\n expected_str = 'pawprint Tracker object.\\ndb : {}\\ntable : {}'.format(\n tracker.db, tracker.table)\n assert tracker.__repr__() == expected_repr\n assert tracker.__str__() == expected_str\n\n\ndef test_malicious_strings(pawprint_default_tracker_db_with_table):\n \"\"\"Test that SQL injection strings are sanitized\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='armageddon', metadata={'shady business': {'with':\n 'the following string', 'of sql':\n \"50');INSERT INTO {table} (event, user_id) VALUES ('you got pwnd', '50\"\n .format(table=tracker.table)}})\n assert len(tracker.read()) == 1\n tracker.write(event='armageddon', metadata={'more shady business': {\n 'my shady sql': \"' OR '1'='1;DROP TABLE {table};\".format(table=\n tracker.table)}})\n assert len(tracker.read()) == 2\n tracker.write(event=\"' OR '1'='1;\", metadata={'foo':\n \"x'); DROP TABLE {table}; --\".format(table=tracker.table)})\n assert len(tracker.read()) == 3\n\n\ndef test_escaping_from_quotes(pawprint_default_tracker_db_with_table):\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(event='known crummy string', metadata={'foo': {'toState':\n \"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2\",\n 'fromState': '#/app/dealdetails/2345', 'platform': 'iOS App'}})\n assert len(tracker.read()) == 1\n",
"step-5": "import os\nimport json\nimport pytest\n\nfrom datetime import datetime\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\nfrom sqlalchemy.exc import ProgrammingError\n\nimport pawprint\n\n\ndef test_create_table_with_default_options(pawprint_default_tracker_db):\n \"\"\"Ensure the table is correctly created with the default schema.\"\"\"\n\n tracker = pawprint_default_tracker_db\n\n # The table shouldn't exist. Assert it's correct created.\n assert tracker.create_table() is None\n\n # Try creating it again. This should raise an error.\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n\n # Assert the table is empty when created\n assert pd.io.sql.execute(\n \"SELECT COUNT(*) FROM {}\".format(tracker.table), tracker.db\n ).fetchall() == [(0,)]\n\n # Ensure its schema is correct\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length \"\n \"FROM INFORMATION_SCHEMA.COLUMNS \"\n \"WHERE table_name = '{}'\".format(tracker.table),\n tracker.db,\n ).fetchall()\n expected_schema = [\n (u\"id\", u\"integer\", None),\n (u\"timestamp\", u\"timestamp without time zone\", None),\n (u\"user_id\", u\"text\", None),\n (u\"event\", u\"text\", None),\n (u\"metadata\", u\"jsonb\", None),\n ]\n assert schema == expected_schema\n\n\ndef test_drop_table(pawprint_default_tracker_db_with_table):\n \"\"\"Ensure that tables are deleted successfully.\"\"\"\n\n tracker = pawprint_default_tracker_db_with_table\n\n # make sure table exists\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n\n tracker.drop_table()\n\n with pytest.raises(ProgrammingError):\n tracker.drop_table()\n\n\ndef test_instantiate_tracker_from_dot_file(drop_tracker_test_table):\n \"\"\"Test instantiating a Tracker with a dotfile instead of using db and table strings.\"\"\"\n\n # Write a dotfile to disk\n dotfile = {\n \"db\": \"postgresql:///little_bean_toes\",\n \"json_field\": \"such_fuzzy\",\n }\n\n with open(\".pawprint\", \"w\") as f:\n json.dump(dotfile, f)\n\n # Create a tracker from this dotfile\n tracker = pawprint.Tracker(dotfile=\".pawprint\", json_field=\"boop\")\n\n # Ensure all the entries are as they should be\n assert tracker.db == \"postgresql:///little_bean_toes\"\n assert tracker.table is None\n # assert tracker.logger is None\n assert tracker.json_field == \"boop\" # field present in dotfile but overwritten in init\n\n os.remove(\".pawprint\")\n\n\ndef test_create_table_with_other_options(\n drop_tracker_test_table, db_string, tracker_test_table_name\n):\n \"\"\"Ensure the table is correctly created with an alternative schema.\"\"\"\n\n schema = OrderedDict([(\"pk\", \"SERIAL PRIMARY KEY\"), (\"infofield\", \"TEXT\")])\n tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name, schema=schema)\n tracker.create_table()\n\n # Ensure its schema is correct\n schema = pd.io.sql.execute(\n \"SELECT column_name, data_type, character_maximum_length \"\n \"FROM INFORMATION_SCHEMA.COLUMNS \"\n \"WHERE table_name = '{}'\".format(tracker.table),\n tracker.db,\n ).fetchall()\n\n assert schema == [(\"pk\", \"integer\", None), (\"infofield\", \"text\", None)]\n\n\ndef test_write(drop_tracker_test_table, db_string, tracker_test_table_name):\n \"\"\"Test the tracking of an event.\"\"\"\n\n tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name, schema={\"id\": \"INT\"})\n tracker.create_table()\n\n # Check the table's empty\n assert pd.io.sql.execute(\n \"SELECT COUNT(*) FROM {}\".format(tracker.table), 
tracker.db\n ).fetchall() == [(0,)]\n\n # Add some data and check if the row count increases by one\n tracker.write(id=1337)\n assert pd.io.sql.execute(\n \"SELECT COUNT(*) FROM {}\".format(tracker.table), tracker.db\n ).fetchall() == [(1,)]\n\n # Pull the data and ensure it's correct\n data = pd.read_sql(\"SELECT * FROM {}\".format(tracker.table), tracker.db)\n assert isinstance(data, pd.DataFrame)\n assert len(data.columns) == 1\n assert data.columns[0] == \"id\"\n assert data.id[0] == 1337\n\n\ndef test_read(pawprint_default_tracker_db_with_table):\n \"\"\"Test pulling the data into a dataframe according to various simple filters.\"\"\"\n\n tracker = pawprint_default_tracker_db_with_table\n\n # Ensure the table is empty to begin with\n assert len(tracker.read()) == 0\n\n # Add some data\n tracker.write(user_id=\"Pawprint\", event=\"Testing !\")\n tracker.write(user_id=\"Pawprint\")\n tracker.write(event=\"No user\")\n tracker.write(\n user_id=\"import this\",\n event=\"very zen\",\n metadata={\n \"better\": \"forgiveness\",\n \"worse\": \"permission\",\n \"ordered\": [\"simple\", \"complex\", \"complicated\"],\n },\n )\n\n all_data = tracker.read()\n pawprint_events = tracker.read(user_id=\"Pawprint\")\n id_gt_events = tracker.read(id__gt=10)\n id_gte_lt_events = tracker.read(id__gte=1, id__lt=3)\n field_events = tracker.read(\"event\", id__lte=100, event=\"very zen\")\n contains_events = tracker.read(metadata__contains=\"better\")\n not_contains_events = tracker.read(metadata__contains=\"whisky\")\n\n assert len(all_data) == 4\n assert len(pawprint_events) == 2\n assert len(id_gt_events) == 0\n assert len(id_gte_lt_events) == 2\n assert len(field_events) == 1\n assert len(contains_events) == 1\n assert len(not_contains_events) == 0\n\n assert set(all_data.columns) == set([\"id\", \"user_id\", \"event\", \"metadata\", \"timestamp\"])\n assert set(field_events.columns) == set([\"event\"])\n\n\ndef test_counts(pawprint_default_tracker_db_with_table):\n \"\"\"Test counting a specific event, with date ranges and time resolutions.\"\"\"\n\n tracker = pawprint_default_tracker_db_with_table\n\n # Add a bunch of events\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n ).format(tracker.table)\n\n pd.io.sql.execute(query, tracker.db)\n\n logins_hourly = tracker.count(event=\"logged_in\", resolution=\"hour\")\n logins_daily = tracker.count(event=\"logged_in\")\n logins_weekly = tracker.count(event=\"logged_in\", resolution=\"week\")\n logins_monthly = tracker.count(event=\"logged_in\", resolution=\"month\")\n logins_weekly_left_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", start=datetime(2016, 2, 1)\n )\n logins_weekly_right_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", end=datetime(2016, 2, 1)\n )\n logins_daily_full_range = tracker.count(\n event=\"logged_in\", start=datetime(2016, 1, 15), end=datetime(2016, 2, 15)\n )\n\n # 
Hourly\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly[\"count\"].values == [2, 1, 2, 1, 1, 1, 2, 2])\n\n # Daily\n assert len(logins_daily) == 7\n assert np.all(logins_daily[\"count\"].values == [3, 2, 1, 1, 1, 2, 2])\n\n # Weekly\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly[\"count\"].values == [5, 2, 1, 2, 2])\n\n # Others\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2 # weeks start on Monday\n assert len(logins_weekly_right_range) == 4 # and not at the start / end dates provided\n assert len(logins_daily_full_range) == 2\n\n\ndef test_sum_and_average(pawprint_default_tracker_db_with_table):\n \"\"\"Test aggregating a specific event, with date ranges and time resolutions.\"\"\"\n\n tracker = pawprint_default_tracker_db_with_table\n\n metadata = str('{\"val\": 1}').replace(\"'\", '\"')\n\n # Add a bunch of events\n query = (\n \"\"\"\n INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),\n ('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),\n ('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),\n ('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')\n \"\"\"\n ).format(table=tracker.table, metadata=metadata)\n\n pd.io.sql.execute(query, tracker.db)\n\n x_sum_daily_all = tracker.sum(\"metadata__val\")\n x_sum_daily = tracker.sum(\"metadata__val\", event=\"logged_in\")\n\n x_avg_daily_all = tracker.average(\"metadata__val\", event=\"logged_in\")\n x_avg_daily = tracker.average(\"metadata__val\", event=\"logged_in\")\n\n assert len(x_sum_daily) == 7\n\n assert np.all(x_sum_daily_all[\"sum\"].values == [3, 2, 1, 1, 1, 2, 3])\n assert np.all(x_sum_daily[\"sum\"].values == [3, 2, 1, 1, 1, 2, 2])\n\n assert np.all(x_avg_daily_all[\"avg\"].values == [1, 1, 1, 1, 1, 1, 1])\n assert np.all(x_avg_daily[\"avg\"] == x_avg_daily_all[\"avg\"])\n\n\ndef test_parse_fields(pawprint_default_tracker_db):\n \"\"\"Test args passed to read() and _aggregate() are parsed correctly.\"\"\"\n\n tracker = pawprint_default_tracker_db\n\n # SELECT * FROM table\n args = ()\n assert tracker._parse_fields(*args) == \"*\"\n\n # SELECT event FROM table\n args = (\"event\",)\n assert tracker._parse_fields(*args) == \"event\"\n\n # SELECT user_id, timestamp FROM table\n args = (\"user_id\", \"timestamp\")\n assert tracker._parse_fields(*args) == \"user_id, timestamp\"\n\n # SELECT metadata #>> '{a, b}' FROM table\n args = (\"metadata__a__b\",)\n assert tracker._parse_fields(*args) == \"metadata #> '{a, b}' AS json_field\"\n\n\ndef test_parse_values(pawprint_default_tracker_db):\n \"\"\"Test parsing values for write().\"\"\"\n\n tracker = pawprint_default_tracker_db\n\n # INSERT INTO table (event) VALUES ('logged_in')\n args = (\"logged_in\",)\n assert tracker._parse_values(*args) == \"'logged_in'\"\n\n # INSERT INTO table (event, user_id) VALUES ('logged_in', 'hannah')\n args = (\"logged_in\", \"hannah\")\n assert 
tracker._parse_values(*args) == \"'logged_in', 'hannah'\"\n\n\ndef test_parse_conditionals(pawprint_default_tracker_db):\n \"\"\"Test kwargs passed to read() and _aggregate() are parsed correctly.\"\"\"\n\n tracker = pawprint_default_tracker_db\n\n # SELECT * FROM table\n kwargs = {}\n assert tracker._parse_conditionals(**kwargs) == \"\"\n\n # SELECT * FROM table WHERE user_id = 'Quentin'\n kwargs = {\"user_id\": \"Quentin\"}\n assert tracker._parse_conditionals(**kwargs) == \"WHERE user_id = 'Quentin'\"\n\n # SELECT * FROM table WHERE event = 'logged_in' AND user_id = 'Quentin'\n kwargs = {\"event\": \"logged_in\", \"user_id\": \"Quentin\"}\n assert tracker._parse_conditionals(**kwargs) in (\n \"WHERE event = 'logged_in' AND user_id = 'Quentin'\",\n \"WHERE user_id = 'Quentin' AND event = 'logged_in'\",\n )\n\n # SELECT * FROM table WHERE event IN ('logged_in', 'logged_out')\n kwargs = {\"event__in\": [\"logged_in\", \"logged_out\"]}\n assert tracker._parse_conditionals(**kwargs) == \"WHERE event IN ('logged_in', 'logged_out')\"\n\n\ndef test_accessing_json_fields(pawprint_default_tracker_db_with_table):\n \"\"\"Test some structured data pulling.\"\"\"\n\n tracker = pawprint_default_tracker_db_with_table\n\n # JSON objects in our tracking database\n simple = {\"integral\": \"derivative\"}\n medium = {\"montecarlo\": {\"prior\": \"likelihood\"}}\n difficult = {\n \"deepnet\": [\"mlp\", \"cnn\", \"rnn\"],\n \"ensembles\": {\"random\": \"forest\", \"always\": {\"cross_validate\": [\"kfold\", \"stratified\"]}},\n }\n\n tracker.write(event=\"maths\", metadata=simple)\n tracker.write(event=\"stats\", metadata=medium)\n tracker.write(event=\"ml\", metadata=difficult)\n\n maths_all = tracker.read(\"metadata__integral\")\n maths_condition = tracker.read(\"metadata__integral\", event=\"maths\")\n assert len(maths_all) == 3\n assert len(maths_condition) == 1\n assert list(maths_all.json_field) == [\"derivative\", None, None]\n\n stats = tracker.read(\"metadata__montecarlo__prior\").dropna()\n assert len(stats) == 1\n assert stats.json_field.iloc[0] == \"likelihood\"\n\n types_of_nn = tracker.read(\"metadata__deepnet\").dropna()\n best_nn = tracker.read(\"metadata__deepnet__1\").dropna()\n full_depth = tracker.read(\"metadata__ensembles__always__cross_validate__0\").dropna()\n assert len(types_of_nn) == 1\n assert len(best_nn) == 1\n assert best_nn.json_field.iloc[0] == \"cnn\"\n assert len(full_depth) == 1\n assert full_depth.json_field.iloc[0] == \"kfold\"\n\n\ndef test_json_maths(pawprint_default_tracker_db_with_table):\n \"\"\"More advanced operations on JSON subfields.\"\"\"\n\n tracker = pawprint_default_tracker_db_with_table\n\n tracker.write(event=\"whisky\", metadata={\"uigeadail\": {\"value\": 123, \"lagavulin\": [4, 2]}})\n tracker.write(event=\"whisky\", metadata={\"uigeadail\": {\"value\": 456, \"lagavulin\": [5, 0]}})\n tracker.write(event=\"whisky\", metadata={\"uigeadail\": {\"value\": 758, \"lagavulin\": [7, 10]}})\n tracker.write(event=\"armagnac\", metadata={\"age\": \"XO\"})\n tracker.write(event=\"armagnac\", metadata={\"age\": 15})\n\n assert len(tracker.read()) == 5\n assert len(tracker.read(metadata__uigeadail__contains=\"lagavulin\")) == 3\n assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2\n assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3\n\n whiskies = tracker.sum(\"metadata__uigeadail__value\")\n assert len(whiskies) == 1\n assert whiskies.iloc[0][\"sum\"] == 1337\n\n assert len(tracker.read(metadata__contains=\"age\")) == 2\n assert 
len(tracker.read(metadata__age=\"XO\")) == 1\n\n\ndef test_silent_write_errors():\n \"\"\"When a failure occurs in event write, it should fail silently.\"\"\"\n\n tracker = pawprint.Tracker(db=None, table=None)\n\n try:\n tracker.write(event=\"This will fail silently.\")\n except Exception:\n pytest.fail(\"Failed to fail silently.\")\n\n\ndef test_nonsilent_write_errors(error_logger):\n \"\"\"Test non-silent write errors that should output to the logger or raise exceptions.\"\"\"\n\n tracker = pawprint.Tracker(db=\"postgresql:///fail\", logger=error_logger)\n\n with pytest.raises(Exception):\n tracker.write()\n with pytest.raises(Exception):\n tracker.write(event=\"going_to_fail\")\n\n with open(\"pawprint.log\", mode=\"r\") as f:\n logs = f.readlines()\n print(logs[3])\n\n assert len(logs) == 6\n assert logs[0].startswith(\"pawprint: pawprint failed to write.\")\n assert \"Table: None. Query: INSERT INTO None () VALUES ();\" in logs[0]\n assert \"Query: INSERT INTO None (event) VALUES ('going_to_fail')\" in logs[3]\n\n os.remove(\"pawprint.log\")\n\n\ndef test_auto_timestamp(db_string):\n \"\"\"Ensure that timestamps are autopopulated correctly if not passed.\"\"\"\n\n # Define a schema where the timestamp doesn't automatically populate through the database\n schema = {\"event\": \"TEXT\", \"timestamp\": \"TIMESTAMP\"}\n\n # Put together two trackers, one that autopopulates the timestamp\n no_auto = pawprint.Tracker(db=db_string, table=\"no_auto\", auto_timestamp=False, schema=schema)\n auto = pawprint.Tracker(db=db_string, table=\"auto\", auto_timestamp=True, schema=schema)\n\n # Create clean tables\n no_auto.create_table()\n auto.create_table()\n\n # Write events with no timestamp\n no_auto.write(event=\"foo\")\n auto.write(event=\"bar\")\n\n assert len(no_auto.read()) == 1\n assert len(auto.read()) == 1\n\n assert len(no_auto.read().dropna()) == 0\n assert len(auto.read().dropna()) == 1\n\n # Drop tables at the end\n no_auto.drop_table()\n auto.drop_table()\n\n\ndef test_repr_and_str(pawprint_default_tracker_db):\n \"\"\"Test the __repr__ and __str__.\"\"\"\n tracker = pawprint_default_tracker_db\n expected_repr = \"pawprint.Tracker on table '{}' and database '{}'\".format(\n tracker.table, tracker.db\n )\n expected_str = \"pawprint Tracker object.\\ndb : {}\\ntable : {}\".format(tracker.db, tracker.table)\n assert tracker.__repr__() == expected_repr\n assert tracker.__str__() == expected_str\n\n\ndef test_malicious_strings(pawprint_default_tracker_db_with_table):\n \"\"\"Test that SQL injection strings are sanitized\"\"\"\n tracker = pawprint_default_tracker_db_with_table\n\n tracker.write(\n event=\"armageddon\",\n metadata={\n \"shady business\": {\n \"with\": \"the following string\",\n \"of sql\": \"50');INSERT INTO {table} (event, user_id) VALUES \"\n \"('you got pwnd', '50\".format(table=tracker.table),\n }\n },\n )\n assert len(tracker.read()) == 1\n\n tracker.write(\n event=\"armageddon\",\n metadata={\n \"more shady business\": {\n \"my shady sql\": \"' OR '1'='1;DROP TABLE {table};\".format(table=tracker.table)\n }\n },\n )\n assert len(tracker.read()) == 2\n\n tracker.write(\n event=\"' OR '1'='1;\",\n metadata={\"foo\": \"x'); DROP TABLE {table}; --\".format(table=tracker.table)},\n )\n assert len(tracker.read()) == 3\n\n\ndef test_escaping_from_quotes(pawprint_default_tracker_db_with_table):\n tracker = pawprint_default_tracker_db_with_table\n tracker.write(\n event=\"known crummy string\",\n metadata={\n \"foo\": {\n \"toState\": 
\"#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2\",\n \"fromState\": \"#/app/dealdetails/2345\",\n \"platform\": \"iOS App\",\n }\n },\n )\n assert len(tracker.read()) == 1\n",
"step-ids": [
8,
15,
18,
20,
21
]
}
|
[
8,
15,
18,
20,
21
] |
from funct import read_excel
import requests
import unittest
import HTMLTestReportCN


class v2exapi(unittest.TestCase):
    def test_node_api(self):
        """Each node name read from the spreadsheet should round-trip through the API."""
        url = "https://www.v2ex.com/api/nodes/show.json"
        a = read_excel("xx.xlsx", 0, 0)  # node names loaded via the custom helper
        for node_name in a:
            response = requests.request("GET", url, params={"name": node_name}).json()
self.assertEqual(response['name'],node_name)
print(response)
    def test_novel_search_api(self):
        """Smoke-test the novel search endpoint with a fixed query."""
        url = "https://www.apiopen.top/novelSearchApi"
        querystring = {"name": "%E7%9B%98%E9%BE%99"}  # URL-encoded "盘龙"
        headers = {
            'Cache-Control': "no-cache",
            'Postman-Token': "b249737d-aa24-4592-adf1-d19114f3f567"
        }
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)
if __name__ == '__main__':
    suiteTest = unittest.TestSuite()
    suiteTest.addTest(unittest.makeSuite(v2exapi))
    filepath = 'report.html'
    fp = open(filepath, 'wb')
    # Define the title and description of the HTML test report
    runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=u'Automated Test Report', description=u'Test Report')
    runner.run(suiteTest)
    fp.close()
|
normal
|
{
"blob_id": "5cd573f2b7f91a8b20e96deb1004c0ef7fc62398",
"index": 8072,
"step-1": "<mask token>\n\n\nclass v2exapi(unittest.TestCase):\n\n def test_node_api(self):\n url = 'https://www.v2ex.com/api/nodes/show.json'\n a = read_excel('xx.xlsx', 0, 0)\n for node_name in a:\n response = requests.request('GET', url, params={'name': node_name}\n ).json()\n self.assertEqual(response['name'], node_name)\n print(response)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass v2exapi(unittest.TestCase):\n\n def test_node_api(self):\n url = 'https://www.v2ex.com/api/nodes/show.json'\n a = read_excel('xx.xlsx', 0, 0)\n for node_name in a:\n response = requests.request('GET', url, params={'name': node_name}\n ).json()\n self.assertEqual(response['name'], node_name)\n print(response)\n\n def test_nade_type(self):\n url = 'https://www.apiopen.top/novelSearchApi'\n querystring = {'name': '%E7%9B%98%E9%BE%99'}\n headers = {'Cache-Control': 'no-cache', 'Postman-Token':\n 'b249737d-aa24-4592-adf1-d19114f3f567'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n print(response.text)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass v2exapi(unittest.TestCase):\n\n def test_node_api(self):\n url = 'https://www.v2ex.com/api/nodes/show.json'\n a = read_excel('xx.xlsx', 0, 0)\n for node_name in a:\n response = requests.request('GET', url, params={'name': node_name}\n ).json()\n self.assertEqual(response['name'], node_name)\n print(response)\n\n def test_nade_type(self):\n url = 'https://www.apiopen.top/novelSearchApi'\n querystring = {'name': '%E7%9B%98%E9%BE%99'}\n headers = {'Cache-Control': 'no-cache', 'Postman-Token':\n 'b249737d-aa24-4592-adf1-d19114f3f567'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n print(response.text)\n\n\nif __name__ == '__main__':\n suiteTest = unittest.TestSuite()\n suiteTest.addTest(unittest.makeSuite(v2exapi))\n filepath = '' + 'report.html'\n fp = open(filepath, 'wb')\n runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=u'自动化测试报告',\n description=u'测试报告')\n runner.run(suiteTest)\n fp.close()\n",
"step-4": "from funct import read_excel\nimport requests\nimport unittest\nimport HTMLTestReportCN\n\n\nclass v2exapi(unittest.TestCase):\n\n def test_node_api(self):\n url = 'https://www.v2ex.com/api/nodes/show.json'\n a = read_excel('xx.xlsx', 0, 0)\n for node_name in a:\n response = requests.request('GET', url, params={'name': node_name}\n ).json()\n self.assertEqual(response['name'], node_name)\n print(response)\n\n def test_nade_type(self):\n url = 'https://www.apiopen.top/novelSearchApi'\n querystring = {'name': '%E7%9B%98%E9%BE%99'}\n headers = {'Cache-Control': 'no-cache', 'Postman-Token':\n 'b249737d-aa24-4592-adf1-d19114f3f567'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n print(response.text)\n\n\nif __name__ == '__main__':\n suiteTest = unittest.TestSuite()\n suiteTest.addTest(unittest.makeSuite(v2exapi))\n filepath = '' + 'report.html'\n fp = open(filepath, 'wb')\n runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=u'自动化测试报告',\n description=u'测试报告')\n runner.run(suiteTest)\n fp.close()\n",
"step-5": "from funct import read_excel\nimport requests\nimport unittest\nimport HTMLTestReportCN\nclass v2exapi(unittest.TestCase):\n def test_node_api(self):\n url = \"https://www.v2ex.com/api/nodes/show.json\"\n\n #querystring = {\"name\":\"php\"}\n a=read_excel(\"xx.xlsx\",0,0)\n for node_name in a:\n #for node_name in ['php',\"python\",\"qna\"]:\n\n\n response = requests.request(\"GET\", url, params={\"name\":node_name}).json()\n self.assertEqual(response['name'],node_name)\n print(response)\n def test_nade_type(self):\n\n\n url = \"https://www.apiopen.top/novelSearchApi\"\n\n querystring = {\"name\": \"%E7%9B%98%E9%BE%99\"}\n\n headers = {\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"b249737d-aa24-4592-adf1-d19114f3f567\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n print(response.text)\n\n\nif __name__ == '__main__':\n #unittest.main()\n suiteTest = unittest.TestSuite()\n suiteTest.addTest(unittest.makeSuite(v2exapi))\n filepath = '' + 'report.html'\n\n # filepath='C:\\\\'+now+'.html'\n\n fp = open(filepath, 'wb')\n # 定义测试报告的标题与描述\n runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=u'自动化测试报告', description=u'测试报告')\n runner.run(suiteTest)\n fp.close()\n\n\n\n\n#print(type(list1))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from tkinter import *
# Everything in tkinter is a widget
# We start with the Root Widget
root = Tk()
# Creating a Label Widget
myLabel1 = Label(root, text="Hello User!")
myLabel2 = Label(root, text="Welcome to medBOT")
# Put labels onto the screen
myLabel1.grid(row=0, column=0)
myLabel2.grid(row=1, column=0)
# Grid places each label at an exact row/column slot
# Positions are relative to the other widgets, not absolute pixels
root.mainloop()
|
normal
|
{
"blob_id": "93fe16e5a97ec2652c4f6b8be844244d9776ea2e",
"index": 4921,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmyLabel1.grid(row=0, column=0)\nmyLabel2.grid(row=1, column=0)\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nmyLabel1 = Label(root, text='Hello User!')\nmyLabel2 = Label(root, text='Welcome to medBOT')\nmyLabel1.grid(row=0, column=0)\nmyLabel2.grid(row=1, column=0)\nroot.mainloop()\n",
"step-4": "from tkinter import *\nroot = Tk()\nmyLabel1 = Label(root, text='Hello User!')\nmyLabel2 = Label(root, text='Welcome to medBOT')\nmyLabel1.grid(row=0, column=0)\nmyLabel2.grid(row=1, column=0)\nroot.mainloop()\n",
"step-5": "from tkinter import *\n\n# Everything in tkinter is a widget\n# We start with the Root Widget\n\nroot = Tk()\n# Creating a Label Widget\nmyLabel1 = Label(root, text=\"Hello User!\")\nmyLabel2 = Label(root, text=\"Welcome to medBOT\")\n\n# Put labels onto the screen\nmyLabel1.grid(row=0, column=0)\nmyLabel2.grid(row=1, column=0)\n# Grid assigns the texts exacts in the position\n# Grid creates a relative position\n\nroot.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.losses import mse, binary_crossentropy
from keras.utils import plot_model
from keras import backend as K
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
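# Reset the Keras session and seed NumPy's RNG for reproducibility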
K.clear_session()
np.random.seed(237)
# reparameterization trick
# instead of sampling from Q(z|X), sample eps = N(0,I)
# then z = z_mean + sqrt(var)*eps
def sampling(args):
"""Reparameterization trick by sampling fr an isotropic unit Gaussian.
# Arguments
args (tensor): mean and log of variance of Q(z|X)
# Returns
z (tensor): sampled latent vector
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
def process_data(data_path):
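    # Load the .npy frames, split train/test, add a channel axis, scale pixels to [0, 1]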
data = np.load(data_path)
X_train, X_test = train_test_split(data, test_size=0.05, random_state=42)
print('Shape train/test:', X_train.shape, X_test.shape)
image_size = X_train.shape[1], X_train.shape[2]
data = np.reshape(data, [-1, image_size[0], image_size[1], 1])
X_train = np.reshape(X_train, [-1, image_size[0], image_size[1], 1])
X_test = np.reshape(X_test, [-1, image_size[0], image_size[1], 1])
data = data.astype('float32') / 255
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
return data, X_train, X_test, image_size
def construct_vae(image_size, kernel_size, latent_dim):
# network parameters
input_shape = (image_size[0], image_size[1], 1)
# VAE model = encoder + decoder
# build encoder model
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
x = Conv2D(filters=16, kernel_size=kernel_size, activation='relu', strides=1, padding='same')(x)
x = Conv2D(filters=32, kernel_size=kernel_size, activation='relu', strides=2, padding='same')(x)
x = Conv2D(filters=64, kernel_size=kernel_size, activation='relu', strides=1, padding='same')(x)
# shape info needed to build decoder model
shape = K.int_shape(x)
# generate latent vector Q(z|X)
x = Flatten()(x)
x = Dense(16, activation='relu')(x)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)
# build decoder model
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
x = Conv2DTranspose(filters=64, kernel_size=kernel_size, activation='relu', strides=1, padding='same')(x)
x = Conv2DTranspose(filters=32, kernel_size=kernel_size, activation='relu', strides=2, padding='same')(x)
x = Conv2DTranspose(filters=16, kernel_size=kernel_size, activation='relu', strides=1, padding='same')(x)
outputs = Conv2DTranspose(filters=1,
kernel_size=kernel_size,
activation='sigmoid',
padding='same',
name='decoder_output')(x)
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)
# instantiate VAE model
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae')
# VAE loss = mse_loss or xent_loss + kl_loss
reconstruction_loss = binary_crossentropy(K.flatten(inputs), K.flatten(outputs))
reconstruction_loss *= image_size[0] * image_size[1]
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
vae.summary()
plot_model(vae, to_file='vae_cnn.png', show_shapes=True)
return vae, encoder, decoder
if __name__ == '__main__':
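    # is_train toggles between training from scratch and loading saved weights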
is_train = False
data_file = '../data/out/moment_frames_5.npy'
data, X_train, X_test, im_size = process_data(data_file)
kernel_size = (3, 3)
latent_dim = 128
batch_size = 128
epochs = 10
vae, encoder, decoder = construct_vae(im_size, kernel_size, latent_dim)
if is_train:
history = vae.fit(X_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(X_test, None),
verbose=2)
vae.save_weights('vae_cnn.h5')
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('vae_train.jpeg')
plt.show()
else:
vae.load_weights('vae_cnn.h5')
# Transform to latent representation
encoded_data = encoder.predict(data, batch_size=batch_size)
pd.DataFrame(encoded_data[0]).to_csv('latest_rep_cnn.csv', index=None)
print('Completed.')
|
normal
|
{
"blob_id": "88343b9c5cac3510e8cea75ac5b11f517ddc164b",
"index": 5943,
"step-1": "<mask token>\n\n\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling fr an isotropic unit Gaussian.\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon\n\n\n<mask token>\n\n\ndef construct_vae(image_size, kernel_size, latent_dim):\n input_shape = image_size[0], image_size[1], 1\n inputs = Input(shape=input_shape, name='encoder_input')\n x = inputs\n x = Conv2D(filters=16, kernel_size=kernel_size, activation='relu',\n strides=1, padding='same')(x)\n x = Conv2D(filters=32, kernel_size=kernel_size, activation='relu',\n strides=2, padding='same')(x)\n x = Conv2D(filters=64, kernel_size=kernel_size, activation='relu',\n strides=1, padding='same')(x)\n shape = K.int_shape(x)\n x = Flatten()(x)\n x = Dense(16, activation='relu')(x)\n z_mean = Dense(latent_dim, name='z_mean')(x)\n z_log_var = Dense(latent_dim, name='z_log_var')(x)\n z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean,\n z_log_var])\n encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')\n encoder.summary()\n plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)\n latent_inputs = Input(shape=(latent_dim,), name='z_sampling')\n x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)\n x = Reshape((shape[1], shape[2], shape[3]))(x)\n x = Conv2DTranspose(filters=64, kernel_size=kernel_size, activation=\n 'relu', strides=1, padding='same')(x)\n x = Conv2DTranspose(filters=32, kernel_size=kernel_size, activation=\n 'relu', strides=2, padding='same')(x)\n x = Conv2DTranspose(filters=16, kernel_size=kernel_size, activation=\n 'relu', strides=1, padding='same')(x)\n outputs = Conv2DTranspose(filters=1, kernel_size=kernel_size,\n activation='sigmoid', padding='same', name='decoder_output')(x)\n decoder = Model(latent_inputs, outputs, name='decoder')\n decoder.summary()\n plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)\n outputs = decoder(encoder(inputs)[2])\n vae = Model(inputs, outputs, name='vae')\n reconstruction_loss = binary_crossentropy(K.flatten(inputs), K.flatten(\n outputs))\n reconstruction_loss *= image_size[0] * image_size[1]\n kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n vae_loss = K.mean(reconstruction_loss + kl_loss)\n vae.add_loss(vae_loss)\n vae.compile(optimizer='rmsprop')\n vae.summary()\n plot_model(vae, to_file='vae_cnn.png', show_shapes=True)\n return vae, encoder, decoder\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling fr an isotropic unit Gaussian.\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon\n\n\ndef process_data(data_path):\n data = np.load(data_path)\n X_train, X_test = train_test_split(data, test_size=0.05, random_state=42)\n print('Shape train/test:', X_train.shape, X_test.shape)\n image_size = X_train.shape[1], X_train.shape[2]\n data = np.reshape(data, [-1, image_size[0], image_size[1], 1])\n X_train = np.reshape(X_train, [-1, image_size[0], image_size[1], 1])\n X_test = np.reshape(X_test, [-1, image_size[0], image_size[1], 1])\n data = data.astype('float32') / 255\n X_train = X_train.astype('float32') / 255\n X_test = X_test.astype('float32') / 255\n return data, X_train, X_test, image_size\n\n\ndef construct_vae(image_size, kernel_size, latent_dim):\n input_shape = image_size[0], image_size[1], 1\n inputs = Input(shape=input_shape, name='encoder_input')\n x = inputs\n x = Conv2D(filters=16, kernel_size=kernel_size, activation='relu',\n strides=1, padding='same')(x)\n x = Conv2D(filters=32, kernel_size=kernel_size, activation='relu',\n strides=2, padding='same')(x)\n x = Conv2D(filters=64, kernel_size=kernel_size, activation='relu',\n strides=1, padding='same')(x)\n shape = K.int_shape(x)\n x = Flatten()(x)\n x = Dense(16, activation='relu')(x)\n z_mean = Dense(latent_dim, name='z_mean')(x)\n z_log_var = Dense(latent_dim, name='z_log_var')(x)\n z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean,\n z_log_var])\n encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')\n encoder.summary()\n plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)\n latent_inputs = Input(shape=(latent_dim,), name='z_sampling')\n x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)\n x = Reshape((shape[1], shape[2], shape[3]))(x)\n x = Conv2DTranspose(filters=64, kernel_size=kernel_size, activation=\n 'relu', strides=1, padding='same')(x)\n x = Conv2DTranspose(filters=32, kernel_size=kernel_size, activation=\n 'relu', strides=2, padding='same')(x)\n x = Conv2DTranspose(filters=16, kernel_size=kernel_size, activation=\n 'relu', strides=1, padding='same')(x)\n outputs = Conv2DTranspose(filters=1, kernel_size=kernel_size,\n activation='sigmoid', padding='same', name='decoder_output')(x)\n decoder = Model(latent_inputs, outputs, name='decoder')\n decoder.summary()\n plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)\n outputs = decoder(encoder(inputs)[2])\n vae = Model(inputs, outputs, name='vae')\n reconstruction_loss = binary_crossentropy(K.flatten(inputs), K.flatten(\n outputs))\n reconstruction_loss *= image_size[0] * image_size[1]\n kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n vae_loss = K.mean(reconstruction_loss + kl_loss)\n vae.add_loss(vae_loss)\n vae.compile(optimizer='rmsprop')\n vae.summary()\n plot_model(vae, to_file='vae_cnn.png', show_shapes=True)\n return vae, encoder, decoder\n\n\n<mask token>\n",
"step-3": "<mask token>\nK.clear_session()\nnp.random.seed(237)\n\n\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling fr an isotropic unit Gaussian.\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon\n\n\ndef process_data(data_path):\n data = np.load(data_path)\n X_train, X_test = train_test_split(data, test_size=0.05, random_state=42)\n print('Shape train/test:', X_train.shape, X_test.shape)\n image_size = X_train.shape[1], X_train.shape[2]\n data = np.reshape(data, [-1, image_size[0], image_size[1], 1])\n X_train = np.reshape(X_train, [-1, image_size[0], image_size[1], 1])\n X_test = np.reshape(X_test, [-1, image_size[0], image_size[1], 1])\n data = data.astype('float32') / 255\n X_train = X_train.astype('float32') / 255\n X_test = X_test.astype('float32') / 255\n return data, X_train, X_test, image_size\n\n\ndef construct_vae(image_size, kernel_size, latent_dim):\n input_shape = image_size[0], image_size[1], 1\n inputs = Input(shape=input_shape, name='encoder_input')\n x = inputs\n x = Conv2D(filters=16, kernel_size=kernel_size, activation='relu',\n strides=1, padding='same')(x)\n x = Conv2D(filters=32, kernel_size=kernel_size, activation='relu',\n strides=2, padding='same')(x)\n x = Conv2D(filters=64, kernel_size=kernel_size, activation='relu',\n strides=1, padding='same')(x)\n shape = K.int_shape(x)\n x = Flatten()(x)\n x = Dense(16, activation='relu')(x)\n z_mean = Dense(latent_dim, name='z_mean')(x)\n z_log_var = Dense(latent_dim, name='z_log_var')(x)\n z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean,\n z_log_var])\n encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')\n encoder.summary()\n plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)\n latent_inputs = Input(shape=(latent_dim,), name='z_sampling')\n x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)\n x = Reshape((shape[1], shape[2], shape[3]))(x)\n x = Conv2DTranspose(filters=64, kernel_size=kernel_size, activation=\n 'relu', strides=1, padding='same')(x)\n x = Conv2DTranspose(filters=32, kernel_size=kernel_size, activation=\n 'relu', strides=2, padding='same')(x)\n x = Conv2DTranspose(filters=16, kernel_size=kernel_size, activation=\n 'relu', strides=1, padding='same')(x)\n outputs = Conv2DTranspose(filters=1, kernel_size=kernel_size,\n activation='sigmoid', padding='same', name='decoder_output')(x)\n decoder = Model(latent_inputs, outputs, name='decoder')\n decoder.summary()\n plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)\n outputs = decoder(encoder(inputs)[2])\n vae = Model(inputs, outputs, name='vae')\n reconstruction_loss = binary_crossentropy(K.flatten(inputs), K.flatten(\n outputs))\n reconstruction_loss *= image_size[0] * image_size[1]\n kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n vae_loss = K.mean(reconstruction_loss + kl_loss)\n vae.add_loss(vae_loss)\n vae.compile(optimizer='rmsprop')\n vae.summary()\n plot_model(vae, to_file='vae_cnn.png', show_shapes=True)\n return vae, encoder, decoder\n\n\nif __name__ == '__main__':\n is_train = False\n data_file = '../data/out/moment_frames_5.npy'\n data, X_train, X_test, im_size = process_data(data_file)\n kernel_size = 3, 3\n latent_dim = 
128\n batch_size = 128\n epochs = 10\n vae, encoder, decoder = construct_vae(im_size, kernel_size, latent_dim)\n if is_train:\n history = vae.fit(X_train, epochs=epochs, batch_size=batch_size,\n validation_data=(X_test, None), verbose=2)\n vae.save_weights('vae_cnn.h5')\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig('vae_train.jpeg')\n plt.show()\n else:\n vae.load_weights('vae_cnn.h5')\n encoded_data = encoder.predict(data, batch_size=batch_size)\n pd.DataFrame(encoded_data[0]).to_csv('latest_rep_cnn.csv', index=None)\n print('Completed.')\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom keras.layers import Dense, Input\nfrom keras.layers import Conv2D, Flatten, Lambda\nfrom keras.layers import Reshape, Conv2DTranspose\nfrom keras.models import Model\nfrom keras.losses import mse, binary_crossentropy\nfrom keras.utils import plot_model\nfrom keras import backend as K\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nK.clear_session()\nnp.random.seed(237)\n\n\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling fr an isotropic unit Gaussian.\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon\n\n\ndef process_data(data_path):\n data = np.load(data_path)\n X_train, X_test = train_test_split(data, test_size=0.05, random_state=42)\n print('Shape train/test:', X_train.shape, X_test.shape)\n image_size = X_train.shape[1], X_train.shape[2]\n data = np.reshape(data, [-1, image_size[0], image_size[1], 1])\n X_train = np.reshape(X_train, [-1, image_size[0], image_size[1], 1])\n X_test = np.reshape(X_test, [-1, image_size[0], image_size[1], 1])\n data = data.astype('float32') / 255\n X_train = X_train.astype('float32') / 255\n X_test = X_test.astype('float32') / 255\n return data, X_train, X_test, image_size\n\n\ndef construct_vae(image_size, kernel_size, latent_dim):\n input_shape = image_size[0], image_size[1], 1\n inputs = Input(shape=input_shape, name='encoder_input')\n x = inputs\n x = Conv2D(filters=16, kernel_size=kernel_size, activation='relu',\n strides=1, padding='same')(x)\n x = Conv2D(filters=32, kernel_size=kernel_size, activation='relu',\n strides=2, padding='same')(x)\n x = Conv2D(filters=64, kernel_size=kernel_size, activation='relu',\n strides=1, padding='same')(x)\n shape = K.int_shape(x)\n x = Flatten()(x)\n x = Dense(16, activation='relu')(x)\n z_mean = Dense(latent_dim, name='z_mean')(x)\n z_log_var = Dense(latent_dim, name='z_log_var')(x)\n z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean,\n z_log_var])\n encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')\n encoder.summary()\n plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)\n latent_inputs = Input(shape=(latent_dim,), name='z_sampling')\n x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)\n x = Reshape((shape[1], shape[2], shape[3]))(x)\n x = Conv2DTranspose(filters=64, kernel_size=kernel_size, activation=\n 'relu', strides=1, padding='same')(x)\n x = Conv2DTranspose(filters=32, kernel_size=kernel_size, activation=\n 'relu', strides=2, padding='same')(x)\n x = Conv2DTranspose(filters=16, kernel_size=kernel_size, activation=\n 'relu', strides=1, padding='same')(x)\n outputs = Conv2DTranspose(filters=1, kernel_size=kernel_size,\n activation='sigmoid', padding='same', name='decoder_output')(x)\n decoder = Model(latent_inputs, outputs, name='decoder')\n decoder.summary()\n plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)\n outputs = decoder(encoder(inputs)[2])\n vae = Model(inputs, outputs, name='vae')\n reconstruction_loss = binary_crossentropy(K.flatten(inputs), K.flatten(\n outputs))\n reconstruction_loss *= image_size[0] * image_size[1]\n kl_loss = 
1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n vae_loss = K.mean(reconstruction_loss + kl_loss)\n vae.add_loss(vae_loss)\n vae.compile(optimizer='rmsprop')\n vae.summary()\n plot_model(vae, to_file='vae_cnn.png', show_shapes=True)\n return vae, encoder, decoder\n\n\nif __name__ == '__main__':\n is_train = False\n data_file = '../data/out/moment_frames_5.npy'\n data, X_train, X_test, im_size = process_data(data_file)\n kernel_size = 3, 3\n latent_dim = 128\n batch_size = 128\n epochs = 10\n vae, encoder, decoder = construct_vae(im_size, kernel_size, latent_dim)\n if is_train:\n history = vae.fit(X_train, epochs=epochs, batch_size=batch_size,\n validation_data=(X_test, None), verbose=2)\n vae.save_weights('vae_cnn.h5')\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig('vae_train.jpeg')\n plt.show()\n else:\n vae.load_weights('vae_cnn.h5')\n encoded_data = encoder.predict(data, batch_size=batch_size)\n pd.DataFrame(encoded_data[0]).to_csv('latest_rep_cnn.csv', index=None)\n print('Completed.')\n",
"step-5": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom keras.layers import Dense, Input\nfrom keras.layers import Conv2D, Flatten, Lambda\nfrom keras.layers import Reshape, Conv2DTranspose\nfrom keras.models import Model\nfrom keras.losses import mse, binary_crossentropy\nfrom keras.utils import plot_model\nfrom keras import backend as K\n\nfrom sklearn.model_selection import train_test_split\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nK.clear_session()\n\nnp.random.seed(237)\n\n\n# reparameterization trick\n# instead of sampling from Q(z|X), sample eps = N(0,I)\n# then z = z_mean + sqrt(var)*eps\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling fr an isotropic unit Gaussian.\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean=0 and std=1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon\n\n\ndef process_data(data_path):\n\n data = np.load(data_path)\n\n X_train, X_test = train_test_split(data, test_size=0.05, random_state=42)\n print('Shape train/test:', X_train.shape, X_test.shape)\n\n image_size = X_train.shape[1], X_train.shape[2]\n\n data = np.reshape(data, [-1, image_size[0], image_size[1], 1])\n X_train = np.reshape(X_train, [-1, image_size[0], image_size[1], 1])\n X_test = np.reshape(X_test, [-1, image_size[0], image_size[1], 1])\n\n data = data.astype('float32') / 255\n X_train = X_train.astype('float32') / 255\n X_test = X_test.astype('float32') / 255\n\n return data, X_train, X_test, image_size\n\n\ndef construct_vae(image_size, kernel_size, latent_dim):\n # network parameters\n input_shape = (image_size[0], image_size[1], 1)\n\n # VAE model = encoder + decoder\n # build encoder model\n inputs = Input(shape=input_shape, name='encoder_input')\n x = inputs\n x = Conv2D(filters=16, kernel_size=kernel_size, activation='relu', strides=1, padding='same')(x)\n x = Conv2D(filters=32, kernel_size=kernel_size, activation='relu', strides=2, padding='same')(x)\n x = Conv2D(filters=64, kernel_size=kernel_size, activation='relu', strides=1, padding='same')(x)\n\n # shape info needed to build decoder model\n shape = K.int_shape(x)\n\n # generate latent vector Q(z|X)\n x = Flatten()(x)\n x = Dense(16, activation='relu')(x)\n z_mean = Dense(latent_dim, name='z_mean')(x)\n z_log_var = Dense(latent_dim, name='z_log_var')(x)\n\n # use reparameterization trick to push the sampling out as input\n # note that \"output_shape\" isn't necessary with the TensorFlow backend\n z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])\n\n # instantiate encoder model\n encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')\n encoder.summary()\n plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)\n\n # build decoder model\n latent_inputs = Input(shape=(latent_dim,), name='z_sampling')\n x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)\n x = Reshape((shape[1], shape[2], shape[3]))(x)\n\n x = Conv2DTranspose(filters=64, kernel_size=kernel_size, activation='relu', strides=1, padding='same')(x)\n x = Conv2DTranspose(filters=32, kernel_size=kernel_size, activation='relu', strides=2, padding='same')(x)\n x = Conv2DTranspose(filters=16, kernel_size=kernel_size, 
activation='relu', strides=1, padding='same')(x)\n\n outputs = Conv2DTranspose(filters=1,\n kernel_size=kernel_size,\n activation='sigmoid',\n padding='same',\n name='decoder_output')(x)\n\n # instantiate decoder model\n decoder = Model(latent_inputs, outputs, name='decoder')\n decoder.summary()\n plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)\n\n # instantiate VAE model\n outputs = decoder(encoder(inputs)[2])\n vae = Model(inputs, outputs, name='vae')\n\n # VAE loss = mse_loss or xent_loss + kl_loss\n reconstruction_loss = binary_crossentropy(K.flatten(inputs), K.flatten(outputs))\n\n reconstruction_loss *= image_size[0] * image_size[1]\n kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n vae_loss = K.mean(reconstruction_loss + kl_loss)\n vae.add_loss(vae_loss)\n vae.compile(optimizer='rmsprop')\n vae.summary()\n plot_model(vae, to_file='vae_cnn.png', show_shapes=True)\n\n return vae, encoder, decoder\n\n\nif __name__ == '__main__':\n\n is_train = False\n data_file = '../data/out/moment_frames_5.npy'\n data, X_train, X_test, im_size = process_data(data_file)\n\n kernel_size = (3, 3)\n latent_dim = 128\n batch_size = 128\n epochs = 10\n\n vae, encoder, decoder = construct_vae(im_size, kernel_size, latent_dim)\n\n if is_train:\n history = vae.fit(X_train,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=(X_test, None),\n verbose=2)\n vae.save_weights('vae_cnn.h5')\n\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig('vae_train.jpeg')\n plt.show()\n\n else:\n vae.load_weights('vae_cnn.h5')\n\n # Transform to latent representation\n encoded_data = encoder.predict(data, batch_size=batch_size)\n\n pd.DataFrame(encoded_data[0]).to_csv('latest_rep_cnn.csv', index=None)\n\n print('Completed.')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from bs4 import BeautifulSoup
import os, re, json
import pandas as pd
from urllib import request
from openpyxl import load_workbook
from bilibili.append_xlsx import append_df_to_excel
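# Workflow: parse saved bilibili pages for video ids, enrich each id via the
# public API, then append the combined records to an Excel workbook.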
# Collect every video's avid, title, and url on the page
def parse_html(content):
    # Parse the HTML document with BeautifulSoup
    soup = BeautifulSoup(content, 'html.parser')
    # Grab the anchor tags carrying a title, an href, and the "title" class
    tag_list = soup.find_all("a", attrs={'title': True, 'href': True, "class": "title"})
    # tag_list = soup.find_all("span", attrs={'class': 'type avid'})
    for tag in tag_list:
        # print(tag.get("title"), tag.get("href"))
        # Read the tag's attributes; slicing href drops its leading "//"
        title = tag.get("title")
        href = tag.get("href")[2:]
        avid = re.search("av([0-9]*)", href).group(0)
        base_dict[avid] = [avid, title, href]
    return base_dict.keys()
# List the file paths directly under a directory
def read_path(path):
path_set = set()
dir_path = os.listdir(path)
for item in dir_path:
child = os.path.join('%s/%s' % (path, item))
path_set.add(child)
return path_set
# Keep only the HTML files
def filter(path_set):
filterable = []
    pattern = re.compile(r'.*\.(html|htm)$', re.I)  # group alternation; the original [html|htm]+ class matched any run of those letters
for path in path_set:
m = pattern.match(path)
if m:
filterable.append(m.group(0).strip())
return filterable
# Read a file's contents
def read_html(path):
file = open(path.encode('utf-8').strip(), 'r', encoding="utf-8")
content = file.read()
return content
# Append the collected records to the Excel workbook
def storeCSV(filename=r'/Users/robbin/Desktop/bilibili/bilibili.xlsx'):
df_base = pd.DataFrame.from_dict(base_dict, orient="index")
df_base.columns = ['avid', 'title', 'href']
df_tags = pd.DataFrame.from_dict(tags_dict, orient="index")
df_tags.columns = ['tags']
df_info = pd.DataFrame.from_dict(info_dict, orient='index')
df_info.columns = ['like', 'his_rank', 'view', 'now_rank', 'coin', 'reply', 'aid', 'no_reprint', 'favorite', 'danmaku', 'copyright', 'share']
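    # Join on the shared avid index so base info, tags, and stats line up per video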
df = df_base.join([df_tags, df_info])
append_df_to_excel(filename, df, index=False)
# Query the bilibili API by avid for a video's stats
def query_info(avid):
stat_url = "https://api.bilibili.com/x/web-interface/archive/stat?aid="
id = avid[2:]
url = stat_url + id
response = request.urlopen(url)
return response.read().decode("utf-8")
# Query the bilibili API by avid for a video's tags
def query_tags(avid):
stat_url = "https://api.bilibili.com/x/tag/archive/tags?aid="
id = avid[2:]
url = stat_url + id
response = request.urlopen(url)
return response.read().decode("utf-8")
if __name__ == '__main__':
print("now read folder...")
path_set = read_path("/Users/robbin/Desktop/bilibili")
print("parse file path finshed...")
filterable = filter(path_set)
for path in filterable:
base_dict = {}
tags_dict = {}
info_dict = {}
print("now parse the file:", path)
content = read_html(path)
avid_list = parse_html(content)
for avid in avid_list:
print("Proccessing:", avid)
tags_json = query_tags(avid)
tags_obj = json.loads(tags_json)
tags_row_list = tags_obj.get("data")
if tags_row_list:
# print(data)
tag_list = []
for item in tags_row_list:
tag_name = item.get("tag_name")
tag_list.append(tag_name)
tag = ",".join(tag_list)
tags_dict[avid] = tag
info_json = query_info(avid)
info_obj = json.loads(info_json)
info_row_dict = info_obj.get("data")
if info_row_dict:
info_dict[avid] = list(info_row_dict.values())
print("Start to writing ", path, " to xls")
storeCSV()
print("End of writing ", path, " to xls")
|
normal
|
{
"blob_id": "a63718ba5f23d6f180bdafcb12b337465d6fa052",
"index": 4734,
"step-1": "<mask token>\n\n\ndef read_path(path):\n path_set = set()\n dir_path = os.listdir(path)\n for item in dir_path:\n child = os.path.join('%s/%s' % (path, item))\n path_set.add(child)\n return path_set\n\n\ndef filter(path_set):\n filterable = []\n pattern = re.compile('.*\\\\.[html|htm]+', re.I)\n for path in path_set:\n m = pattern.match(path)\n if m:\n filterable.append(m.group(0).strip())\n return filterable\n\n\ndef read_html(path):\n file = open(path.encode('utf-8').strip(), 'r', encoding='utf-8')\n content = file.read()\n return content\n\n\n<mask token>\n\n\ndef query_info(avid):\n stat_url = 'https://api.bilibili.com/x/web-interface/archive/stat?aid='\n id = avid[2:]\n url = stat_url + id\n response = request.urlopen(url)\n return response.read().decode('utf-8')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_html(content):\n arr = []\n soup = BeautifulSoup(content)\n tag_list = soup.find_all('a', attrs={'title': True, 'href': True,\n 'class': 'title'})\n for tag in tag_list:\n title = tag.get('title')\n href = tag.get('href')[2:]\n avid = re.search('av([0-9]*)', href).group(0)\n base_dict[avid] = [avid, title, href]\n return base_dict.keys()\n\n\ndef read_path(path):\n path_set = set()\n dir_path = os.listdir(path)\n for item in dir_path:\n child = os.path.join('%s/%s' % (path, item))\n path_set.add(child)\n return path_set\n\n\ndef filter(path_set):\n filterable = []\n pattern = re.compile('.*\\\\.[html|htm]+', re.I)\n for path in path_set:\n m = pattern.match(path)\n if m:\n filterable.append(m.group(0).strip())\n return filterable\n\n\ndef read_html(path):\n file = open(path.encode('utf-8').strip(), 'r', encoding='utf-8')\n content = file.read()\n return content\n\n\n<mask token>\n\n\ndef query_info(avid):\n stat_url = 'https://api.bilibili.com/x/web-interface/archive/stat?aid='\n id = avid[2:]\n url = stat_url + id\n response = request.urlopen(url)\n return response.read().decode('utf-8')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_html(content):\n arr = []\n soup = BeautifulSoup(content)\n tag_list = soup.find_all('a', attrs={'title': True, 'href': True,\n 'class': 'title'})\n for tag in tag_list:\n title = tag.get('title')\n href = tag.get('href')[2:]\n avid = re.search('av([0-9]*)', href).group(0)\n base_dict[avid] = [avid, title, href]\n return base_dict.keys()\n\n\ndef read_path(path):\n path_set = set()\n dir_path = os.listdir(path)\n for item in dir_path:\n child = os.path.join('%s/%s' % (path, item))\n path_set.add(child)\n return path_set\n\n\ndef filter(path_set):\n filterable = []\n pattern = re.compile('.*\\\\.[html|htm]+', re.I)\n for path in path_set:\n m = pattern.match(path)\n if m:\n filterable.append(m.group(0).strip())\n return filterable\n\n\ndef read_html(path):\n file = open(path.encode('utf-8').strip(), 'r', encoding='utf-8')\n content = file.read()\n return content\n\n\ndef storeCSV(filename='/Users/robbin/Desktop/bilibili/bilibili.xlsx'):\n df_base = pd.DataFrame.from_dict(base_dict, orient='index')\n df_base.columns = ['avid', 'title', 'href']\n df_tags = pd.DataFrame.from_dict(tags_dict, orient='index')\n df_tags.columns = ['tags']\n df_info = pd.DataFrame.from_dict(info_dict, orient='index')\n df_info.columns = ['like', 'his_rank', 'view', 'now_rank', 'coin',\n 'reply', 'aid', 'no_reprint', 'favorite', 'danmaku', 'copyright',\n 'share']\n df = df_base.join([df_tags, df_info])\n append_df_to_excel(filename, df, index=False)\n\n\ndef query_info(avid):\n stat_url = 'https://api.bilibili.com/x/web-interface/archive/stat?aid='\n id = avid[2:]\n url = stat_url + id\n response = request.urlopen(url)\n return response.read().decode('utf-8')\n\n\ndef query_tags(avid):\n stat_url = 'https://api.bilibili.com/x/tag/archive/tags?aid='\n id = avid[2:]\n url = stat_url + id\n response = request.urlopen(url)\n return response.read().decode('utf-8')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef parse_html(content):\n arr = []\n soup = BeautifulSoup(content)\n tag_list = soup.find_all('a', attrs={'title': True, 'href': True,\n 'class': 'title'})\n for tag in tag_list:\n title = tag.get('title')\n href = tag.get('href')[2:]\n avid = re.search('av([0-9]*)', href).group(0)\n base_dict[avid] = [avid, title, href]\n return base_dict.keys()\n\n\ndef read_path(path):\n path_set = set()\n dir_path = os.listdir(path)\n for item in dir_path:\n child = os.path.join('%s/%s' % (path, item))\n path_set.add(child)\n return path_set\n\n\ndef filter(path_set):\n filterable = []\n pattern = re.compile('.*\\\\.[html|htm]+', re.I)\n for path in path_set:\n m = pattern.match(path)\n if m:\n filterable.append(m.group(0).strip())\n return filterable\n\n\ndef read_html(path):\n file = open(path.encode('utf-8').strip(), 'r', encoding='utf-8')\n content = file.read()\n return content\n\n\ndef storeCSV(filename='/Users/robbin/Desktop/bilibili/bilibili.xlsx'):\n df_base = pd.DataFrame.from_dict(base_dict, orient='index')\n df_base.columns = ['avid', 'title', 'href']\n df_tags = pd.DataFrame.from_dict(tags_dict, orient='index')\n df_tags.columns = ['tags']\n df_info = pd.DataFrame.from_dict(info_dict, orient='index')\n df_info.columns = ['like', 'his_rank', 'view', 'now_rank', 'coin',\n 'reply', 'aid', 'no_reprint', 'favorite', 'danmaku', 'copyright',\n 'share']\n df = df_base.join([df_tags, df_info])\n append_df_to_excel(filename, df, index=False)\n\n\ndef query_info(avid):\n stat_url = 'https://api.bilibili.com/x/web-interface/archive/stat?aid='\n id = avid[2:]\n url = stat_url + id\n response = request.urlopen(url)\n return response.read().decode('utf-8')\n\n\ndef query_tags(avid):\n stat_url = 'https://api.bilibili.com/x/tag/archive/tags?aid='\n id = avid[2:]\n url = stat_url + id\n response = request.urlopen(url)\n return response.read().decode('utf-8')\n\n\nif __name__ == '__main__':\n print('now read folder...')\n path_set = read_path('/Users/robbin/Desktop/bilibili')\n print('parse file path finshed...')\n filterable = filter(path_set)\n for path in filterable:\n base_dict = {}\n tags_dict = {}\n info_dict = {}\n print('now parse the file:', path)\n content = read_html(path)\n avid_list = parse_html(content)\n for avid in avid_list:\n print('Proccessing:', avid)\n tags_json = query_tags(avid)\n tags_obj = json.loads(tags_json)\n tags_row_list = tags_obj.get('data')\n if tags_row_list:\n tag_list = []\n for item in tags_row_list:\n tag_name = item.get('tag_name')\n tag_list.append(tag_name)\n tag = ','.join(tag_list)\n tags_dict[avid] = tag\n info_json = query_info(avid)\n info_obj = json.loads(info_json)\n info_row_dict = info_obj.get('data')\n if info_row_dict:\n info_dict[avid] = list(info_row_dict.values())\n print('Start to writing ', path, ' to xls')\n storeCSV()\n print('End of writing ', path, ' to xls')\n",
"step-5": "from bs4 import BeautifulSoup\nimport os, re, json\nimport pandas as pd\nfrom urllib import request\nfrom openpyxl import load_workbook\nfrom bilibili.append_xlsx import append_df_to_excel\n\n\n# 获取页面的所有的avid, title, url\ndef parse_html(content):\n arr = []\n # 使用beautifulsoup解析html文档\n soup = BeautifulSoup(content)\n # 获取指定标签\n tag_list = soup.find_all(\"a\", attrs={'title': True, 'href': True, \"class\": \"title\"})\n # tag_list = soup.find_all(\"span\", attrs={'class': 'type avid'})\n for tag in tag_list:\n # print(tag.get(\"title\"), tag.get(\"href\"))\n # 获取标签内容,并去除首尾空格\n title = tag.get(\"title\")\n href = tag.get(\"href\")[2:]\n avid = re.search(\"av([0-9]*)\", href).group(0)\n base_dict[avid] = [avid, title, href]\n return base_dict.keys()\n\n# 读取路径文件名\ndef read_path(path):\n path_set = set()\n dir_path = os.listdir(path)\n for item in dir_path:\n child = os.path.join('%s/%s' % (path, item))\n path_set.add(child)\n return path_set\n\n# 提取html文件\ndef filter(path_set):\n filterable = []\n pattern = re.compile(r'.*\\.[html|htm]+', re.I)\n for path in path_set:\n m = pattern.match(path)\n if m:\n filterable.append(m.group(0).strip())\n return filterable\n\n# 读取文件内容\ndef read_html(path):\n file = open(path.encode('utf-8').strip(), 'r', encoding=\"utf-8\")\n content = file.read()\n return content\n\n# 写入csv\ndef storeCSV(filename=r'/Users/robbin/Desktop/bilibili/bilibili.xlsx'):\n df_base = pd.DataFrame.from_dict(base_dict, orient=\"index\")\n df_base.columns = ['avid', 'title', 'href']\n df_tags = pd.DataFrame.from_dict(tags_dict, orient=\"index\")\n df_tags.columns = ['tags']\n df_info = pd.DataFrame.from_dict(info_dict, orient='index')\n df_info.columns = ['like', 'his_rank', 'view', 'now_rank', 'coin', 'reply', 'aid', 'no_reprint', 'favorite', 'danmaku', 'copyright', 'share']\n df = df_base.join([df_tags, df_info])\n append_df_to_excel(filename, df, index=False)\n\n# 根据avid请求api获得视频信息\ndef query_info(avid):\n stat_url = \"https://api.bilibili.com/x/web-interface/archive/stat?aid=\"\n id = avid[2:]\n url = stat_url + id\n response = request.urlopen(url)\n return response.read().decode(\"utf-8\")\n\n# 根据avid请求api获得视频标签\ndef query_tags(avid):\n stat_url = \"https://api.bilibili.com/x/tag/archive/tags?aid=\"\n id = avid[2:]\n url = stat_url + id\n response = request.urlopen(url)\n return response.read().decode(\"utf-8\")\n\nif __name__ == '__main__':\n print(\"now read folder...\")\n path_set = read_path(\"/Users/robbin/Desktop/bilibili\")\n print(\"parse file path finshed...\")\n filterable = filter(path_set)\n\n for path in filterable:\n base_dict = {}\n tags_dict = {}\n info_dict = {}\n print(\"now parse the file:\", path)\n content = read_html(path)\n avid_list = parse_html(content)\n\n for avid in avid_list:\n print(\"Proccessing:\", avid)\n tags_json = query_tags(avid)\n tags_obj = json.loads(tags_json)\n tags_row_list = tags_obj.get(\"data\")\n if tags_row_list:\n # print(data)\n tag_list = []\n for item in tags_row_list:\n tag_name = item.get(\"tag_name\")\n tag_list.append(tag_name)\n tag = \",\".join(tag_list)\n tags_dict[avid] = tag\n\n info_json = query_info(avid)\n info_obj = json.loads(info_json)\n info_row_dict = info_obj.get(\"data\")\n if info_row_dict:\n info_dict[avid] = list(info_row_dict.values())\n print(\"Start to writing \", path, \" to xls\")\n storeCSV()\n print(\"End of writing \", path, \" to xls\")\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import bootcamp_utils
import numba
@numba.jit(nopython=True)
def backtrack_steps():
"""
Compute the number of steps it takes a 1d random walker starting
at zero to get to +1.
"""
# Initialize position and number of steps
x = 0
n_steps = 0
# Walk until we get to positive 1
while x < 1:
x += 2 * np.random.randint(0, 2) - 1
n_steps += 1
return n_steps
# Stepping time
tau = 0.5 # seconds
# Specify number of samples
n_samples = 10000
# Array of backtrack times
t_bt = np.empty(n_samples)
# Generate the samples
for i in range(n_samples):
t_bt[i] = backtrack_steps()
# Convert to seconds
t_bt *= tau
plt.figure(1)
_ = plt.hist(t_bt, bins=100, density=True)  # 'normed' was removed from matplotlib; 'density' is the replacement
plt.xlabel('time (s)')
plt.ylabel('PDF')
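# Empirical CDF: sorted sample values against the cumulative fraction of samples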
def ecdf(data):
return np.sort(data), np.arange(1, len(data)+1) / len(data)
# Generate x, y values
x, y = ecdf(t_bt)
plt.figure(2)
# Plot CDF from random numbers
plt.semilogx(x, y, '.', markersize=10)
# Clean up plot
plt.margins(y=0.02)
plt.xlabel('time (s)')
plt.ylabel('ECDF')
plt.figure(3)
# Plot the CCDF
plt.loglog(x, 1 - y, '.')
# Plot the asymptotic power law
t_smooth = np.logspace(0.5, 8, 100)
plt.loglog(t_smooth, 1 / np.sqrt(t_smooth))
# Label axes
plt.xlabel('time (s)')
plt.ylabel('CCDF')
plt.show()
|
normal
|
{
"blob_id": "00a2992af78f9edadd3f4cbc7d073c1f74fcd9a2",
"index": 2810,
"step-1": "<mask token>\n\n\n@numba.jit(nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n x = 0\n n_steps = 0\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n return n_steps\n\n\n<mask token>\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data) + 1) / len(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsns.set()\n<mask token>\n\n\n@numba.jit(nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n x = 0\n n_steps = 0\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n return n_steps\n\n\n<mask token>\nfor i in range(n_samples):\n t_bt[i] = backtrack_steps()\nt_bt *= tau\nplt.figure(1)\n<mask token>\nplt.xlabel('time (s)')\nplt.ylabel('PDF')\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data) + 1) / len(data)\n\n\n<mask token>\nplt.figure(2)\nplt.semilogx(x, y, '.', markersize=10)\nplt.margins(y=0.02)\nplt.xlabel('time (s)')\nplt.ylabel('ECDF')\nplt.figure(3)\nplt.loglog(x, 1 - y, '.')\n<mask token>\nplt.loglog(t_smooth, 1 / np.sqrt(t_smooth))\nplt.xlabel('time (s)')\nplt.ylabel('CCDF')\nplt.show()\n",
"step-3": "<mask token>\nsns.set()\n<mask token>\n\n\n@numba.jit(nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n x = 0\n n_steps = 0\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n return n_steps\n\n\ntau = 0.5\nn_samples = 10000\nt_bt = np.empty(n_samples)\nfor i in range(n_samples):\n t_bt[i] = backtrack_steps()\nt_bt *= tau\nplt.figure(1)\n_ = plt.hist(t_bt, bins=100, normed=True)\nplt.xlabel('time (s)')\nplt.ylabel('PDF')\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data) + 1) / len(data)\n\n\nx, y = ecdf(t_bt)\nplt.figure(2)\nplt.semilogx(x, y, '.', markersize=10)\nplt.margins(y=0.02)\nplt.xlabel('time (s)')\nplt.ylabel('ECDF')\nplt.figure(3)\nplt.loglog(x, 1 - y, '.')\nt_smooth = np.logspace(0.5, 8, 100)\nplt.loglog(t_smooth, 1 / np.sqrt(t_smooth))\nplt.xlabel('time (s)')\nplt.ylabel('CCDF')\nplt.show()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport bootcamp_utils\nimport numba\n\n\n@numba.jit(nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n x = 0\n n_steps = 0\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n return n_steps\n\n\ntau = 0.5\nn_samples = 10000\nt_bt = np.empty(n_samples)\nfor i in range(n_samples):\n t_bt[i] = backtrack_steps()\nt_bt *= tau\nplt.figure(1)\n_ = plt.hist(t_bt, bins=100, normed=True)\nplt.xlabel('time (s)')\nplt.ylabel('PDF')\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data) + 1) / len(data)\n\n\nx, y = ecdf(t_bt)\nplt.figure(2)\nplt.semilogx(x, y, '.', markersize=10)\nplt.margins(y=0.02)\nplt.xlabel('time (s)')\nplt.ylabel('ECDF')\nplt.figure(3)\nplt.loglog(x, 1 - y, '.')\nt_smooth = np.logspace(0.5, 8, 100)\nplt.loglog(t_smooth, 1 / np.sqrt(t_smooth))\nplt.xlabel('time (s)')\nplt.ylabel('CCDF')\nplt.show()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport bootcamp_utils\nimport numba\n\n\n\n@numba.jit(nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n\n # Initialize position and number of steps\n x = 0\n n_steps = 0\n\n # Walk until we get to positive 1\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n\n return n_steps\n\n\n# Stepping time\ntau = 0.5 # seconds\n\n# Specify number of samples\nn_samples = 10000\n\n# Array of backtrack times\nt_bt = np.empty(n_samples)\n\n# Generate the samples\nfor i in range(n_samples):\n t_bt[i] = backtrack_steps()\n\n# Convert to seconds\nt_bt *= tau\n\nplt.figure(1)\n_ = plt.hist(t_bt, bins=100, normed=True)\nplt.xlabel('time (s)')\nplt.ylabel('PDF')\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data)+1) / len(data)\n\n# Generate x, y values\nx, y = ecdf(t_bt)\n\nplt.figure(2)\n# Plot CDF from random numbers\nplt.semilogx(x, y, '.', markersize=10)\n\n# Clean up plot\nplt.margins(y=0.02)\nplt.xlabel('time (s)')\nplt.ylabel('ECDF')\n\n\nplt.figure(3)\n# Plot the CCDF\nplt.loglog(x, 1 - y, '.')\n\n# Plot the asymptotic power law\nt_smooth = np.logspace(0.5, 8, 100)\nplt.loglog(t_smooth, 1 / np.sqrt(t_smooth))\n\n# Label axes\nplt.xlabel('time (s)')\nplt.ylabel('CCDF')\n\nplt.show()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class CollectionMixin(with_metaclass(ABCMeta, object)):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CollectionMixin(with_metaclass(ABCMeta, object)):
<|reserved_special_token_0|>
def iterate(self, window_size=10, **filter_fields):
current_offset = None
while True:
response = self.list(size=window_size, offset=current_offset,
**filter_fields)
for item in response['data']:
yield item
next_url = response.get('next', None)
if next_url is None:
return
current_offset = parse_query_parameters(next_url).get('offset')[0]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CollectionMixin(with_metaclass(ABCMeta, object)):
@abstractmethod
def list(self, size=100, offset=None, **filter_fields):
"""
:param size: A limit on the number of objects to be returned.
:type size: int
:param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.
:type offset: uuid.UUID
:param filter_fields: Dictionary containing values to filter for
:type filter_fields: dict
:rtype: dict
:return: Dictionary containing dictionaries
"""
def iterate(self, window_size=10, **filter_fields):
current_offset = None
while True:
response = self.list(size=window_size, offset=current_offset,
**filter_fields)
for item in response['data']:
yield item
next_url = response.get('next', None)
if next_url is None:
return
current_offset = parse_query_parameters(next_url).get('offset')[0]
<|reserved_special_token_1|>
from __future__ import unicode_literals, print_function
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from .utils import parse_query_parameters
class CollectionMixin(with_metaclass(ABCMeta, object)):
@abstractmethod
def list(self, size=100, offset=None, **filter_fields):
"""
:param size: A limit on the number of objects to be returned.
:type size: int
:param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.
:type offset: uuid.UUID
:param filter_fields: Dictionary containing values to filter for
:type filter_fields: dict
:rtype: dict
:return: Dictionary containing dictionaries
"""
def iterate(self, window_size=10, **filter_fields):
current_offset = None
while True:
response = self.list(size=window_size, offset=current_offset,
**filter_fields)
for item in response['data']:
yield item
next_url = response.get('next', None)
if next_url is None:
return
current_offset = parse_query_parameters(next_url).get('offset')[0]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from .utils import parse_query_parameters
class CollectionMixin(with_metaclass(ABCMeta, object)):
@abstractmethod
def list(self, size=100, offset=None, **filter_fields):
"""
:param size: A limit on the number of objects to be returned.
:type size: int
:param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.
:type offset: uuid.UUID
:param filter_fields: Dictionary containing values to filter for
:type filter_fields: dict
:rtype: dict
:return: Dictionary containing dictionaries
"""
def iterate(self, window_size=10, **filter_fields):
current_offset = None
while True:
response = self.list(size=window_size, offset=current_offset, **filter_fields)
for item in response['data']:
yield item
next_url = response.get('next', None)
if next_url is None:
return
current_offset = parse_query_parameters(next_url).get('offset')[0]
|
flexible
|
{
"blob_id": "b63ed9e09b9e8c539aff765d719f3610283663fe",
"index": 4496,
"step-1": "<mask token>\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n <mask token>\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset,\n **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n",
"step-3": "<mask token>\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n\n @abstractmethod\n def list(self, size=100, offset=None, **filter_fields):\n \"\"\"\n :param size: A limit on the number of objects to be returned.\n :type size: int\n :param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.\n :type offset: uuid.UUID\n :param filter_fields: Dictionary containing values to filter for\n :type filter_fields: dict\n :rtype: dict\n :return: Dictionary containing dictionaries\n \"\"\"\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset,\n **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n",
"step-4": "from __future__ import unicode_literals, print_function\nfrom abc import ABCMeta, abstractmethod\nfrom six import with_metaclass\nfrom .utils import parse_query_parameters\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n\n @abstractmethod\n def list(self, size=100, offset=None, **filter_fields):\n \"\"\"\n :param size: A limit on the number of objects to be returned.\n :type size: int\n :param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.\n :type offset: uuid.UUID\n :param filter_fields: Dictionary containing values to filter for\n :type filter_fields: dict\n :rtype: dict\n :return: Dictionary containing dictionaries\n \"\"\"\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset,\n **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import with_metaclass\n\nfrom .utils import parse_query_parameters\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n @abstractmethod\n def list(self, size=100, offset=None, **filter_fields):\n \"\"\"\n :param size: A limit on the number of objects to be returned.\n :type size: int\n :param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.\n :type offset: uuid.UUID\n :param filter_fields: Dictionary containing values to filter for\n :type filter_fields: dict\n :rtype: dict\n :return: Dictionary containing dictionaries\n \"\"\"\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset, **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RelationshipPrediction(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __unicode__(self):
return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.
expectancy, self.explanation)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RelationshipPrediction(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __unicode__(self):
return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.
expectancy, self.explanation)
def __repr__(self):
return '< %s >' % str(self.__unicode__())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RelationshipPrediction(object):
<|reserved_special_token_0|>
def __init__(self, subject, object_, expectancy, is_uncertain,
explanation=''):
"""The initializer"""
self.subject = subject
"""The subject"""
self.object_ = object_
"""The object"""
self.expectancy = expectancy
"""The estimated probability of the predicted_relationship
        occurring between the subject and the object.
"""
self.is_uncertain = is_uncertain
"""Is the prediction made without having any information available?"""
self.explanation = explanation
"""The explanation for the prediction"""
def __unicode__(self):
return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.
expectancy, self.explanation)
def __repr__(self):
return '< %s >' % str(self.__unicode__())
<|reserved_special_token_1|>
"""The prediction classes. Instances of the class are returned by
the recommender.
"""
class RelationshipPrediction(object):
"""The prediction of the predicted_relationship appearing between
the given subject-object pair.
@type subject: the domain-specific subject
@ivar subject: the subject
@type object_: the domain-specific object
@ivar object_: the object
@type expectancy: float
    @ivar expectancy: the estimated probability of the predicted_relationship
    occurring between the subject and the object
@type explanation: str
@ivar explanation: the explanation for the prediction
"""
def __init__(self, subject, object_, expectancy, is_uncertain, explanation=''):
"""The initializer"""
self.subject = subject
"""The subject"""
self.object_ = object_
"""The object"""
self.expectancy = expectancy
"""The estimated probability of the predicted_relationship
        occurring between the subject and the object.
"""
self.is_uncertain = is_uncertain
"""Is the prediction made without having any information available?"""
self.explanation = explanation
"""The explanation for the prediction"""
def __unicode__(self):
return u"%s <- %s: %f, %s" % (
self.subject,
self.object_,
self.expectancy,
self.explanation
)
def __repr__(self):
return "< %s >" % str(self.__unicode__())
|
flexible
|
{
"blob_id": "c3de9e6129bcafd863cd330ac281345fb563cc8c",
"index": 6259,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n\n def __repr__(self):\n return '< %s >' % str(self.__unicode__())\n",
"step-4": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n\n def __init__(self, subject, object_, expectancy, is_uncertain,\n explanation=''):\n \"\"\"The initializer\"\"\"\n self.subject = subject\n \"\"\"The subject\"\"\"\n self.object_ = object_\n \"\"\"The object\"\"\"\n self.expectancy = expectancy\n \"\"\"The estimated probability of the predicted_relationship\n occuring between the subject and the object.\n \"\"\"\n self.is_uncertain = is_uncertain\n \"\"\"Is the prediction made without having any information available?\"\"\"\n self.explanation = explanation\n \"\"\"The explanation for the prediction\"\"\"\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n\n def __repr__(self):\n return '< %s >' % str(self.__unicode__())\n",
"step-5": "\"\"\"The prediction classes. Instances of the class are returned by \nthe recommender.\n\"\"\"\n\nclass RelationshipPrediction(object):\n \"\"\"The prediction of the predicted_relationship appearing between\n the given subject-object pair.\n \n @type subject: the domain-specific subject\n @ivar subject: the subject \n \n @type object_: the domain-specific object\n @ivar object_: the object\n \n @type expectancy: float\n @ivar expectancy: the estimated probability of the predict_relationship\n occuring between the subject and the object\n \n @type explanation: str\n @ivar explanation: the explanation for the prediction \n \"\"\"\n \n def __init__(self, subject, object_, expectancy, is_uncertain, explanation=''):\n \"\"\"The initializer\"\"\"\n \n self.subject = subject\n \"\"\"The subject\"\"\"\n \n self.object_ = object_\n \"\"\"The object\"\"\"\n \n self.expectancy = expectancy\n \"\"\"The estimated probability of the predicted_relationship\n occuring between the subject and the object.\n \"\"\"\n \n self.is_uncertain = is_uncertain\n \"\"\"Is the prediction made without having any information available?\"\"\"\n \n self.explanation = explanation\n \"\"\"The explanation for the prediction\"\"\"\n\n def __unicode__(self):\n return u\"%s <- %s: %f, %s\" % (\n self.subject, \n self.object_, \n self.expectancy, \n self.explanation\n )\n \n def __repr__(self):\n return \"< %s >\" % str(self.__unicode__())\n",
"step-ids": [
0,
2,
3,
4,
6
]
}
|
[
0,
2,
3,
4,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
with open('id_generator.bin', 'rb') as f:
print(pickle.load(f))
<|reserved_special_token_1|>
import pickle
if __name__ == '__main__':
with open('id_generator.bin', 'rb') as f:
print(pickle.load(f))
|
flexible
|
{
"blob_id": "080110e404cf5edfe53622a5942b53f9188ddd76",
"index": 1854,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n with open('id_generator.bin', 'rb') as f:\n print(pickle.load(f))\n",
"step-3": "import pickle\nif __name__ == '__main__':\n with open('id_generator.bin', 'rb') as f:\n print(pickle.load(f))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
Python package setup file.
"""
from setuptools import setup
setup(
name="TF_Speech",
version="0.2.0",
extras_require={'tensorflow': ['tensorflow'],
'tensorflow with gpu': ['tensorflow-gpu']},
)
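# Caveat: PEP 508 limits extra names to letters, digits and ".-_", so recent
# setuptools/pip will warn about or reject the 'tensorflow with gpu' key
# above. A hedged sketch with valid extra names (the replacement names are
# my assumption, not the project's published ones):
#
#     setup(
#         name="TF_Speech",
#         version="0.2.0",
#         extras_require={
#             "tensorflow": ["tensorflow"],          # CPU build
#             "tensorflow-gpu": ["tensorflow-gpu"],  # GPU build
#         },
#     )
#
# which would then install via e.g.: pip install .[tensorflow-gpu]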
|
normal
|
{
"blob_id": "97ebdeada3d797a971b5c3851b75f9754595f67c",
"index": 358,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='TF_Speech', version='0.2.0', extras_require={'tensorflow': [\n 'tensorflow'], 'tensorflow with gpu': ['tensorflow-gpu']})\n",
"step-3": "<mask token>\nfrom setuptools import setup\nsetup(name='TF_Speech', version='0.2.0', extras_require={'tensorflow': [\n 'tensorflow'], 'tensorflow with gpu': ['tensorflow-gpu']})\n",
"step-4": "\"\"\"\nPython package setup file.\n\"\"\"\n\nfrom setuptools import setup\n\nsetup(\n name=\"TF_Speech\",\n version=\"0.2.0\",\n extras_require={'tensorflow': ['tensorflow'],\n 'tensorflow with gpu': ['tensorflow-gpu']},\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# __author__: Stanley
# date: 2018/10/22
class Foo:
def __init__(self, name, age):
self.name = name
self.age = age
def __getitem__(self, item):
return item + 10
def __setitem__(self, key, value):
print(key, value)
def __delitem__(self, key):
print(key)
obj = Foo("stnley", 25)
# automatically runs the __getitem__ method of obj's class; 555 is passed as the argument
result = obj[555]
print(result)
obj[111] = 444
del obj[222]
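# For clarity (standard Python semantics, not part of the original file):
# the bracket syntax above is sugar for the dunder methods, so these calls
# are equivalent to the three statements just executed.
result = type(obj).__getitem__(obj, 555)  # same as: result = obj[555]
type(obj).__setitem__(obj, 111, 444)      # same as: obj[111] = 444
type(obj).__delitem__(obj, 222)           # same as: del obj[222]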
|
normal
|
{
"blob_id": "d4b9403366a16dfbb12a2161a996e641b3a785a5",
"index": 8027,
"step-1": "class Foo:\n <mask token>\n <mask token>\n\n def __setitem__(self, key, value):\n print(key, value)\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n <mask token>\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\n<mask token>\n",
"step-3": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\n<mask token>\nprint(result)\n<mask token>\ndel obj[222]\n",
"step-4": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\nobj = Foo('stnley', 25)\nresult = obj[555]\nprint(result)\nobj[111] = 444\ndel obj[222]\n",
"step-5": "# __author__: Stanley\n# date: 2018/10/22\n\nclass Foo:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\nobj = Foo(\"stnley\", 25)\n# 自动执行obj对象的类中的__getitem__方法。555当作参数传递\nresult = obj[555]\nprint(result)\nobj[111] = 444\ndel obj[222]\n\n",
"step-ids": [
2,
4,
6,
7,
8
]
}
|
[
2,
4,
6,
7,
8
] |
# 8/19/2020
# Of course, binary classification is just a single special case. Target encoding could be applied to any target variable type:
# For binary classification usually mean target encoding is used
# For regression mean could be changed to median, quartiles, etc.
# For multi-class classification with N classes we create N features with target mean for each category in one vs. all fashion
# The mean_target_encoding() function you've created could be used for any target type specified above. Let's apply it for the regression problem on the example of House Prices Kaggle competition.
# Your goal is to encode a categorical feature "RoofStyle" using mean target encoding. The train and test DataFrames are already available in your workspace.
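# The mean_target_encoding() helper used below comes from an earlier part of
# the exercise and is not shown in this snippet. A hedged sketch of what such
# a helper typically looks like -- the signature matches the call below, but
# the body (simple smoothed means, no out-of-fold split) is my assumption:
#
#     def mean_target_encoding(train, test, target, categorical, alpha=5):
#         global_mean = train[target].mean()
#         stats = train.groupby(categorical)[target].agg(['mean', 'count'])
#         # shrink rare categories toward the global mean; alpha sets the strength
#         smoothed = (stats['mean'] * stats['count'] + global_mean * alpha) \
#                    / (stats['count'] + alpha)
#         train_enc = train[categorical].map(smoothed).fillna(global_mean)
#         test_enc = test[categorical].map(smoothed).fillna(global_mean)
#         return train_enc, test_enc
#
# (Production versions usually compute the train encoding out-of-fold to
# avoid target leakage; the sketch omits that for brevity.)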
# Create mean target encoded feature
train['RoofStyle_enc'], test['RoofStyle_enc'] = mean_target_encoding(train=train,
test=test,
target='SalePrice',
categorical='RoofStyle',
alpha=10)
# Look at the encoding
print(test[['RoofStyle', 'RoofStyle_enc']].drop_duplicates())
# <script.py> output:
# RoofStyle RoofStyle_enc
# 0 Gable 171565.947836
# 1 Hip 217594.645131
# 98 Gambrel 164152.950424
# 133 Flat 188703.563431
# 362 Mansard 180775.938759
# 1053 Shed 188267.663242
|
normal
|
{
"blob_id": "5433e75bdc46d5a975969e7ece799174dc9b8713",
"index": 2918,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(test[['RoofStyle', 'RoofStyle_enc']].drop_duplicates())\n",
"step-3": "train['RoofStyle_enc'], test['RoofStyle_enc'] = mean_target_encoding(train=\n train, test=test, target='SalePrice', categorical='RoofStyle', alpha=10)\nprint(test[['RoofStyle', 'RoofStyle_enc']].drop_duplicates())\n",
"step-4": "# # 8/19/2020\n# Of course, binary classification is just a single special case. Target encoding could be applied to any target variable type:\n\n# For binary classification usually mean target encoding is used\n# For regression mean could be changed to median, quartiles, etc.\n# For multi-class classification with N classes we create N features with target mean for each category in one vs. all fashion\n# The mean_target_encoding() function you've created could be used for any target type specified above. Let's apply it for the regression problem on the example of House Prices Kaggle competition.\n\n# Your goal is to encode a categorical feature \"RoofStyle\" using mean target encoding. The train and test DataFrames are already available in your workspace.\n\n# Create mean target encoded feature\ntrain['RoofStyle_enc'], test['RoofStyle_enc'] = mean_target_encoding(train=train,\n test=test,\n target='SalePrice',\n categorical='RoofStyle',\n alpha=10)\n\n# Look at the encoding\nprint(test[['RoofStyle', 'RoofStyle_enc']].drop_duplicates())\n\n# <script.py> output:\n# RoofStyle RoofStyle_enc\n# 0 Gable 171565.947836\n# 1 Hip 217594.645131\n# 98 Gambrel 164152.950424\n# 133 Flat 188703.563431\n# 362 Mansard 180775.938759\n# 1053 Shed 188267.663242\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class RwidSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def parse(self, response):
data = {'username': 'user', 'password': 'user12345'}
return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata
=data, callback=self.after_login)
<|reserved_special_token_0|>
def parse_detail(self, response):
yield {'title': response.css('title::text').get()}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RwidSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def parse(self, response):
data = {'username': 'user', 'password': 'user12345'}
return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata
=data, callback=self.after_login)
def after_login(self, response):
"""
        There are 2 tasks here:
        1. Collect all the product data on the results page -> goes to the detail page (parsing detail)
        2. Collect all the next links -> goes back to self.after_login
:param response:
:return:
"""
detail_products: List[Selector] = response.css('.card .card-title a')
for detail in detail_products:
href = detail.attrib.get('href')
yield response.follow(href, callback=self.parse_detail)
yield {'title': response.css('title::text').get()}
def parse_detail(self, response):
yield {'title': response.css('title::text').get()}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RwidSpider(scrapy.Spider):
name = 'rwid'
allowed_domains = ['0.0.0.0']
start_urls = ['http://0.0.0.0:9999/']
def parse(self, response):
data = {'username': 'user', 'password': 'user12345'}
return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata
=data, callback=self.after_login)
def after_login(self, response):
"""
        There are 2 tasks here:
        1. Collect all the product data on the results page -> goes to the detail page (parsing detail)
        2. Collect all the next links -> goes back to self.after_login
:param response:
:return:
"""
detail_products: List[Selector] = response.css('.card .card-title a')
for detail in detail_products:
href = detail.attrib.get('href')
yield response.follow(href, callback=self.parse_detail)
yield {'title': response.css('title::text').get()}
def parse_detail(self, response):
yield {'title': response.css('title::text').get()}
<|reserved_special_token_1|>
from typing import List
import scrapy
from cssselect import Selector
class RwidSpider(scrapy.Spider):
name = 'rwid'
allowed_domains = ['0.0.0.0']
start_urls = ['http://0.0.0.0:9999/']
def parse(self, response):
data = {'username': 'user', 'password': 'user12345'}
return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata
=data, callback=self.after_login)
def after_login(self, response):
"""
        There are 2 tasks here:
        1. Collect all the product data on the results page -> goes to the detail page (parsing detail)
        2. Collect all the next links -> goes back to self.after_login
:param response:
:return:
"""
detail_products: List[Selector] = response.css('.card .card-title a')
for detail in detail_products:
href = detail.attrib.get('href')
yield response.follow(href, callback=self.parse_detail)
yield {'title': response.css('title::text').get()}
def parse_detail(self, response):
yield {'title': response.css('title::text').get()}
<|reserved_special_token_1|>
from typing import List
import scrapy
from cssselect import Selector
class RwidSpider(scrapy.Spider):
name = 'rwid'
allowed_domains = ['0.0.0.0']
    # request login from these start URLs
start_urls = ['http://0.0.0.0:9999/']
    # log in here
def parse(self, response):
        # what is the difference between yield & return?
        # yield {"title": response.css("title::text").get()}
        # check with inspect element whether login is required
data = {
"username": "user",
"password": "user12345"
}
        # check what FormRequest needs
return scrapy.FormRequest(
url="http://0.0.0.0:9999/login",
formdata=data,
            callback=self.after_login # to extract the data
)
def after_login(self, response):
"""
        There are 2 tasks here:
        1. Collect all the product data on the results page -> goes to the detail page (parsing detail)
        2. Collect all the next links -> goes back to self.after_login
:param response:
:return:
"""
# get detail product
detail_products: List[Selector] = response.css(".card .card-title a")
for detail in detail_products:
href = detail.attrib.get("href") # untuk mendapatkan urls
yield response.follow(href, callback=self.parse_detail) # masukkan urls ini ke antrian scrapy
yield {"title": response.css("title::text").get()}
def parse_detail(self, response):
yield {"title": response.css("title::text").get()}
|
flexible
|
{
"blob_id": "2185d332f7cd4cbf17d6b72a19297d156c2182a1",
"index": 2233,
"step-1": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n <mask token>\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-2": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-3": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n start_urls = ['http://0.0.0.0:9999/']\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-4": "from typing import List\nimport scrapy\nfrom cssselect import Selector\n\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n start_urls = ['http://0.0.0.0:9999/']\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-5": "from typing import List\n\nimport scrapy\nfrom cssselect import Selector\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n\n # REQUEST LOGIN DARI URLS\n start_urls = ['http://0.0.0.0:9999/']\n\n # LOGIN DISINI\n def parse(self, response):\n # apa bedanya yield & return\n # yield {\"title\": response.css(\"title::text\").get()}\n\n # cek di inspect element perlu login tidak?\n\n data = {\n \"username\": \"user\",\n \"password\": \"user12345\"\n }\n\n # cek di FormRequest butuhnya apa aja\n return scrapy.FormRequest(\n url=\"http://0.0.0.0:9999/login\",\n formdata=data,\n callback=self.after_login # untuk mengektraksi data\n )\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n\n # get detail product\n detail_products: List[Selector] = response.css(\".card .card-title a\")\n for detail in detail_products:\n href = detail.attrib.get(\"href\") # untuk mendapatkan urls\n yield response.follow(href, callback=self.parse_detail) # masukkan urls ini ke antrian scrapy\n\n yield {\"title\": response.css(\"title::text\").get()}\n\n def parse_detail(self, response):\n yield {\"title\": response.css(\"title::text\").get()}\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
os.chdir(common.root)
shutil.rmtree('shared/target', ignore_errors=True)
shutil.rmtree('platform/build', ignore_errors=True)
shutil.rmtree('platform/target', ignore_errors=True)
shutil.rmtree('tests/target', ignore_errors=True)
shutil.rmtree('examples/lwjgl/target', ignore_errors=True)
shutil.rmtree('examples/kwinit/target', ignore_errors=True)
shutil.rmtree('examples/jwm/target', ignore_errors=True)
shutil.rmtree('examples/swt/target', ignore_errors=True)
return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
os.chdir(common.root)
shutil.rmtree('shared/target', ignore_errors=True)
shutil.rmtree('platform/build', ignore_errors=True)
shutil.rmtree('platform/target', ignore_errors=True)
shutil.rmtree('tests/target', ignore_errors=True)
shutil.rmtree('examples/lwjgl/target', ignore_errors=True)
shutil.rmtree('examples/kwinit/target', ignore_errors=True)
shutil.rmtree('examples/jwm/target', ignore_errors=True)
shutil.rmtree('examples/swt/target', ignore_errors=True)
return 0
if __name__ == '__main__':
sys.exit(main())
<|reserved_special_token_1|>
import common, os, shutil, sys
def main():
os.chdir(common.root)
shutil.rmtree('shared/target', ignore_errors=True)
shutil.rmtree('platform/build', ignore_errors=True)
shutil.rmtree('platform/target', ignore_errors=True)
shutil.rmtree('tests/target', ignore_errors=True)
shutil.rmtree('examples/lwjgl/target', ignore_errors=True)
shutil.rmtree('examples/kwinit/target', ignore_errors=True)
shutil.rmtree('examples/jwm/target', ignore_errors=True)
shutil.rmtree('examples/swt/target', ignore_errors=True)
return 0
if __name__ == '__main__':
sys.exit(main())
<|reserved_special_token_1|>
#! /usr/bin/env python3
import common, os, shutil, sys
def main():
os.chdir(common.root)
shutil.rmtree('shared/target', ignore_errors = True)
shutil.rmtree('platform/build', ignore_errors = True)
shutil.rmtree('platform/target', ignore_errors = True)
shutil.rmtree('tests/target', ignore_errors = True)
shutil.rmtree('examples/lwjgl/target', ignore_errors = True)
shutil.rmtree('examples/kwinit/target', ignore_errors = True)
shutil.rmtree('examples/jwm/target', ignore_errors = True)
shutil.rmtree('examples/swt/target', ignore_errors = True)
return 0
if __name__ == '__main__':
sys.exit(main())
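# The eight rmtree() calls differ only in the path; an equivalent,
# behavior-preserving formulation (shown only as a sketch, the script above
# is left as-is) would be a loop over a list:
#
#     TARGET_DIRS = [
#         'shared/target', 'platform/build', 'platform/target', 'tests/target',
#         'examples/lwjgl/target', 'examples/kwinit/target',
#         'examples/jwm/target', 'examples/swt/target',
#     ]
#     for path in TARGET_DIRS:
#         shutil.rmtree(path, ignore_errors=True)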
|
flexible
|
{
"blob_id": "2305d0b7ec0d9e08e3f1c0cedaafa6ed60786e50",
"index": 7359,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "import common, os, shutil, sys\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "#! /usr/bin/env python3\nimport common, os, shutil, sys\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors = True)\n shutil.rmtree('platform/build', ignore_errors = True)\n shutil.rmtree('platform/target', ignore_errors = True)\n shutil.rmtree('tests/target', ignore_errors = True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors = True)\n shutil.rmtree('examples/kwinit/target', ignore_errors = True)\n shutil.rmtree('examples/jwm/target', ignore_errors = True)\n shutil.rmtree('examples/swt/target', ignore_errors = True)\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tkinter as tk
import random
root = tk.Tk()
main_frame = tk.Frame(root)
var = tk.StringVar()
ch = [ "hello world" , "HI Pyton", "Mar Java", "Mit Java", "Lut Java" ]
var.set("Hello world I am a Label")
label = tk.Label(main_frame,textvariable=var,
bg="black",fg="white",font=("Times New Roman",24,"bold"))
label.pack()
def change_label():
var.set(random.choice(ch))
b1 = tk.Button(main_frame,text="click",command=change_label,
font=("Arial",15,'bold'),bg="pink",fg="red")
b1.pack()
expr = tk.StringVar()
e1 = tk.Entry(root,textvariable=expr,font=("Arial",20,'bold'),
bg='gray',fg='white')
main_frame.pack()
button = tk.Button(root,text="!!EXIT!!",command=root.destroy,
font=("Arial",15,'bold'),bg="pink",fg="red")
button.pack()
def slove():
expr.set(eval(expr.get()))
result_button= tk.Button(root,text="!!Result!!",command=slove,
font=("Arial",15,'bold'),bg="pink",fg="red")
def clear():
expr.set("")
clr_button= tk.Button(root,text="!!clear!!",command=clear,
font=("Arial",15,'bold'),bg="pink",fg="red")
e1.pack()
result_button.pack()
clr_button.pack(anchor='sw')
root.title("My Appliction")
root.wm_minsize(400,400)
root.wm_maxsize(500,500)
root.geometry("+500+200")
root.mainloop()
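# Caveat: slove() feeds raw Entry text to eval(), which executes arbitrary
# Python typed into the box. A restricted arithmetic evaluator is safer; a
# minimal sketch (my addition, assumes Python 3.8+ for ast.Constant):
#
#     import ast, operator
#     _OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
#             ast.Mult: operator.mul, ast.Div: operator.truediv}
#
#     def safe_eval(text):
#         def walk(node):
#             if isinstance(node, ast.Expression):
#                 return walk(node.body)
#             if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
#                 return _OPS[type(node.op)](walk(node.left), walk(node.right))
#             if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.USub):
#                 return -walk(node.operand)
#             if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
#                 return node.value
#             raise ValueError('unsupported expression')
#         return walk(ast.parse(text, mode='eval'))
#
# so slove() could call expr.set(safe_eval(expr.get())) instead.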
|
normal
|
{
"blob_id": "33938a28aad29e996255827825a0cdb1db6b70b7",
"index": 5842,
"step-1": "<mask token>\n\n\ndef change_label():\n var.set(random.choice(ch))\n\n\n<mask token>\n\n\ndef slove():\n expr.set(eval(expr.get()))\n\n\n<mask token>\n\n\ndef clear():\n expr.set('')\n\n\n<mask token>\n",
"step-2": "<mask token>\nvar.set('Hello world I am a Label')\n<mask token>\nlabel.pack()\n\n\ndef change_label():\n var.set(random.choice(ch))\n\n\n<mask token>\nb1.pack()\n<mask token>\nmain_frame.pack()\n<mask token>\nbutton.pack()\n\n\ndef slove():\n expr.set(eval(expr.get()))\n\n\n<mask token>\n\n\ndef clear():\n expr.set('')\n\n\n<mask token>\ne1.pack()\nresult_button.pack()\nclr_button.pack(anchor='sw')\nroot.title('My Appliction')\nroot.wm_minsize(400, 400)\nroot.wm_maxsize(500, 500)\nroot.geometry('+500+200')\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = tk.Tk()\nmain_frame = tk.Frame(root)\nvar = tk.StringVar()\nch = ['hello world', 'HI Pyton', 'Mar Java', 'Mit Java', 'Lut Java']\nvar.set('Hello world I am a Label')\nlabel = tk.Label(main_frame, textvariable=var, bg='black', fg='white', font\n =('Times New Roman', 24, 'bold'))\nlabel.pack()\n\n\ndef change_label():\n var.set(random.choice(ch))\n\n\nb1 = tk.Button(main_frame, text='click', command=change_label, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\nb1.pack()\nexpr = tk.StringVar()\ne1 = tk.Entry(root, textvariable=expr, font=('Arial', 20, 'bold'), bg=\n 'gray', fg='white')\nmain_frame.pack()\nbutton = tk.Button(root, text='!!EXIT!!', command=root.destroy, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\nbutton.pack()\n\n\ndef slove():\n expr.set(eval(expr.get()))\n\n\nresult_button = tk.Button(root, text='!!Result!!', command=slove, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\n\n\ndef clear():\n expr.set('')\n\n\nclr_button = tk.Button(root, text='!!clear!!', command=clear, font=('Arial',\n 15, 'bold'), bg='pink', fg='red')\ne1.pack()\nresult_button.pack()\nclr_button.pack(anchor='sw')\nroot.title('My Appliction')\nroot.wm_minsize(400, 400)\nroot.wm_maxsize(500, 500)\nroot.geometry('+500+200')\nroot.mainloop()\n",
"step-4": "import tkinter as tk\nimport random\nroot = tk.Tk()\nmain_frame = tk.Frame(root)\nvar = tk.StringVar()\nch = ['hello world', 'HI Pyton', 'Mar Java', 'Mit Java', 'Lut Java']\nvar.set('Hello world I am a Label')\nlabel = tk.Label(main_frame, textvariable=var, bg='black', fg='white', font\n =('Times New Roman', 24, 'bold'))\nlabel.pack()\n\n\ndef change_label():\n var.set(random.choice(ch))\n\n\nb1 = tk.Button(main_frame, text='click', command=change_label, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\nb1.pack()\nexpr = tk.StringVar()\ne1 = tk.Entry(root, textvariable=expr, font=('Arial', 20, 'bold'), bg=\n 'gray', fg='white')\nmain_frame.pack()\nbutton = tk.Button(root, text='!!EXIT!!', command=root.destroy, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\nbutton.pack()\n\n\ndef slove():\n expr.set(eval(expr.get()))\n\n\nresult_button = tk.Button(root, text='!!Result!!', command=slove, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\n\n\ndef clear():\n expr.set('')\n\n\nclr_button = tk.Button(root, text='!!clear!!', command=clear, font=('Arial',\n 15, 'bold'), bg='pink', fg='red')\ne1.pack()\nresult_button.pack()\nclr_button.pack(anchor='sw')\nroot.title('My Appliction')\nroot.wm_minsize(400, 400)\nroot.wm_maxsize(500, 500)\nroot.geometry('+500+200')\nroot.mainloop()\n",
"step-5": "import tkinter as tk \nimport random\nroot = tk.Tk()\nmain_frame = tk.Frame(root)\nvar = tk.StringVar()\nch = [ \"hello world\" , \"HI Pyton\", \"Mar Java\", \"Mit Java\", \"Lut Java\" ]\nvar.set(\"Hello world I am a Label\")\nlabel = tk.Label(main_frame,textvariable=var,\n bg=\"black\",fg=\"white\",font=(\"Times New Roman\",24,\"bold\"))\nlabel.pack()\ndef change_label():\n var.set(random.choice(ch))\nb1 = tk.Button(main_frame,text=\"click\",command=change_label,\n font=(\"Arial\",15,'bold'),bg=\"pink\",fg=\"red\")\n\nb1.pack()\n\nexpr = tk.StringVar()\ne1 = tk.Entry(root,textvariable=expr,font=(\"Arial\",20,'bold'),\n bg='gray',fg='white')\n\nmain_frame.pack()\n\nbutton = tk.Button(root,text=\"!!EXIT!!\",command=root.destroy,\n font=(\"Arial\",15,'bold'),bg=\"pink\",fg=\"red\")\nbutton.pack()\ndef slove():\n expr.set(eval(expr.get()))\nresult_button= tk.Button(root,text=\"!!Result!!\",command=slove,\n font=(\"Arial\",15,'bold'),bg=\"pink\",fg=\"red\")\ndef clear():\n expr.set(\"\")\nclr_button= tk.Button(root,text=\"!!clear!!\",command=clear,\n font=(\"Arial\",15,'bold'),bg=\"pink\",fg=\"red\")\ne1.pack()\nresult_button.pack()\nclr_button.pack(anchor='sw')\nroot.title(\"My Appliction\")\nroot.wm_minsize(400,400)\nroot.wm_maxsize(500,500)\nroot.geometry(\"+500+200\")\nroot.mainloop()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import copy
import os
from datetime import datetime
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
DATA_FILE = os.path.join(ROOT_DIR, 'data/data_train.csv')
TRAINING_FILE_NAME = os.path.join(
ROOT_DIR, 'data/trainingIndices.csv')
VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/validationIndices.csv')
VALIDATION_MASK_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_mask.csv')
AUX = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_first.csv')
META_VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_second.csv')
SAMPLE_SUBMISSION = os.path.join(ROOT_DIR, \
'data/sampleSubmission.csv')
ENSEMBLE_INPUT_DIR = 'data/stacking/good_data'
ITEM_COUNT = 1000
USER_COUNT = 10000
WEIGHT_KNN = 0.001
N_NEIGHBORS = 3
USER_COUNT_WEIGHT = 10
SAVE_META_PREDICTIONS = False
def load_ratings(data_file=DATA_FILE):
ratings = []
with open(data_file, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
key, value_string = line.split(",")
rating = float(value_string)
row_string, col_string = key.split("_")
row = int(row_string[1:])
col = int(col_string[1:])
ratings.append((row - 1, col - 1, rating))
return ratings
def ratings_to_matrix(ratings):
matrix_rows = USER_COUNT
matrix_cols = ITEM_COUNT
matrix = np.zeros([matrix_rows, matrix_cols])
for row, col, rating in ratings:
matrix[row, col] = rating
return matrix
def mask_validation(data, use_three_way):
masked_data = np.copy(data)
if use_three_way:
mask_file = VALIDATION_MASK_FILE_NAME
else:
mask_file = VALIDATION_FILE_NAME
mask_indices = get_indices_from_file(mask_file)
for row_index, col_index in mask_indices:
masked_data[row_index][col_index] = 0
return masked_data
def get_validation_indices(use_three_way):
if use_three_way:
validation_indices = get_indices_from_file(AUX)
else:
validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)
return validation_indices
def get_meta_validation_indices():
return get_indices_from_file(META_VALIDATION_FILE_NAME)
def get_observed_indices(data):
row_indices, col_indices = np.where(data != 0)
return list(zip(row_indices, col_indices))
def get_unobserved_indices(data):
row_indices, col_indices = np.where(data == 0)
return list(zip(row_indices, col_indices))
def get_indices_from_file(file_name):
indices = []
with open(file_name, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
i, j = line.split(",")
indices.append((int(i), int(j)))
return indices
def get_indices_to_predict():
"""Get list of indices to predict from sample submission file.
Returns:
indices_to_predict: list of tuples with indices"""
indices_to_predict = []
with open(SAMPLE_SUBMISSION, 'r') as file:
_ = file.readline()
for line in file:
key, _ = line.split(",")
row_string, col_string = key.split("_")
i = int(row_string[1:]) - 1
j = int(col_string[1:]) - 1
indices_to_predict.append((i, j))
return indices_to_predict
def write_ratings(predictions, submission_file):
with open(submission_file, 'w') as file:
file.write('Id,Prediction\n')
for i, j, prediction in predictions:
file.write('r%d_c%d,%f\n' % (i, j, prediction))
def reconstruction_to_predictions(
reconstruction, submission_file, indices_to_predict=None):
if indices_to_predict is None:
indices_to_predict = get_indices_to_predict()
enumerate_predictions = lambda t: (
t[0] + 1, t[1] + 1, reconstruction[t[0], t[1]])
predictions = list(map(enumerate_predictions, indices_to_predict))
write_ratings(predictions, submission_file)
def save_ensembling_predictions(reconstruction, name):
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_training_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_validation_indices(use_three_way=True))
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_validation_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_meta_validation_indices())
def clip(data):
data[data > 5] = 5
data[data < 1] = 1
return data
def ampute_reconstruction(reconstruction, data):
observed_indices = get_observed_indices(data)
for row_index, col_index in observed_indices:
reconstruction[row_index][col_index] = data[row_index][col_index]
def impute_by_avg(data, by_row):
data = data.T if by_row else data
for row in data:
empty = (row == 0)
row_sum = np.sum(row)
row[empty] = row_sum / np.count_nonzero(row)
return data.T if by_row else data
def impute_by_bias(data):
total_average = np.mean(data[np.nonzero(data)])
row_biases = np.zeros(data.shape[0])
col_biases = np.zeros(data.shape[1])
for row_index in range(data.shape[0]):
row_biases[row_index] = np.sum(data[row_index]) / \
np.count_nonzero(data[row_index]) - total_average
for col_index in range(data.shape[1]):
        col_biases[col_index] = np.sum(data[:, col_index]) / \
            np.count_nonzero(data[:, col_index]) - total_average
for row_index in range(data.shape[0]):
for col_index in range(data.shape[1]):
if data[row_index, col_index] == 0:
new_value = total_average + \
row_biases[row_index] + col_biases[col_index]
data[row_index, col_index] = new_value
return data
def impute_by_variance(data):
global_average = np.sum(data) / np.count_nonzero(data)
global_variance = np.var(data[data != 0])
adjusted_movie_means = np.zeros((data.shape[1],))
for i in range(data.shape[1]):
movie_ratings = data[:, i]
movie_ratings = movie_ratings[movie_ratings != 0]
movie_variance = np.var(movie_ratings)
relative_variance = movie_variance / global_variance
adjusted_movie_means[i] = (
global_average * relative_variance + np.sum(movie_ratings)) / (
relative_variance + np.count_nonzero(movie_ratings))
adjusted_user_deviation = np.zeros((data.shape[0],))
for i in range(data.shape[0]):
user_ratings = data[i]
user_deviations = adjusted_movie_means - user_ratings
user_deviations = user_deviations[user_ratings != 0]
user_deviation_variance = np.var(user_deviations)
relative_variance = user_deviation_variance / global_variance
adjusted_user_deviation[i] = (
global_average * relative_variance + sum(user_deviations)) / (
relative_variance + np.count_nonzero(user_deviations))
user_counts = np.count_nonzero(data, axis=1)
movie_counts = np.count_nonzero(data, axis=0)
movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))
user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T
combined_matrix = copy.copy(
movie_count_matrix) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)
d_matrix = np.divide(movie_count_matrix, combined_matrix)
m_matrix = np.tile(
adjusted_movie_means, (len(adjusted_user_deviation), 1))
u_matrix = np.tile(
adjusted_user_deviation, (len(adjusted_movie_means), 1)).T
data = np.multiply(m_matrix, d_matrix) + \
np.multiply(u_matrix, np.ones(d_matrix.shape) - d_matrix)
return data
def compute_rmse(data, prediction, indices=None):
if indices is None:
indices = get_indices_from_file(VALIDATION_FILE_NAME)
squared_error = 0
for i, j in indices:
squared_error += (data[i][j] - prediction[i][j]) ** 2
return np.sqrt(squared_error / len(indices))
def knn_smoothing(reconstruction, user_embeddings):
normalized_user_embeddings = normalize(user_embeddings)
knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)
knn.fit(normalized_user_embeddings)
distances, neighbors = knn.kneighbors(normalized_user_embeddings)
distances = distances[:, 1:]
neighbors = neighbors[:, 1:]
ones = np.ones(distances.shape)
similarities = ones - distances
weights = np.square(np.square(similarities))
smoothed_data = np.zeros(reconstruction.shape)
aggregated_neighbor_ratings = np.zeros(reconstruction.shape)
for i in range(reconstruction.shape[0]):
stacked_ratings = []
for neighbor in neighbors[i]:
stacked_ratings.append(reconstruction[neighbor])
stacked_ratings = np.asarray(stacked_ratings)
aggregated_neighbor_ratings[i] =\
np.matmul(weights[i], stacked_ratings) / sum(weights[i])
for i in range(reconstruction.shape[0]):
smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i] + WEIGHT_KNN *\
aggregated_neighbor_ratings[i]
smoothed_data = clip(smoothed_data)
return smoothed_data
def load_predictions_from_files(file_prefix='submission_'):
path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)
files = [os.path.join(path, i) for i in os.listdir(path) if \
os.path.isfile(os.path.join(path, i)) and file_prefix in i]
all_ratings = []
for file in files:
print("loading {}".format(file))
ratings = load_ratings(file)
ratings = ratings_to_matrix(ratings)
all_ratings.append(ratings)
return all_ratings
def compute_mean_predictions(all_ratings):
reconstruction = np.mean(np.array(all_ratings), axis=0)
reconstruction = impute_by_avg(reconstruction, by_row=False)
return reconstruction
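# A hedged end-to-end sketch of how these helpers compose; the model step in
# the middle is a placeholder, not something this module provides:
#
#     ratings = load_ratings()                        # [(row, col, rating), ...]
#     data = ratings_to_matrix(ratings)               # USER_COUNT x ITEM_COUNT
#     masked = mask_validation(data, use_three_way=False)
#     imputed = impute_by_variance(np.copy(masked))   # fill unobserved cells
#     reconstruction = clip(imputed)                  # stand-in for a real model fit
#     ampute_reconstruction(reconstruction, masked)   # keep observed ratings fixed
#     print(compute_rmse(data, reconstruction))       # scored on validation indices
#     reconstruction_to_predictions(reconstruction, 'submission.csv')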
|
normal
|
{
"blob_id": "af1eab58fd641b14ac054fa26e28d52c9741fb16",
"index": 7675,
"step-1": "<mask token>\n\n\ndef ratings_to_matrix(ratings):\n matrix_rows = USER_COUNT\n matrix_cols = ITEM_COUNT\n matrix = np.zeros([matrix_rows, matrix_cols])\n for row, col, rating in ratings:\n matrix[row, col] = rating\n return matrix\n\n\ndef mask_validation(data, use_three_way):\n masked_data = np.copy(data)\n if use_three_way:\n mask_file = VALIDATION_MASK_FILE_NAME\n else:\n mask_file = VALIDATION_FILE_NAME\n mask_indices = get_indices_from_file(mask_file)\n for row_index, col_index in mask_indices:\n masked_data[row_index][col_index] = 0\n return masked_data\n\n\ndef get_validation_indices(use_three_way):\n if use_three_way:\n validation_indices = get_indices_from_file(AUX)\n else:\n validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)\n return validation_indices\n\n\ndef get_meta_validation_indices():\n return get_indices_from_file(META_VALIDATION_FILE_NAME)\n\n\ndef get_observed_indices(data):\n row_indices, col_indices = np.where(data != 0)\n return list(zip(row_indices, col_indices))\n\n\ndef get_unobserved_indices(data):\n row_indices, col_indices = np.where(data == 0)\n return list(zip(row_indices, col_indices))\n\n\ndef get_indices_from_file(file_name):\n indices = []\n with open(file_name, 'r') as file:\n _ = file.readline()\n for line in file:\n i, j = line.split(',')\n indices.append((int(i), int(j)))\n return indices\n\n\n<mask token>\n\n\ndef write_ratings(predictions, submission_file):\n with open(submission_file, 'w') as file:\n file.write('Id,Prediction\\n')\n for i, j, prediction in predictions:\n file.write('r%d_c%d,%f\\n' % (i, j, prediction))\n\n\n<mask token>\n\n\ndef save_ensembling_predictions(reconstruction, name):\n reconstruction_to_predictions(reconstruction, ROOT_DIR +\n 'data/meta_training_' + name + '_stacking' + datetime.now().\n strftime('%Y-%b-%d-%H-%M-%S') + '.csv', indices_to_predict=\n get_validation_indices(use_three_way=True))\n reconstruction_to_predictions(reconstruction, ROOT_DIR +\n 'data/meta_validation_' + name + '_stacking' + datetime.now().\n strftime('%Y-%b-%d-%H-%M-%S') + '.csv', indices_to_predict=\n get_meta_validation_indices())\n\n\ndef clip(data):\n data[data > 5] = 5\n data[data < 1] = 1\n return data\n\n\n<mask token>\n\n\ndef impute_by_avg(data, by_row):\n data = data.T if by_row else data\n for row in data:\n empty = row == 0\n row_sum = np.sum(row)\n row[empty] = row_sum / np.count_nonzero(row)\n return data.T if by_row else data\n\n\ndef impute_by_bias(data):\n total_average = np.mean(data[np.nonzero(data)])\n row_biases = np.zeros(data.shape[0])\n col_biases = np.zeros(data.shape[1])\n for row_index in range(data.shape[0]):\n row_biases[row_index] = np.sum(data[row_index]) / np.count_nonzero(data\n [row_index]) - total_average\n for col_index in range(data.shape[1]):\n col_biases[col_index] = np.sum(data[:][col_index]) / np.count_nonzero(\n data[:][col_index]) - total_average\n for row_index in range(data.shape[0]):\n for col_index in range(data.shape[1]):\n if data[row_index, col_index] == 0:\n new_value = total_average + row_biases[row_index] + col_biases[\n col_index]\n data[row_index, col_index] = new_value\n return data\n\n\ndef impute_by_variance(data):\n global_average = np.sum(data) / np.count_nonzero(data)\n global_variance = np.var(data[data != 0])\n adjusted_movie_means = np.zeros((data.shape[1],))\n for i in range(data.shape[1]):\n movie_ratings = data[:, i]\n movie_ratings = movie_ratings[movie_ratings != 0]\n movie_variance = np.var(movie_ratings)\n relative_variance = movie_variance / 
global_variance\n adjusted_movie_means[i] = (global_average * relative_variance + np.\n sum(movie_ratings)) / (relative_variance + np.count_nonzero(\n movie_ratings))\n adjusted_user_deviation = np.zeros((data.shape[0],))\n for i in range(data.shape[0]):\n user_ratings = data[i]\n user_deviations = adjusted_movie_means - user_ratings\n user_deviations = user_deviations[user_ratings != 0]\n user_deviation_variance = np.var(user_deviations)\n relative_variance = user_deviation_variance / global_variance\n adjusted_user_deviation[i] = (global_average * relative_variance +\n sum(user_deviations)) / (relative_variance + np.count_nonzero(\n user_deviations))\n user_counts = np.count_nonzero(data, axis=1)\n movie_counts = np.count_nonzero(data, axis=0)\n movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))\n user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T\n combined_matrix = copy.copy(movie_count_matrix\n ) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)\n d_matrix = np.divide(movie_count_matrix, combined_matrix)\n m_matrix = np.tile(adjusted_movie_means, (len(adjusted_user_deviation), 1))\n u_matrix = np.tile(adjusted_user_deviation, (len(adjusted_movie_means), 1)\n ).T\n data = np.multiply(m_matrix, d_matrix) + np.multiply(u_matrix, np.ones(\n d_matrix.shape) - d_matrix)\n return data\n\n\ndef compute_rmse(data, prediction, indices=None):\n if indices is None:\n indices = get_indices_from_file(VALIDATION_FILE_NAME)\n squared_error = 0\n for i, j in indices:\n squared_error += (data[i][j] - prediction[i][j]) ** 2\n return np.sqrt(squared_error / len(indices))\n\n\ndef knn_smoothing(reconstruction, user_embeddings):\n normalized_user_embeddings = normalize(user_embeddings)\n knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)\n knn.fit(normalized_user_embeddings)\n distances, neighbors = knn.kneighbors(normalized_user_embeddings)\n distances = distances[:, 1:]\n neighbors = neighbors[:, 1:]\n ones = np.ones(distances.shape)\n similarities = ones - distances\n weights = np.square(np.square(similarities))\n smoothed_data = np.zeros(reconstruction.shape)\n aggregated_neighbor_ratings = np.zeros(reconstruction.shape)\n for i in range(reconstruction.shape[0]):\n stacked_ratings = []\n for neighbor in neighbors[i]:\n stacked_ratings.append(reconstruction[neighbor])\n stacked_ratings = np.asarray(stacked_ratings)\n aggregated_neighbor_ratings[i] = np.matmul(weights[i], stacked_ratings\n ) / sum(weights[i])\n for i in range(reconstruction.shape[0]):\n smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i\n ] + WEIGHT_KNN * aggregated_neighbor_ratings[i]\n smoothed_data = clip(smoothed_data)\n return smoothed_data\n\n\ndef load_predictions_from_files(file_prefix='submission_'):\n path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)\n files = [os.path.join(path, i) for i in os.listdir(path) if os.path.\n isfile(os.path.join(path, i)) and file_prefix in i]\n all_ratings = []\n for file in files:\n print('loading {}'.format(file))\n ratings = load_ratings(file)\n ratings = ratings_to_matrix(ratings)\n all_ratings.append(ratings)\n return all_ratings\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_ratings(data_file=DATA_FILE):\n ratings = []\n with open(data_file, 'r') as file:\n _ = file.readline()\n for line in file:\n key, value_string = line.split(',')\n rating = float(value_string)\n row_string, col_string = key.split('_')\n row = int(row_string[1:])\n col = int(col_string[1:])\n ratings.append((row - 1, col - 1, rating))\n return ratings\n\n\ndef ratings_to_matrix(ratings):\n matrix_rows = USER_COUNT\n matrix_cols = ITEM_COUNT\n matrix = np.zeros([matrix_rows, matrix_cols])\n for row, col, rating in ratings:\n matrix[row, col] = rating\n return matrix\n\n\ndef mask_validation(data, use_three_way):\n masked_data = np.copy(data)\n if use_three_way:\n mask_file = VALIDATION_MASK_FILE_NAME\n else:\n mask_file = VALIDATION_FILE_NAME\n mask_indices = get_indices_from_file(mask_file)\n for row_index, col_index in mask_indices:\n masked_data[row_index][col_index] = 0\n return masked_data\n\n\ndef get_validation_indices(use_three_way):\n if use_three_way:\n validation_indices = get_indices_from_file(AUX)\n else:\n validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)\n return validation_indices\n\n\ndef get_meta_validation_indices():\n return get_indices_from_file(META_VALIDATION_FILE_NAME)\n\n\ndef get_observed_indices(data):\n row_indices, col_indices = np.where(data != 0)\n return list(zip(row_indices, col_indices))\n\n\ndef get_unobserved_indices(data):\n row_indices, col_indices = np.where(data == 0)\n return list(zip(row_indices, col_indices))\n\n\ndef get_indices_from_file(file_name):\n indices = []\n with open(file_name, 'r') as file:\n _ = file.readline()\n for line in file:\n i, j = line.split(',')\n indices.append((int(i), int(j)))\n return indices\n\n\n<mask token>\n\n\ndef write_ratings(predictions, submission_file):\n with open(submission_file, 'w') as file:\n file.write('Id,Prediction\\n')\n for i, j, prediction in predictions:\n file.write('r%d_c%d,%f\\n' % (i, j, prediction))\n\n\ndef reconstruction_to_predictions(reconstruction, submission_file,\n indices_to_predict=None):\n if indices_to_predict is None:\n indices_to_predict = get_indices_to_predict()\n enumerate_predictions = lambda t: (t[0] + 1, t[1] + 1, reconstruction[t\n [0], t[1]])\n predictions = list(map(enumerate_predictions, indices_to_predict))\n write_ratings(predictions, submission_file)\n\n\ndef save_ensembling_predictions(reconstruction, name):\n reconstruction_to_predictions(reconstruction, ROOT_DIR +\n 'data/meta_training_' + name + '_stacking' + datetime.now().\n strftime('%Y-%b-%d-%H-%M-%S') + '.csv', indices_to_predict=\n get_validation_indices(use_three_way=True))\n reconstruction_to_predictions(reconstruction, ROOT_DIR +\n 'data/meta_validation_' + name + '_stacking' + datetime.now().\n strftime('%Y-%b-%d-%H-%M-%S') + '.csv', indices_to_predict=\n get_meta_validation_indices())\n\n\ndef clip(data):\n data[data > 5] = 5\n data[data < 1] = 1\n return data\n\n\n<mask token>\n\n\ndef impute_by_avg(data, by_row):\n data = data.T if by_row else data\n for row in data:\n empty = row == 0\n row_sum = np.sum(row)\n row[empty] = row_sum / np.count_nonzero(row)\n return data.T if by_row else data\n\n\ndef impute_by_bias(data):\n total_average = np.mean(data[np.nonzero(data)])\n row_biases = np.zeros(data.shape[0])\n col_biases = np.zeros(data.shape[1])\n for row_index in range(data.shape[0]):\n row_biases[row_index] = np.sum(data[row_index]) / np.count_nonzero(data\n [row_index]) - total_average\n for col_index in range(data.shape[1]):\n 
col_biases[col_index] = np.sum(data[:][col_index]) / np.count_nonzero(\n data[:][col_index]) - total_average\n for row_index in range(data.shape[0]):\n for col_index in range(data.shape[1]):\n if data[row_index, col_index] == 0:\n new_value = total_average + row_biases[row_index] + col_biases[\n col_index]\n data[row_index, col_index] = new_value\n return data\n\n\ndef impute_by_variance(data):\n global_average = np.sum(data) / np.count_nonzero(data)\n global_variance = np.var(data[data != 0])\n adjusted_movie_means = np.zeros((data.shape[1],))\n for i in range(data.shape[1]):\n movie_ratings = data[:, i]\n movie_ratings = movie_ratings[movie_ratings != 0]\n movie_variance = np.var(movie_ratings)\n relative_variance = movie_variance / global_variance\n adjusted_movie_means[i] = (global_average * relative_variance + np.\n sum(movie_ratings)) / (relative_variance + np.count_nonzero(\n movie_ratings))\n adjusted_user_deviation = np.zeros((data.shape[0],))\n for i in range(data.shape[0]):\n user_ratings = data[i]\n user_deviations = adjusted_movie_means - user_ratings\n user_deviations = user_deviations[user_ratings != 0]\n user_deviation_variance = np.var(user_deviations)\n relative_variance = user_deviation_variance / global_variance\n adjusted_user_deviation[i] = (global_average * relative_variance +\n sum(user_deviations)) / (relative_variance + np.count_nonzero(\n user_deviations))\n user_counts = np.count_nonzero(data, axis=1)\n movie_counts = np.count_nonzero(data, axis=0)\n movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))\n user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T\n combined_matrix = copy.copy(movie_count_matrix\n ) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)\n d_matrix = np.divide(movie_count_matrix, combined_matrix)\n m_matrix = np.tile(adjusted_movie_means, (len(adjusted_user_deviation), 1))\n u_matrix = np.tile(adjusted_user_deviation, (len(adjusted_movie_means), 1)\n ).T\n data = np.multiply(m_matrix, d_matrix) + np.multiply(u_matrix, np.ones(\n d_matrix.shape) - d_matrix)\n return data\n\n\ndef compute_rmse(data, prediction, indices=None):\n if indices is None:\n indices = get_indices_from_file(VALIDATION_FILE_NAME)\n squared_error = 0\n for i, j in indices:\n squared_error += (data[i][j] - prediction[i][j]) ** 2\n return np.sqrt(squared_error / len(indices))\n\n\ndef knn_smoothing(reconstruction, user_embeddings):\n normalized_user_embeddings = normalize(user_embeddings)\n knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)\n knn.fit(normalized_user_embeddings)\n distances, neighbors = knn.kneighbors(normalized_user_embeddings)\n distances = distances[:, 1:]\n neighbors = neighbors[:, 1:]\n ones = np.ones(distances.shape)\n similarities = ones - distances\n weights = np.square(np.square(similarities))\n smoothed_data = np.zeros(reconstruction.shape)\n aggregated_neighbor_ratings = np.zeros(reconstruction.shape)\n for i in range(reconstruction.shape[0]):\n stacked_ratings = []\n for neighbor in neighbors[i]:\n stacked_ratings.append(reconstruction[neighbor])\n stacked_ratings = np.asarray(stacked_ratings)\n aggregated_neighbor_ratings[i] = np.matmul(weights[i], stacked_ratings\n ) / sum(weights[i])\n for i in range(reconstruction.shape[0]):\n smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i\n ] + WEIGHT_KNN * aggregated_neighbor_ratings[i]\n smoothed_data = clip(smoothed_data)\n return smoothed_data\n\n\ndef load_predictions_from_files(file_prefix='submission_'):\n path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)\n files = 
[os.path.join(path, i) for i in os.listdir(path) if os.path.\n isfile(os.path.join(path, i)) and file_prefix in i]\n all_ratings = []\n for file in files:\n print('loading {}'.format(file))\n ratings = load_ratings(file)\n ratings = ratings_to_matrix(ratings)\n all_ratings.append(ratings)\n return all_ratings\n\n\ndef compute_mean_predictions(all_ratings):\n reconstruction = np.mean(np.array(all_ratings), axis=0)\n reconstruction = impute_by_avg(reconstruction, by_row=False)\n return reconstruction\n",
"step-3": "<mask token>\n\n\ndef load_ratings(data_file=DATA_FILE):\n ratings = []\n with open(data_file, 'r') as file:\n _ = file.readline()\n for line in file:\n key, value_string = line.split(',')\n rating = float(value_string)\n row_string, col_string = key.split('_')\n row = int(row_string[1:])\n col = int(col_string[1:])\n ratings.append((row - 1, col - 1, rating))\n return ratings\n\n\ndef ratings_to_matrix(ratings):\n matrix_rows = USER_COUNT\n matrix_cols = ITEM_COUNT\n matrix = np.zeros([matrix_rows, matrix_cols])\n for row, col, rating in ratings:\n matrix[row, col] = rating\n return matrix\n\n\ndef mask_validation(data, use_three_way):\n masked_data = np.copy(data)\n if use_three_way:\n mask_file = VALIDATION_MASK_FILE_NAME\n else:\n mask_file = VALIDATION_FILE_NAME\n mask_indices = get_indices_from_file(mask_file)\n for row_index, col_index in mask_indices:\n masked_data[row_index][col_index] = 0\n return masked_data\n\n\ndef get_validation_indices(use_three_way):\n if use_three_way:\n validation_indices = get_indices_from_file(AUX)\n else:\n validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)\n return validation_indices\n\n\ndef get_meta_validation_indices():\n return get_indices_from_file(META_VALIDATION_FILE_NAME)\n\n\ndef get_observed_indices(data):\n row_indices, col_indices = np.where(data != 0)\n return list(zip(row_indices, col_indices))\n\n\ndef get_unobserved_indices(data):\n row_indices, col_indices = np.where(data == 0)\n return list(zip(row_indices, col_indices))\n\n\ndef get_indices_from_file(file_name):\n indices = []\n with open(file_name, 'r') as file:\n _ = file.readline()\n for line in file:\n i, j = line.split(',')\n indices.append((int(i), int(j)))\n return indices\n\n\ndef get_indices_to_predict():\n \"\"\"Get list of indices to predict from sample submission file.\n Returns:\n indices_to_predict: list of tuples with indices\"\"\"\n indices_to_predict = []\n with open(SAMPLE_SUBMISSION, 'r') as file:\n _ = file.readline()\n for line in file:\n key, _ = line.split(',')\n row_string, col_string = key.split('_')\n i = int(row_string[1:]) - 1\n j = int(col_string[1:]) - 1\n indices_to_predict.append((i, j))\n return indices_to_predict\n\n\ndef write_ratings(predictions, submission_file):\n with open(submission_file, 'w') as file:\n file.write('Id,Prediction\\n')\n for i, j, prediction in predictions:\n file.write('r%d_c%d,%f\\n' % (i, j, prediction))\n\n\ndef reconstruction_to_predictions(reconstruction, submission_file,\n indices_to_predict=None):\n if indices_to_predict is None:\n indices_to_predict = get_indices_to_predict()\n enumerate_predictions = lambda t: (t[0] + 1, t[1] + 1, reconstruction[t\n [0], t[1]])\n predictions = list(map(enumerate_predictions, indices_to_predict))\n write_ratings(predictions, submission_file)\n\n\ndef save_ensembling_predictions(reconstruction, name):\n reconstruction_to_predictions(reconstruction, ROOT_DIR +\n 'data/meta_training_' + name + '_stacking' + datetime.now().\n strftime('%Y-%b-%d-%H-%M-%S') + '.csv', indices_to_predict=\n get_validation_indices(use_three_way=True))\n reconstruction_to_predictions(reconstruction, ROOT_DIR +\n 'data/meta_validation_' + name + '_stacking' + datetime.now().\n strftime('%Y-%b-%d-%H-%M-%S') + '.csv', indices_to_predict=\n get_meta_validation_indices())\n\n\ndef clip(data):\n data[data > 5] = 5\n data[data < 1] = 1\n return data\n\n\ndef ampute_reconstruction(reconstruction, data):\n observed_indices = get_observed_indices(data)\n for row_index, col_index in 
observed_indices:\n reconstruction[row_index][col_index] = data[row_index][col_index]\n\n\ndef impute_by_avg(data, by_row):\n data = data.T if by_row else data\n for row in data:\n empty = row == 0\n row_sum = np.sum(row)\n row[empty] = row_sum / np.count_nonzero(row)\n return data.T if by_row else data\n\n\ndef impute_by_bias(data):\n total_average = np.mean(data[np.nonzero(data)])\n row_biases = np.zeros(data.shape[0])\n col_biases = np.zeros(data.shape[1])\n for row_index in range(data.shape[0]):\n row_biases[row_index] = np.sum(data[row_index]) / np.count_nonzero(data\n [row_index]) - total_average\n for col_index in range(data.shape[1]):\n col_biases[col_index] = np.sum(data[:][col_index]) / np.count_nonzero(\n data[:][col_index]) - total_average\n for row_index in range(data.shape[0]):\n for col_index in range(data.shape[1]):\n if data[row_index, col_index] == 0:\n new_value = total_average + row_biases[row_index] + col_biases[\n col_index]\n data[row_index, col_index] = new_value\n return data\n\n\ndef impute_by_variance(data):\n global_average = np.sum(data) / np.count_nonzero(data)\n global_variance = np.var(data[data != 0])\n adjusted_movie_means = np.zeros((data.shape[1],))\n for i in range(data.shape[1]):\n movie_ratings = data[:, i]\n movie_ratings = movie_ratings[movie_ratings != 0]\n movie_variance = np.var(movie_ratings)\n relative_variance = movie_variance / global_variance\n adjusted_movie_means[i] = (global_average * relative_variance + np.\n sum(movie_ratings)) / (relative_variance + np.count_nonzero(\n movie_ratings))\n adjusted_user_deviation = np.zeros((data.shape[0],))\n for i in range(data.shape[0]):\n user_ratings = data[i]\n user_deviations = adjusted_movie_means - user_ratings\n user_deviations = user_deviations[user_ratings != 0]\n user_deviation_variance = np.var(user_deviations)\n relative_variance = user_deviation_variance / global_variance\n adjusted_user_deviation[i] = (global_average * relative_variance +\n sum(user_deviations)) / (relative_variance + np.count_nonzero(\n user_deviations))\n user_counts = np.count_nonzero(data, axis=1)\n movie_counts = np.count_nonzero(data, axis=0)\n movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))\n user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T\n combined_matrix = copy.copy(movie_count_matrix\n ) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)\n d_matrix = np.divide(movie_count_matrix, combined_matrix)\n m_matrix = np.tile(adjusted_movie_means, (len(adjusted_user_deviation), 1))\n u_matrix = np.tile(adjusted_user_deviation, (len(adjusted_movie_means), 1)\n ).T\n data = np.multiply(m_matrix, d_matrix) + np.multiply(u_matrix, np.ones(\n d_matrix.shape) - d_matrix)\n return data\n\n\ndef compute_rmse(data, prediction, indices=None):\n if indices is None:\n indices = get_indices_from_file(VALIDATION_FILE_NAME)\n squared_error = 0\n for i, j in indices:\n squared_error += (data[i][j] - prediction[i][j]) ** 2\n return np.sqrt(squared_error / len(indices))\n\n\ndef knn_smoothing(reconstruction, user_embeddings):\n normalized_user_embeddings = normalize(user_embeddings)\n knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)\n knn.fit(normalized_user_embeddings)\n distances, neighbors = knn.kneighbors(normalized_user_embeddings)\n distances = distances[:, 1:]\n neighbors = neighbors[:, 1:]\n ones = np.ones(distances.shape)\n similarities = ones - distances\n weights = np.square(np.square(similarities))\n smoothed_data = np.zeros(reconstruction.shape)\n aggregated_neighbor_ratings = 
np.zeros(reconstruction.shape)\n for i in range(reconstruction.shape[0]):\n stacked_ratings = []\n for neighbor in neighbors[i]:\n stacked_ratings.append(reconstruction[neighbor])\n stacked_ratings = np.asarray(stacked_ratings)\n aggregated_neighbor_ratings[i] = np.matmul(weights[i], stacked_ratings\n ) / sum(weights[i])\n for i in range(reconstruction.shape[0]):\n smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i\n ] + WEIGHT_KNN * aggregated_neighbor_ratings[i]\n smoothed_data = clip(smoothed_data)\n return smoothed_data\n\n\ndef load_predictions_from_files(file_prefix='submission_'):\n path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)\n files = [os.path.join(path, i) for i in os.listdir(path) if os.path.\n isfile(os.path.join(path, i)) and file_prefix in i]\n all_ratings = []\n for file in files:\n print('loading {}'.format(file))\n ratings = load_ratings(file)\n ratings = ratings_to_matrix(ratings)\n all_ratings.append(ratings)\n return all_ratings\n\n\ndef compute_mean_predictions(all_ratings):\n reconstruction = np.mean(np.array(all_ratings), axis=0)\n reconstruction = impute_by_avg(reconstruction, by_row=False)\n return reconstruction\n",
"step-4": "<mask token>\nROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')\nDATA_FILE = os.path.join(ROOT_DIR, 'data/data_train.csv')\nTRAINING_FILE_NAME = os.path.join(ROOT_DIR, 'data/trainingIndices.csv')\nVALIDATION_FILE_NAME = os.path.join(ROOT_DIR, 'data/validationIndices.csv')\nVALIDATION_MASK_FILE_NAME = os.path.join(ROOT_DIR,\n 'data/train_valid_80_10_10/validationIndices_mask.csv')\nAUX = os.path.join(ROOT_DIR,\n 'data/train_valid_80_10_10/validationIndices_first.csv')\nMETA_VALIDATION_FILE_NAME = os.path.join(ROOT_DIR,\n 'data/train_valid_80_10_10/validationIndices_second.csv')\nSAMPLE_SUBMISSION = os.path.join(ROOT_DIR, 'data/sampleSubmission.csv')\nENSEMBLE_INPUT_DIR = 'data/stacking/good_data'\nITEM_COUNT = 1000\nUSER_COUNT = 10000\nWEIGHT_KNN = 0.001\nN_NEIGHBORS = 3\nUSER_COUNT_WEIGHT = 10\nSAVE_META_PREDICTIONS = False\n\n\ndef load_ratings(data_file=DATA_FILE):\n ratings = []\n with open(data_file, 'r') as file:\n _ = file.readline()\n for line in file:\n key, value_string = line.split(',')\n rating = float(value_string)\n row_string, col_string = key.split('_')\n row = int(row_string[1:])\n col = int(col_string[1:])\n ratings.append((row - 1, col - 1, rating))\n return ratings\n\n\ndef ratings_to_matrix(ratings):\n matrix_rows = USER_COUNT\n matrix_cols = ITEM_COUNT\n matrix = np.zeros([matrix_rows, matrix_cols])\n for row, col, rating in ratings:\n matrix[row, col] = rating\n return matrix\n\n\ndef mask_validation(data, use_three_way):\n masked_data = np.copy(data)\n if use_three_way:\n mask_file = VALIDATION_MASK_FILE_NAME\n else:\n mask_file = VALIDATION_FILE_NAME\n mask_indices = get_indices_from_file(mask_file)\n for row_index, col_index in mask_indices:\n masked_data[row_index][col_index] = 0\n return masked_data\n\n\ndef get_validation_indices(use_three_way):\n if use_three_way:\n validation_indices = get_indices_from_file(AUX)\n else:\n validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)\n return validation_indices\n\n\ndef get_meta_validation_indices():\n return get_indices_from_file(META_VALIDATION_FILE_NAME)\n\n\ndef get_observed_indices(data):\n row_indices, col_indices = np.where(data != 0)\n return list(zip(row_indices, col_indices))\n\n\ndef get_unobserved_indices(data):\n row_indices, col_indices = np.where(data == 0)\n return list(zip(row_indices, col_indices))\n\n\ndef get_indices_from_file(file_name):\n indices = []\n with open(file_name, 'r') as file:\n _ = file.readline()\n for line in file:\n i, j = line.split(',')\n indices.append((int(i), int(j)))\n return indices\n\n\ndef get_indices_to_predict():\n \"\"\"Get list of indices to predict from sample submission file.\n Returns:\n indices_to_predict: list of tuples with indices\"\"\"\n indices_to_predict = []\n with open(SAMPLE_SUBMISSION, 'r') as file:\n _ = file.readline()\n for line in file:\n key, _ = line.split(',')\n row_string, col_string = key.split('_')\n i = int(row_string[1:]) - 1\n j = int(col_string[1:]) - 1\n indices_to_predict.append((i, j))\n return indices_to_predict\n\n\ndef write_ratings(predictions, submission_file):\n with open(submission_file, 'w') as file:\n file.write('Id,Prediction\\n')\n for i, j, prediction in predictions:\n file.write('r%d_c%d,%f\\n' % (i, j, prediction))\n\n\ndef reconstruction_to_predictions(reconstruction, submission_file,\n indices_to_predict=None):\n if indices_to_predict is None:\n indices_to_predict = get_indices_to_predict()\n enumerate_predictions = lambda t: (t[0] + 1, t[1] + 1, reconstruction[t\n [0], 
t[1]])\n predictions = list(map(enumerate_predictions, indices_to_predict))\n write_ratings(predictions, submission_file)\n\n\ndef save_ensembling_predictions(reconstruction, name):\n reconstruction_to_predictions(reconstruction, ROOT_DIR +\n 'data/meta_training_' + name + '_stacking' + datetime.now().\n strftime('%Y-%b-%d-%H-%M-%S') + '.csv', indices_to_predict=\n get_validation_indices(use_three_way=True))\n reconstruction_to_predictions(reconstruction, ROOT_DIR +\n 'data/meta_validation_' + name + '_stacking' + datetime.now().\n strftime('%Y-%b-%d-%H-%M-%S') + '.csv', indices_to_predict=\n get_meta_validation_indices())\n\n\ndef clip(data):\n data[data > 5] = 5\n data[data < 1] = 1\n return data\n\n\ndef ampute_reconstruction(reconstruction, data):\n observed_indices = get_observed_indices(data)\n for row_index, col_index in observed_indices:\n reconstruction[row_index][col_index] = data[row_index][col_index]\n\n\ndef impute_by_avg(data, by_row):\n data = data.T if by_row else data\n for row in data:\n empty = row == 0\n row_sum = np.sum(row)\n row[empty] = row_sum / np.count_nonzero(row)\n return data.T if by_row else data\n\n\ndef impute_by_bias(data):\n total_average = np.mean(data[np.nonzero(data)])\n row_biases = np.zeros(data.shape[0])\n col_biases = np.zeros(data.shape[1])\n for row_index in range(data.shape[0]):\n row_biases[row_index] = np.sum(data[row_index]) / np.count_nonzero(data\n [row_index]) - total_average\n for col_index in range(data.shape[1]):\n col_biases[col_index] = np.sum(data[:][col_index]) / np.count_nonzero(\n data[:][col_index]) - total_average\n for row_index in range(data.shape[0]):\n for col_index in range(data.shape[1]):\n if data[row_index, col_index] == 0:\n new_value = total_average + row_biases[row_index] + col_biases[\n col_index]\n data[row_index, col_index] = new_value\n return data\n\n\ndef impute_by_variance(data):\n global_average = np.sum(data) / np.count_nonzero(data)\n global_variance = np.var(data[data != 0])\n adjusted_movie_means = np.zeros((data.shape[1],))\n for i in range(data.shape[1]):\n movie_ratings = data[:, i]\n movie_ratings = movie_ratings[movie_ratings != 0]\n movie_variance = np.var(movie_ratings)\n relative_variance = movie_variance / global_variance\n adjusted_movie_means[i] = (global_average * relative_variance + np.\n sum(movie_ratings)) / (relative_variance + np.count_nonzero(\n movie_ratings))\n adjusted_user_deviation = np.zeros((data.shape[0],))\n for i in range(data.shape[0]):\n user_ratings = data[i]\n user_deviations = adjusted_movie_means - user_ratings\n user_deviations = user_deviations[user_ratings != 0]\n user_deviation_variance = np.var(user_deviations)\n relative_variance = user_deviation_variance / global_variance\n adjusted_user_deviation[i] = (global_average * relative_variance +\n sum(user_deviations)) / (relative_variance + np.count_nonzero(\n user_deviations))\n user_counts = np.count_nonzero(data, axis=1)\n movie_counts = np.count_nonzero(data, axis=0)\n movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))\n user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T\n combined_matrix = copy.copy(movie_count_matrix\n ) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)\n d_matrix = np.divide(movie_count_matrix, combined_matrix)\n m_matrix = np.tile(adjusted_movie_means, (len(adjusted_user_deviation), 1))\n u_matrix = np.tile(adjusted_user_deviation, (len(adjusted_movie_means), 1)\n ).T\n data = np.multiply(m_matrix, d_matrix) + np.multiply(u_matrix, np.ones(\n d_matrix.shape) - 
d_matrix)\n return data\n\n\ndef compute_rmse(data, prediction, indices=None):\n if indices is None:\n indices = get_indices_from_file(VALIDATION_FILE_NAME)\n squared_error = 0\n for i, j in indices:\n squared_error += (data[i][j] - prediction[i][j]) ** 2\n return np.sqrt(squared_error / len(indices))\n\n\ndef knn_smoothing(reconstruction, user_embeddings):\n normalized_user_embeddings = normalize(user_embeddings)\n knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)\n knn.fit(normalized_user_embeddings)\n distances, neighbors = knn.kneighbors(normalized_user_embeddings)\n distances = distances[:, 1:]\n neighbors = neighbors[:, 1:]\n ones = np.ones(distances.shape)\n similarities = ones - distances\n weights = np.square(np.square(similarities))\n smoothed_data = np.zeros(reconstruction.shape)\n aggregated_neighbor_ratings = np.zeros(reconstruction.shape)\n for i in range(reconstruction.shape[0]):\n stacked_ratings = []\n for neighbor in neighbors[i]:\n stacked_ratings.append(reconstruction[neighbor])\n stacked_ratings = np.asarray(stacked_ratings)\n aggregated_neighbor_ratings[i] = np.matmul(weights[i], stacked_ratings\n ) / sum(weights[i])\n for i in range(reconstruction.shape[0]):\n smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i\n ] + WEIGHT_KNN * aggregated_neighbor_ratings[i]\n smoothed_data = clip(smoothed_data)\n return smoothed_data\n\n\ndef load_predictions_from_files(file_prefix='submission_'):\n path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)\n files = [os.path.join(path, i) for i in os.listdir(path) if os.path.\n isfile(os.path.join(path, i)) and file_prefix in i]\n all_ratings = []\n for file in files:\n print('loading {}'.format(file))\n ratings = load_ratings(file)\n ratings = ratings_to_matrix(ratings)\n all_ratings.append(ratings)\n return all_ratings\n\n\ndef compute_mean_predictions(all_ratings):\n reconstruction = np.mean(np.array(all_ratings), axis=0)\n reconstruction = impute_by_avg(reconstruction, by_row=False)\n return reconstruction\n",
"step-5": "import copy\nimport os\nfrom datetime import datetime\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import normalize\n\nROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')\nDATA_FILE = os.path.join(ROOT_DIR, 'data/data_train.csv')\nTRAINING_FILE_NAME = os.path.join(\n ROOT_DIR, 'data/trainingIndices.csv')\nVALIDATION_FILE_NAME = os.path.join(\n ROOT_DIR, 'data/validationIndices.csv')\nVALIDATION_MASK_FILE_NAME = os.path.join(\n ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_mask.csv')\nAUX = os.path.join(\n ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_first.csv')\nMETA_VALIDATION_FILE_NAME = os.path.join(\n ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_second.csv')\n\nSAMPLE_SUBMISSION = os.path.join(ROOT_DIR, \\\n 'data/sampleSubmission.csv')\nENSEMBLE_INPUT_DIR = 'data/stacking/good_data'\nITEM_COUNT = 1000\nUSER_COUNT = 10000\nWEIGHT_KNN = 0.001\nN_NEIGHBORS = 3\nUSER_COUNT_WEIGHT = 10\nSAVE_META_PREDICTIONS = False\n\ndef load_ratings(data_file=DATA_FILE):\n ratings = []\n with open(data_file, 'r') as file:\n # Read header.\n _ = file.readline()\n for line in file:\n key, value_string = line.split(\",\")\n rating = float(value_string)\n row_string, col_string = key.split(\"_\")\n row = int(row_string[1:])\n col = int(col_string[1:])\n ratings.append((row - 1, col - 1, rating))\n return ratings\n\ndef ratings_to_matrix(ratings):\n matrix_rows = USER_COUNT\n matrix_cols = ITEM_COUNT\n matrix = np.zeros([matrix_rows, matrix_cols])\n for row, col, rating in ratings:\n matrix[row, col] = rating\n return matrix\n\ndef mask_validation(data, use_three_way):\n masked_data = np.copy(data)\n if use_three_way:\n mask_file = VALIDATION_MASK_FILE_NAME\n else:\n mask_file = VALIDATION_FILE_NAME\n mask_indices = get_indices_from_file(mask_file)\n for row_index, col_index in mask_indices:\n masked_data[row_index][col_index] = 0\n return masked_data\n\ndef get_validation_indices(use_three_way):\n if use_three_way:\n validation_indices = get_indices_from_file(AUX)\n else:\n validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)\n return validation_indices\n\ndef get_meta_validation_indices():\n return get_indices_from_file(META_VALIDATION_FILE_NAME)\n\ndef get_observed_indices(data):\n row_indices, col_indices = np.where(data != 0)\n return list(zip(row_indices, col_indices))\n\ndef get_unobserved_indices(data):\n row_indices, col_indices = np.where(data == 0)\n return list(zip(row_indices, col_indices))\n\ndef get_indices_from_file(file_name):\n indices = []\n with open(file_name, 'r') as file:\n # Read header.\n _ = file.readline()\n for line in file:\n i, j = line.split(\",\")\n indices.append((int(i), int(j)))\n return indices\n\ndef get_indices_to_predict():\n \"\"\"Get list of indices to predict from sample submission file.\n Returns:\n indices_to_predict: list of tuples with indices\"\"\"\n indices_to_predict = []\n with open(SAMPLE_SUBMISSION, 'r') as file:\n _ = file.readline()\n for line in file:\n key, _ = line.split(\",\")\n row_string, col_string = key.split(\"_\")\n i = int(row_string[1:]) - 1\n j = int(col_string[1:]) - 1\n indices_to_predict.append((i, j))\n return indices_to_predict\n\ndef write_ratings(predictions, submission_file):\n with open(submission_file, 'w') as file:\n file.write('Id,Prediction\\n')\n for i, j, prediction in predictions:\n file.write('r%d_c%d,%f\\n' % (i, j, prediction))\n\ndef reconstruction_to_predictions(\n reconstruction, submission_file, 
indices_to_predict=None):\n if indices_to_predict is None:\n indices_to_predict = get_indices_to_predict()\n enumerate_predictions = lambda t: (\n t[0] + 1, t[1] + 1, reconstruction[t[0], t[1]])\n predictions = list(map(enumerate_predictions, indices_to_predict))\n write_ratings(predictions, submission_file)\n\ndef save_ensembling_predictions(reconstruction, name):\n reconstruction_to_predictions(\n reconstruction, ROOT_DIR + 'data/meta_training_' + name + '_stacking'\n + datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',\n indices_to_predict=get_validation_indices(use_three_way=True))\n reconstruction_to_predictions(\n reconstruction, ROOT_DIR + 'data/meta_validation_' + name + '_stacking'\n + datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',\n indices_to_predict=get_meta_validation_indices())\n\ndef clip(data):\n data[data > 5] = 5\n data[data < 1] = 1\n return data\n\ndef ampute_reconstruction(reconstruction, data):\n observed_indices = get_observed_indices(data)\n for row_index, col_index in observed_indices:\n reconstruction[row_index][col_index] = data[row_index][col_index]\n\ndef impute_by_avg(data, by_row):\n data = data.T if by_row else data\n for row in data:\n empty = (row == 0)\n row_sum = np.sum(row)\n row[empty] = row_sum / np.count_nonzero(row)\n return data.T if by_row else data\n\ndef impute_by_bias(data):\n total_average = np.mean(data[np.nonzero(data)])\n row_biases = np.zeros(data.shape[0])\n col_biases = np.zeros(data.shape[1])\n for row_index in range(data.shape[0]):\n row_biases[row_index] = np.sum(data[row_index]) / \\\n np.count_nonzero(data[row_index]) - total_average\n for col_index in range(data.shape[1]):\n col_biases[col_index] = np.sum(data[:][col_index]) / \\\n np.count_nonzero(data[:][col_index]) - total_average\n for row_index in range(data.shape[0]):\n for col_index in range(data.shape[1]):\n if data[row_index, col_index] == 0:\n new_value = total_average + \\\n row_biases[row_index] + col_biases[col_index]\n data[row_index, col_index] = new_value\n return data\n\ndef impute_by_variance(data):\n global_average = np.sum(data) / np.count_nonzero(data)\n global_variance = np.var(data[data != 0])\n\n adjusted_movie_means = np.zeros((data.shape[1],))\n for i in range(data.shape[1]):\n movie_ratings = data[:, i]\n movie_ratings = movie_ratings[movie_ratings != 0]\n movie_variance = np.var(movie_ratings)\n relative_variance = movie_variance / global_variance\n adjusted_movie_means[i] = (\n global_average * relative_variance + np.sum(movie_ratings)) / (\n relative_variance + np.count_nonzero(movie_ratings))\n\n adjusted_user_deviation = np.zeros((data.shape[0],))\n for i in range(data.shape[0]):\n user_ratings = data[i]\n user_deviations = adjusted_movie_means - user_ratings\n user_deviations = user_deviations[user_ratings != 0]\n user_deviation_variance = np.var(user_deviations)\n relative_variance = user_deviation_variance / global_variance\n adjusted_user_deviation[i] = (\n global_average * relative_variance + sum(user_deviations)) / (\n relative_variance + np.count_nonzero(user_deviations))\n\n user_counts = np.count_nonzero(data, axis=1)\n movie_counts = np.count_nonzero(data, axis=0)\n\n movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))\n user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T\n combined_matrix = copy.copy(\n movie_count_matrix) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)\n d_matrix = np.divide(movie_count_matrix, combined_matrix)\n\n m_matrix = np.tile(\n adjusted_movie_means, 
(len(adjusted_user_deviation), 1))\n u_matrix = np.tile(\n adjusted_user_deviation, (len(adjusted_movie_means), 1)).T\n\n data = np.multiply(m_matrix, d_matrix) + \\\n np.multiply(u_matrix, np.ones(d_matrix.shape) - d_matrix)\n return data\n\ndef compute_rmse(data, prediction, indices=None):\n if indices is None:\n indices = get_indices_from_file(VALIDATION_FILE_NAME)\n squared_error = 0\n for i, j in indices:\n squared_error += (data[i][j] - prediction[i][j]) ** 2\n return np.sqrt(squared_error / len(indices))\n\ndef knn_smoothing(reconstruction, user_embeddings):\n normalized_user_embeddings = normalize(user_embeddings)\n knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)\n knn.fit(normalized_user_embeddings)\n distances, neighbors = knn.kneighbors(normalized_user_embeddings)\n distances = distances[:, 1:]\n neighbors = neighbors[:, 1:]\n\n ones = np.ones(distances.shape)\n similarities = ones - distances\n weights = np.square(np.square(similarities))\n smoothed_data = np.zeros(reconstruction.shape)\n aggregated_neighbor_ratings = np.zeros(reconstruction.shape)\n\n for i in range(reconstruction.shape[0]):\n stacked_ratings = []\n for neighbor in neighbors[i]:\n stacked_ratings.append(reconstruction[neighbor])\n stacked_ratings = np.asarray(stacked_ratings)\n aggregated_neighbor_ratings[i] =\\\n np.matmul(weights[i], stacked_ratings) / sum(weights[i])\n\n for i in range(reconstruction.shape[0]):\n smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i] + WEIGHT_KNN *\\\n aggregated_neighbor_ratings[i]\n\n smoothed_data = clip(smoothed_data)\n return smoothed_data\n\ndef load_predictions_from_files(file_prefix='submission_'):\n path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)\n files = [os.path.join(path, i) for i in os.listdir(path) if \\\n os.path.isfile(os.path.join(path, i)) and file_prefix in i]\n all_ratings = []\n for file in files:\n print(\"loading {}\".format(file))\n ratings = load_ratings(file)\n ratings = ratings_to_matrix(ratings)\n all_ratings.append(ratings)\n return all_ratings\n\ndef compute_mean_predictions(all_ratings):\n reconstruction = np.mean(np.array(all_ratings), axis=0)\n reconstruction = impute_by_avg(reconstruction, by_row=False)\n return reconstruction\n",
"step-ids": [
16,
19,
21,
22,
24
]
}
|
[
16,
19,
21,
22,
24
] |
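For context, the ratings-ensembling helpers in the record above compose into a short pipeline. The sketch below is illustrative only: it assumes the functions are importable from a module named `utils` (hypothetical name) and that prediction CSVs already exist under ENSEMBLE_INPUT_DIR.

# Hedged usage sketch for the record above; `utils` is an assumed module name.
import utils

all_ratings = utils.load_predictions_from_files(file_prefix='submission_')  # list of USER_COUNT x ITEM_COUNT matrices
mean_reconstruction = utils.compute_mean_predictions(all_ratings)           # element-wise mean, then column-average imputation
utils.reconstruction_to_predictions(mean_reconstruction, 'data/mean_submission.csv')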
#! /usr/bin/env python
import os
import re
from codecs import open
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
def get_changelog():
with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:
text = f.read()
header_matches = list(re.finditer('^=+$', text, re.MULTILINE))
text = text[:header_matches[5].start()] # until fifth header
lines = text.splitlines()[:-1] # all lines without fifth release number
return '=========\nChangelog\n=========\n\n' + '\n'.join(lines)
about = {}
with open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
with open('README.rst', encoding='utf-8') as f:
README = f.read()
CHANGELOG = get_changelog()
requires = [
'py>=1.4.23',
'appdirs',
'devpi_common<4,>=3.3.0',
'itsdangerous>=0.24',
'execnet>=1.2',
'pyramid>=1.8',
'waitress>=1.0.1',
'repoze.lru>=0.6',
'passlib[argon2]',
'pluggy>=0.3.0,<1.0',
'strictyaml',
]
extras_require = {}
setup(
name=about['__title__'],
description=about['__description__'],
keywords='pypi realtime cache server',
long_description="\n\n".join([README, CHANGELOG]),
url=about['__url__'],
version=about['__version__'],
maintainer=about['__maintainer__'],
maintainer_email=about['__maintainer_email__'],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
license=about['__license__'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=requires,
extras_require=extras_require,
entry_points={
'console_scripts': [
'devpi-server = devpi_server.main:main' ],
'devpi_server': [
'devpi-server-auth-basic = devpi_server.auth_basic',
'devpi-server-auth-devpi = devpi_server.auth_devpi',
'devpi-server-sqlite = devpi_server.keyfs_sqlite',
'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs' ],
'devpi_web': [
'devpi-server-status = devpi_server.views'],
'pytest11': [
'pytest_devpi_server = pytest_devpi_server' ],
},
)
|
normal
|
{
"blob_id": "c81889cf4d87933b562aa4618bc5185a8d213107",
"index": 8075,
"step-1": "<mask token>\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()]\n lines = text.splitlines()[:-1]\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()]\n lines = text.splitlines()[:-1]\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\n\n<mask token>\nwith open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8'\n ) as f:\n exec(f.read(), about)\nwith open('README.rst', encoding='utf-8') as f:\n README = f.read()\n<mask token>\nsetup(name=about['__title__'], description=about['__description__'],\n keywords='pypi realtime cache server', long_description='\\n\\n'.join([\n README, CHANGELOG]), url=about['__url__'], version=about['__version__'],\n maintainer=about['__maintainer__'], maintainer_email=about[\n '__maintainer_email__'], packages=find_packages(), include_package_data\n =True, zip_safe=False, license=about['__license__'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], install_requires=requires,\n extras_require=extras_require, entry_points={'console_scripts': [\n 'devpi-server = devpi_server.main:main'], 'devpi_server': [\n 'devpi-server-auth-basic = devpi_server.auth_basic',\n 'devpi-server-auth-devpi = devpi_server.auth_devpi',\n 'devpi-server-sqlite = devpi_server.keyfs_sqlite',\n 'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs'], 'devpi_web':\n ['devpi-server-status = devpi_server.views'], 'pytest11': [\n 'pytest_devpi_server = pytest_devpi_server']})\n",
"step-3": "<mask token>\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()]\n lines = text.splitlines()[:-1]\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\n\nabout = {}\nwith open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8'\n ) as f:\n exec(f.read(), about)\nwith open('README.rst', encoding='utf-8') as f:\n README = f.read()\nCHANGELOG = get_changelog()\nrequires = ['py>=1.4.23', 'appdirs', 'devpi_common<4,>=3.3.0',\n 'itsdangerous>=0.24', 'execnet>=1.2', 'pyramid>=1.8', 'waitress>=1.0.1',\n 'repoze.lru>=0.6', 'passlib[argon2]', 'pluggy>=0.3.0,<1.0', 'strictyaml']\nextras_require = {}\nsetup(name=about['__title__'], description=about['__description__'],\n keywords='pypi realtime cache server', long_description='\\n\\n'.join([\n README, CHANGELOG]), url=about['__url__'], version=about['__version__'],\n maintainer=about['__maintainer__'], maintainer_email=about[\n '__maintainer_email__'], packages=find_packages(), include_package_data\n =True, zip_safe=False, license=about['__license__'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], install_requires=requires,\n extras_require=extras_require, entry_points={'console_scripts': [\n 'devpi-server = devpi_server.main:main'], 'devpi_server': [\n 'devpi-server-auth-basic = devpi_server.auth_basic',\n 'devpi-server-auth-devpi = devpi_server.auth_devpi',\n 'devpi-server-sqlite = devpi_server.keyfs_sqlite',\n 'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs'], 'devpi_web':\n ['devpi-server-status = devpi_server.views'], 'pytest11': [\n 'pytest_devpi_server = pytest_devpi_server']})\n",
"step-4": "import os\nimport re\nfrom codecs import open\nfrom setuptools import find_packages, setup\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()]\n lines = text.splitlines()[:-1]\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\n\nabout = {}\nwith open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8'\n ) as f:\n exec(f.read(), about)\nwith open('README.rst', encoding='utf-8') as f:\n README = f.read()\nCHANGELOG = get_changelog()\nrequires = ['py>=1.4.23', 'appdirs', 'devpi_common<4,>=3.3.0',\n 'itsdangerous>=0.24', 'execnet>=1.2', 'pyramid>=1.8', 'waitress>=1.0.1',\n 'repoze.lru>=0.6', 'passlib[argon2]', 'pluggy>=0.3.0,<1.0', 'strictyaml']\nextras_require = {}\nsetup(name=about['__title__'], description=about['__description__'],\n keywords='pypi realtime cache server', long_description='\\n\\n'.join([\n README, CHANGELOG]), url=about['__url__'], version=about['__version__'],\n maintainer=about['__maintainer__'], maintainer_email=about[\n '__maintainer_email__'], packages=find_packages(), include_package_data\n =True, zip_safe=False, license=about['__license__'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], install_requires=requires,\n extras_require=extras_require, entry_points={'console_scripts': [\n 'devpi-server = devpi_server.main:main'], 'devpi_server': [\n 'devpi-server-auth-basic = devpi_server.auth_basic',\n 'devpi-server-auth-devpi = devpi_server.auth_devpi',\n 'devpi-server-sqlite = devpi_server.keyfs_sqlite',\n 'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs'], 'devpi_web':\n ['devpi-server-status = devpi_server.views'], 'pytest11': [\n 'pytest_devpi_server = pytest_devpi_server']})\n",
"step-5": "#! /usr/bin/env python\n\nimport os\nimport re\n\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()] # until fifth header\n lines = text.splitlines()[:-1] # all lines without fifth release number\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\nabout = {}\n\nwith open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8') as f:\n exec(f.read(), about)\n\nwith open('README.rst', encoding='utf-8') as f:\n README = f.read()\n\nCHANGELOG = get_changelog()\n\nrequires = [\n 'py>=1.4.23',\n 'appdirs',\n 'devpi_common<4,>=3.3.0',\n 'itsdangerous>=0.24',\n 'execnet>=1.2',\n 'pyramid>=1.8',\n 'waitress>=1.0.1',\n 'repoze.lru>=0.6',\n 'passlib[argon2]',\n 'pluggy>=0.3.0,<1.0',\n 'strictyaml',\n ]\nextras_require = {}\n\nsetup(\n name=about['__title__'],\n description=about['__description__'],\n keywords='pypi realtime cache server',\n long_description=\"\\n\\n\".join([README, CHANGELOG]),\n url=about['__url__'],\n version=about['__version__'],\n maintainer=about['__maintainer__'],\n maintainer_email=about['__maintainer_email__'],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n license=about['__license__'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n install_requires=requires,\n extras_require=extras_require,\n entry_points={\n 'console_scripts': [\n 'devpi-server = devpi_server.main:main' ],\n 'devpi_server': [\n 'devpi-server-auth-basic = devpi_server.auth_basic',\n 'devpi-server-auth-devpi = devpi_server.auth_devpi',\n 'devpi-server-sqlite = devpi_server.keyfs_sqlite',\n 'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs' ],\n 'devpi_web': [\n 'devpi-server-status = devpi_server.views'],\n 'pytest11': [\n 'pytest_devpi_server = pytest_devpi_server' ],\n },\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys

n = int(sys.stdin.readline().rstrip())
l = list(map(int, sys.stdin.readline().rstrip().split()))
m = int(sys.stdin.readline().rstrip())
v = list(map(int, sys.stdin.readline().rstrip().split()))

a = min(l)
b = max(l)
card = [0] * (b - a + 1)  # one counter per value in [a, b]

for i in l:
    card[i - a] += 1

for j in v:
    if a <= j <= b:
        print(card[j - a], end=" ")
    else:
        print(0, end=" ")
|
normal
|
{
"blob_id": "6b0081e829f9252e44fa7b81fbfcdd4115856373",
"index": 3748,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in l:\n card[i - a] += 1\nfor j in v:\n if (j >= a) & (j <= b):\n print(card[j - a], end=' ')\n else:\n print(0, end=' ')\n",
"step-3": "<mask token>\nn = int(sys.stdin.readline().rstrip())\nl = list(map(int, sys.stdin.readline().rstrip().split()))\nm = int(sys.stdin.readline().rstrip())\nv = list(map(int, sys.stdin.readline().rstrip().split()))\ncard = [0] * (max(l) - min(l) + 1)\na = min(l)\nb = max(l)\nfor i in l:\n card[i - a] += 1\nfor j in v:\n if (j >= a) & (j <= b):\n print(card[j - a], end=' ')\n else:\n print(0, end=' ')\n",
"step-4": "import sys\nn = int(sys.stdin.readline().rstrip())\nl = list(map(int, sys.stdin.readline().rstrip().split()))\nm = int(sys.stdin.readline().rstrip())\nv = list(map(int, sys.stdin.readline().rstrip().split()))\ncard = [0] * (max(l) - min(l) + 1)\na = min(l)\nb = max(l)\nfor i in l:\n card[i - a] += 1\nfor j in v:\n if (j >= a) & (j <= b):\n print(card[j - a], end=' ')\n else:\n print(0, end=' ')\n",
"step-5": "import sys\nn = int(sys.stdin.readline().rstrip())\nl = list(map(int,sys.stdin.readline().rstrip().split()))\n\nm = int(sys.stdin.readline().rstrip())\nv = list(map(int,sys.stdin.readline().rstrip().split()))\n\ncard = [0] * (max(l)-min(l)+1)\n\na = min(l)\nb = max(l)\n\nfor i in l:\n card[i-a]+=1\n\nfor j in v:\n if ((j>=a)&(j<=b)):\n print(card[j-a],end = \" \")\n else:\n print(0, end = \" \")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
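A quick end-to-end check of the counting script in the record above. This is a sketch under assumptions: the script is saved as cards.py (hypothetical filename) and the input values are made up for illustration.

import subprocess

sample_input = "5\n6 3 2 10 10\n3\n10 9 -5\n"
result = subprocess.run(
    ["python", "cards.py"], input=sample_input,
    capture_output=True, text=True,
)
# l = [6, 3, 2, 10, 10]: 10 occurs twice, 9 never, and -5 lies outside
# [min(l), max(l)], so the script prints "2 0 0 ".
print(result.stdout)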
from django.urls import path
from main.views import IndexView, BuiltinsView, CustomView
app_name = 'main'
urlpatterns = [path('', IndexView.as_view(), name='index'), path(
'builtins/', BuiltinsView.as_view(), name='builtins'), path('custom/',
CustomView.as_view(), name='custom')]
|
normal
|
{
"blob_id": "7b527f9ec66ddf35f3395d78c857c021975402c7",
"index": 5141,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'main'\nurlpatterns = [path('', IndexView.as_view(), name='index'), path(\n 'builtins/', BuiltinsView.as_view(), name='builtins'), path('custom/',\n CustomView.as_view(), name='custom')]\n",
"step-3": "from django.urls import path\nfrom main.views import IndexView, BuiltinsView, CustomView\napp_name = 'main'\nurlpatterns = [path('', IndexView.as_view(), name='index'), path(\n 'builtins/', BuiltinsView.as_view(), name='builtins'), path('custom/',\n CustomView.as_view(), name='custom')]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
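The three named routes in the record above resolve by namespace once the urlconf is mounted. A minimal sketch, assuming the project includes it at the site root via include('main.urls') and Django settings are configured:

from django.urls import reverse

reverse("main:index")     # "/"
reverse("main:builtins")  # "/builtins/"
reverse("main:custom")    # "/custom/"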
from django.db import models
class TestModel(models.Model):
name = models.CharField(max_length=15)
surname = models.CharField(max_length=10)
age = models.IntegerField()
class Example(models.Model):
integer_field = models.IntegerField()
positive_field = models.PositiveIntegerField()
positive_small_field = models.PositiveSmallIntegerField()
big_integer_field = models.BigIntegerField()
float_field = models.FloatField()
binary_field = models.BinaryField()
boolean_field = models.BooleanField()
char_field = models.CharField(max_length=5)
text_field = models.TextField(max_length=20)
date_field = models.DateField(auto_now=False, auto_now_add=False)
date_time_field = models.DateTimeField(auto_now_add=False)
    decimal_field = models.DecimalField(max_digits=8, decimal_places=2)  # e.g. 222222.22
email = models.EmailField()
file_field = models.FileField(upload_to='file')
image_field = models.ImageField(upload_to='images')
class Author(models.Model):
name = models.CharField(max_length=50, verbose_name="Имя", blank=True)
surname = models.CharField(max_length=50, verbose_name="Фамилия")
date_birth = models.DateField(auto_now=False, verbose_name="Дата рождения")
def __str__(self):
return self.name + ' ' + self.surname
class Book(models.Model):
CHOISE_GENRE = (
('comedy', "Comedy"),
('tragedy', "Tragedy"),
('drama', "Drama"),
)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
title = models.CharField(max_length=50)
text = models.TextField(max_length=1000)
genre = models.CharField(max_length=50, choices=CHOISE_GENRE)
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return self.name
class Restaurant(models.Model):
place = models.OneToOneField(Place, on_delete=models.CASCADE, primary_key=True)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
    def __str__(self):
        # __str__ must return a string; delegate to Place.__str__.
        return str(self.place)
class Publication(models.Model):
title = models.CharField(max_length=30)
# def __str__(self):
# return self.title
#
# class Meta:
# ordering = ('title', )
class Article(models.Model):
headline = models.CharField(max_length=100)
publications = models.ManyToManyField(Publication)
# def __str__(self):
# return self.headline
#
class Meta:
ordering = ('headline', )
|
normal
|
{
"blob_id": "8afce5b47c7c9c67a8be493f7f4de1510352b1c7",
"index": 4559,
"step-1": "<mask token>\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE,\n primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n\n class Meta:\n ordering = 'headline',\n",
"step-2": "<mask token>\n\n\nclass Book(models.Model):\n CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',\n 'Drama')\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=50)\n text = models.TextField(max_length=1000)\n genre = models.CharField(max_length=50, choices=CHOISE_GENRE)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE,\n primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n\n class Meta:\n ordering = 'headline',\n",
"step-3": "<mask token>\n\n\nclass Example(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Author(models.Model):\n name = models.CharField(max_length=50, verbose_name='Имя', blank=True)\n surname = models.CharField(max_length=50, verbose_name='Фамилия')\n date_birth = models.DateField(auto_now=False, verbose_name='Дата рождения')\n\n def __str__(self):\n return self.name + ' ' + self.surname\n\n\nclass Book(models.Model):\n CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',\n 'Drama')\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=50)\n text = models.TextField(max_length=1000)\n genre = models.CharField(max_length=50, choices=CHOISE_GENRE)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE,\n primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n\n class Meta:\n ordering = 'headline',\n",
"step-4": "<mask token>\n\n\nclass Example(models.Model):\n integer_field = models.IntegerField()\n positive_field = models.PositiveIntegerField()\n positive_small_field = models.PositiveSmallIntegerField()\n big_integer_field = models.BigIntegerField()\n float_field = models.FloatField()\n binary_field = models.BinaryField()\n boolean_field = models.BooleanField()\n char_field = models.CharField(max_length=5)\n text_field = models.TextField(max_length=20)\n date_field = models.DateField(auto_now=False, auto_now_add=False)\n date_time_field = models.DateTimeField(auto_now_add=False)\n decimal_field = models.DecimalField(max_digits=8, decimal_places=2)\n email = models.EmailField()\n file_field = models.FileField(upload_to='file')\n image_field = models.ImageField(upload_to='images')\n\n\nclass Author(models.Model):\n name = models.CharField(max_length=50, verbose_name='Имя', blank=True)\n surname = models.CharField(max_length=50, verbose_name='Фамилия')\n date_birth = models.DateField(auto_now=False, verbose_name='Дата рождения')\n\n def __str__(self):\n return self.name + ' ' + self.surname\n\n\nclass Book(models.Model):\n CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',\n 'Drama')\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=50)\n text = models.TextField(max_length=1000)\n genre = models.CharField(max_length=50, choices=CHOISE_GENRE)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE,\n primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n\n class Meta:\n ordering = 'headline',\n",
"step-5": "from django.db import models\n\n\nclass TestModel(models.Model):\n name = models.CharField(max_length=15)\n surname = models.CharField(max_length=10)\n age = models.IntegerField()\n\n\nclass Example(models.Model):\n integer_field = models.IntegerField()\n positive_field = models.PositiveIntegerField()\n positive_small_field = models.PositiveSmallIntegerField()\n big_integer_field = models.BigIntegerField()\n float_field = models.FloatField()\n binary_field = models.BinaryField()\n boolean_field = models.BooleanField()\n char_field = models.CharField(max_length=5)\n text_field = models.TextField(max_length=20)\n date_field = models.DateField(auto_now=False, auto_now_add=False)\n date_time_field = models.DateTimeField(auto_now_add=False)\n decimal_field = models.DecimalField(max_digits=8, decimal_places=2) #222222.22\n email = models.EmailField()\n file_field = models.FileField(upload_to='file')\n image_field = models.ImageField(upload_to='images')\n\n\nclass Author(models.Model):\n name = models.CharField(max_length=50, verbose_name=\"Имя\", blank=True)\n surname = models.CharField(max_length=50, verbose_name=\"Фамилия\")\n date_birth = models.DateField(auto_now=False, verbose_name=\"Дата рождения\")\n\n def __str__(self):\n return self.name + ' ' + self.surname\n\n\nclass Book(models.Model):\n\n CHOISE_GENRE = (\n ('comedy', \"Comedy\"),\n ('tragedy', \"Tragedy\"),\n ('drama', \"Drama\"),\n )\n\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=50)\n text = models.TextField(max_length=1000)\n genre = models.CharField(max_length=50, choices=CHOISE_GENRE)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE, primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n # def __str__(self):\n # return self.title\n #\n # class Meta:\n # ordering = ('title', )\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n # def __str__(self):\n # return self.headline\n #\n class Meta:\n ordering = ('headline', )\n",
"step-ids": [
10,
12,
16,
17,
21
]
}
|
[
10,
12,
16,
17,
21
] |
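To illustrate the ManyToManyField in the record above: Publication and Article link through an automatic join table, queryable from either side. A minimal sketch (object values are made up; requires migrated models and a database):

p = Publication.objects.create(title="Science Weekly")
a = Article.objects.create(headline="Django is fun")
a.publications.add(p)            # writes a row to the join table
assert a in p.article_set.all()  # default reverse accessor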
from django.conf import settings
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from fish.labinterface.models import *
from registration import signals
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
from labinterface.models import StaffMember
class CustomRegistrationBackend(object):
def register(self, request, **kwargs):
username, email, password = kwargs['username'], kwargs['email'], kwargs['password1']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
new_user = RegistrationProfile.objects.create_inactive_user(username, email, password, site)
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
new_profile = StaffMember.objects.get(user=new_user)
new_profile.first_name=kwargs['first_name']
new_profile.last_name=kwargs['last_name']
new_profile.position=kwargs['position']
new_profile.save()
return new_user
def activate(self, request, activation_key):
activated = RegistrationProfile.objects.activate_user(activation_key)
if activated:
signals.user_activated.send(sender=self.__class__,
user=activated,
request=request)
return activated
def registration_allowed(self, request):
"""
Indicate whether account registration is currently permitted,
based on the value of the setting ``REGISTRATION_OPEN``. This
is determined as follows:
* If ``REGISTRATION_OPEN`` is not specified in settings, or is
set to ``True``, registration is permitted.
* If ``REGISTRATION_OPEN`` is both specified and set to
``False``, registration is not permitted.
"""
return getattr(settings, 'REGISTRATION_OPEN', True)
def get_form_class(self, request):
"""
Return the default form class used for user registration.
"""
return RegistrationForm
def post_registration_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
user registration.
"""
return ('registration_complete', (), {})
def post_activation_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
account activation.
"""
newMember = StaffMember.objects.filter(user_id__exact=user.pk).get()
labGroup = LabGroup.objects.filter(pk=1).get()
newMember.lab_group = labGroup
newMember.save()
return ('registration_activation_complete', (), {})
|
normal
|
{
"blob_id": "201279c0cba2d52b6863204bfadb6291a0065f60",
"index": 3961,
"step-1": "<mask token>\n\n\nclass CustomRegistrationBackend(object):\n <mask token>\n\n def activate(self, request, activation_key):\n activated = RegistrationProfile.objects.activate_user(activation_key)\n if activated:\n signals.user_activated.send(sender=self.__class__, user=\n activated, request=request)\n return activated\n\n def registration_allowed(self, request):\n \"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n return getattr(settings, 'REGISTRATION_OPEN', True)\n\n def get_form_class(self, request):\n \"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n return RegistrationForm\n\n def post_registration_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n return 'registration_complete', (), {}\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CustomRegistrationBackend(object):\n\n def register(self, request, **kwargs):\n username, email, password = kwargs['username'], kwargs['email'\n ], kwargs['password1']\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n new_user = RegistrationProfile.objects.create_inactive_user(username,\n email, password, site)\n signals.user_registered.send(sender=self.__class__, user=new_user,\n request=request)\n new_profile = StaffMember.objects.get(user=new_user)\n new_profile.first_name = kwargs['first_name']\n new_profile.last_name = kwargs['last_name']\n new_profile.position = kwargs['position']\n new_profile.save()\n return new_user\n\n def activate(self, request, activation_key):\n activated = RegistrationProfile.objects.activate_user(activation_key)\n if activated:\n signals.user_activated.send(sender=self.__class__, user=\n activated, request=request)\n return activated\n\n def registration_allowed(self, request):\n \"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n return getattr(settings, 'REGISTRATION_OPEN', True)\n\n def get_form_class(self, request):\n \"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n return RegistrationForm\n\n def post_registration_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n return 'registration_complete', (), {}\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CustomRegistrationBackend(object):\n\n def register(self, request, **kwargs):\n username, email, password = kwargs['username'], kwargs['email'\n ], kwargs['password1']\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n new_user = RegistrationProfile.objects.create_inactive_user(username,\n email, password, site)\n signals.user_registered.send(sender=self.__class__, user=new_user,\n request=request)\n new_profile = StaffMember.objects.get(user=new_user)\n new_profile.first_name = kwargs['first_name']\n new_profile.last_name = kwargs['last_name']\n new_profile.position = kwargs['position']\n new_profile.save()\n return new_user\n\n def activate(self, request, activation_key):\n activated = RegistrationProfile.objects.activate_user(activation_key)\n if activated:\n signals.user_activated.send(sender=self.__class__, user=\n activated, request=request)\n return activated\n\n def registration_allowed(self, request):\n \"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n return getattr(settings, 'REGISTRATION_OPEN', True)\n\n def get_form_class(self, request):\n \"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n return RegistrationForm\n\n def post_registration_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n return 'registration_complete', (), {}\n\n def post_activation_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\taccount activation.\n\t\t\n\t\t\"\"\"\n newMember = StaffMember.objects.filter(user_id__exact=user.pk).get()\n labGroup = LabGroup.objects.filter(pk=1).get()\n newMember.lab_group = labGroup\n newMember.save()\n return 'registration_activation_complete', (), {}\n",
"step-4": "from django.conf import settings\nfrom django.contrib.sites.models import RequestSite\nfrom django.contrib.sites.models import Site\nfrom fish.labinterface.models import *\nfrom registration import signals\nfrom registration.forms import RegistrationForm\nfrom registration.models import RegistrationProfile\nfrom labinterface.models import StaffMember\n\n\nclass CustomRegistrationBackend(object):\n\n def register(self, request, **kwargs):\n username, email, password = kwargs['username'], kwargs['email'\n ], kwargs['password1']\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n new_user = RegistrationProfile.objects.create_inactive_user(username,\n email, password, site)\n signals.user_registered.send(sender=self.__class__, user=new_user,\n request=request)\n new_profile = StaffMember.objects.get(user=new_user)\n new_profile.first_name = kwargs['first_name']\n new_profile.last_name = kwargs['last_name']\n new_profile.position = kwargs['position']\n new_profile.save()\n return new_user\n\n def activate(self, request, activation_key):\n activated = RegistrationProfile.objects.activate_user(activation_key)\n if activated:\n signals.user_activated.send(sender=self.__class__, user=\n activated, request=request)\n return activated\n\n def registration_allowed(self, request):\n \"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n return getattr(settings, 'REGISTRATION_OPEN', True)\n\n def get_form_class(self, request):\n \"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n return RegistrationForm\n\n def post_registration_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n return 'registration_complete', (), {}\n\n def post_activation_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\taccount activation.\n\t\t\n\t\t\"\"\"\n newMember = StaffMember.objects.filter(user_id__exact=user.pk).get()\n labGroup = LabGroup.objects.filter(pk=1).get()\n newMember.lab_group = labGroup\n newMember.save()\n return 'registration_activation_complete', (), {}\n",
"step-5": "from django.conf import settings\nfrom django.contrib.sites.models import RequestSite\nfrom django.contrib.sites.models import Site\n\nfrom fish.labinterface.models import *\n\nfrom registration import signals\nfrom registration.forms import RegistrationForm\nfrom registration.models import RegistrationProfile\nfrom labinterface.models import StaffMember\n\n\nclass CustomRegistrationBackend(object):\n\tdef register(self, request, **kwargs):\n\t\tusername, email, password = kwargs['username'], kwargs['email'], kwargs['password1']\n\t\tif Site._meta.installed:\n\t\t\tsite = Site.objects.get_current()\n\t\telse:\n\t\t\tsite = RequestSite(request)\n\t\tnew_user = RegistrationProfile.objects.create_inactive_user(username, email, password, site)\n\t\tsignals.user_registered.send(sender=self.__class__, user=new_user, request=request)\n\t\tnew_profile = StaffMember.objects.get(user=new_user)\n\t\tnew_profile.first_name=kwargs['first_name']\n\t\tnew_profile.last_name=kwargs['last_name']\n\t\tnew_profile.position=kwargs['position']\n\t\tnew_profile.save()\n\t\treturn new_user\n\tdef activate(self, request, activation_key):\n\t\tactivated = RegistrationProfile.objects.activate_user(activation_key)\n\t\tif activated:\n\t\t\tsignals.user_activated.send(sender=self.__class__,\n\t\t\t\t\t\t\t\t\t\tuser=activated,\n\t\t\t\t\t\t\t\t\t\trequest=request)\n\t\treturn activated\n\n\tdef registration_allowed(self, request):\n\t\t\"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n\t\treturn getattr(settings, 'REGISTRATION_OPEN', True)\n\n\tdef get_form_class(self, request):\n\t\t\"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n\t\treturn RegistrationForm\n\n\tdef post_registration_redirect(self, request, user):\n\t\t\"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n\t\treturn ('registration_complete', (), {})\n\n\tdef post_activation_redirect(self, request, user):\n\t\t\"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\taccount activation.\n\t\t\n\t\t\"\"\"\n\t\tnewMember = StaffMember.objects.filter(user_id__exact=user.pk).get()\n\t\tlabGroup = LabGroup.objects.filter(pk=1).get()\n\t\tnewMember.lab_group = labGroup\n\t\tnewMember.save()\n\t\treturn ('registration_activation_complete', (), {})",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
if len(sys.argv) != 2:
        print('usage : %s port' % sys.argv[0])
sys.exit()
port = int(sys.argv[1])
count = 0
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.settimeout(2)
    sock.bind(('', port))
    # Record this copy's own (host, port) pair so its messages are ignored
    me = socket.gethostbyname(socket.gethostname()), sock.getsockname()[1]
sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))
lifetime = time.time() + 10
while time.time() < lifetime:
try:
message, address = sock.recvfrom(1024)
message = message.decode('utf-8')
print('Message : %s from : %s' % (message, str(address)))
if message == 'IBORN':
sock.sendto(bytes('ILIVE', 'utf-8'), address)
print(address)
if address != me:
count += 1
print('Current count of copies : %s' % count)
elif message == 'ILIVE':
if address != me:
count += 1
print('Current count of copies : %s' % count)
elif message == 'IEXIT':
if address != me:
count -= 1
print('Current count of copies : %s' % count)
except socket.timeout:
print('No new messages in 2 seconds.')
time.sleep(1)
sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))
print('Count at exit : %s' % count)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
if len(sys.argv) != 2:
        print('usage : %s port' % sys.argv[0])
sys.exit()
port = int(sys.argv[1])
count = 0
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.settimeout(2)
    sock.bind(('', port))
    # Record this copy's own (host, port) pair so its messages are ignored
    me = socket.gethostbyname(socket.gethostname()), sock.getsockname()[1]
sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))
lifetime = time.time() + 10
while time.time() < lifetime:
try:
message, address = sock.recvfrom(1024)
message = message.decode('utf-8')
print('Message : %s from : %s' % (message, str(address)))
if message == 'IBORN':
sock.sendto(bytes('ILIVE', 'utf-8'), address)
print(address)
if address != me:
count += 1
print('Current count of copies : %s' % count)
elif message == 'ILIVE':
if address != me:
count += 1
print('Current count of copies : %s' % count)
elif message == 'IEXIT':
if address != me:
count -= 1
print('Current count of copies : %s' % count)
except socket.timeout:
print('No new messages in 2 seconds.')
time.sleep(1)
sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))
print('Count at exit : %s' % count)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import socket
import time
import sys
def main():
if len(sys.argv) != 2:
        print('usage : %s port' % sys.argv[0])
sys.exit()
port = int(sys.argv[1])
count = 0
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.settimeout(2)
    sock.bind(('', port))
    # Record this copy's own (host, port) pair so its messages are ignored
    me = socket.gethostbyname(socket.gethostname()), sock.getsockname()[1]
sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))
lifetime = time.time() + 10
while time.time() < lifetime:
try:
message, address = sock.recvfrom(1024)
message = message.decode('utf-8')
print('Message : %s from : %s' % (message, str(address)))
if message == 'IBORN':
sock.sendto(bytes('ILIVE', 'utf-8'), address)
print(address)
if address != me:
count += 1
print('Current count of copies : %s' % count)
elif message == 'ILIVE':
if address != me:
count += 1
print('Current count of copies : %s' % count)
elif message == 'IEXIT':
if address != me:
count -= 1
print('Current count of copies : %s' % count)
except socket.timeout:
print('No new messages in 2 seconds.')
time.sleep(1)
sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))
print('Count at exit : %s' % count)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import socket
import time
import sys
def main():
if len(sys.argv) != 2:
print("usage : %s port")
sys.exit()
port = int(sys.argv[1])
count = 0
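    # UDP socket that shares the port (REUSEADDR) and can broadcast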
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.settimeout(2)
    sock.bind(('', port))
    # Record this copy's own (host, port) pair so its messages are ignored
    me = (socket.gethostbyname(socket.gethostname()), sock.getsockname()[1])
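    # Announce this copy's birth to every peer listening on the port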
sock.sendto(bytes("IBORN", "utf-8"), ('255.255.255.255', port))
lifetime = time.time() + 10
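    # Listen for peer messages for roughly 10 seconds before exiting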
while time.time() < lifetime:
try:
message, address = sock.recvfrom(1024)
message = message.decode("utf-8")
print("Message : %s from : %s" % (message, str(address)))
if message == "IBORN":
sock.sendto(bytes("ILIVE", "utf-8"), address)
print(address)
if address != me:
count += 1
print("Current count of copies : %s" % count)
elif message == "ILIVE":
if address != me:
count += 1
print("Current count of copies : %s" % count)
elif message == "IEXIT":
if address != me:
count -= 1
print("Current count of copies : %s" % count)
except socket.timeout:
print("No new messages in 2 seconds.")
time.sleep(1)
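    # Announce departure so surviving copies decrement their counters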
sock.sendto(bytes("IEXIT", "utf-8"), ('255.255.255.255', port))
print("Count at exit : %s" % count)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "68b9f7317f7c6dcda791338ee642dffb653ac694",
"index": 4804,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n if len(sys.argv) != 2:\n print('usage : %s port')\n sys.exit()\n port = int(sys.argv[1])\n count = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(2)\n sock.bind(('', port))\n sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))\n lifetime = time.time() + 10\n while time.time() < lifetime:\n try:\n message, address = sock.recvfrom(1024)\n message = message.decode('utf-8')\n print('Message : %s from : %s' % (message, str(address)))\n if message == 'IBORN':\n sock.sendto(bytes('ILIVE', 'utf-8'), address)\n print(address)\n me = socket.gethostbyname(socket.gethostname()\n ), sock.getsockname()[1]\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'ILIVE':\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'IEXIT':\n if address != me:\n count -= 1\n print('Current count of copies : %s' % count)\n except socket.timeout:\n print('No new messages in 2 seconds.')\n time.sleep(1)\n sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))\n print('Count at exit : %s' % count)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n if len(sys.argv) != 2:\n print('usage : %s port')\n sys.exit()\n port = int(sys.argv[1])\n count = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(2)\n sock.bind(('', port))\n sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))\n lifetime = time.time() + 10\n while time.time() < lifetime:\n try:\n message, address = sock.recvfrom(1024)\n message = message.decode('utf-8')\n print('Message : %s from : %s' % (message, str(address)))\n if message == 'IBORN':\n sock.sendto(bytes('ILIVE', 'utf-8'), address)\n print(address)\n me = socket.gethostbyname(socket.gethostname()\n ), sock.getsockname()[1]\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'ILIVE':\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'IEXIT':\n if address != me:\n count -= 1\n print('Current count of copies : %s' % count)\n except socket.timeout:\n print('No new messages in 2 seconds.')\n time.sleep(1)\n sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))\n print('Count at exit : %s' % count)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import socket\nimport time\nimport sys\n\n\ndef main():\n if len(sys.argv) != 2:\n print('usage : %s port')\n sys.exit()\n port = int(sys.argv[1])\n count = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(2)\n sock.bind(('', port))\n sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))\n lifetime = time.time() + 10\n while time.time() < lifetime:\n try:\n message, address = sock.recvfrom(1024)\n message = message.decode('utf-8')\n print('Message : %s from : %s' % (message, str(address)))\n if message == 'IBORN':\n sock.sendto(bytes('ILIVE', 'utf-8'), address)\n print(address)\n me = socket.gethostbyname(socket.gethostname()\n ), sock.getsockname()[1]\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'ILIVE':\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'IEXIT':\n if address != me:\n count -= 1\n print('Current count of copies : %s' % count)\n except socket.timeout:\n print('No new messages in 2 seconds.')\n time.sleep(1)\n sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))\n print('Count at exit : %s' % count)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import socket\nimport time\nimport sys\n\n\ndef main():\n if len(sys.argv) != 2:\n print(\"usage : %s port\")\n sys.exit()\n port = int(sys.argv[1])\n count = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(2)\n sock.bind(('', port))\n\n sock.sendto(bytes(\"IBORN\", \"utf-8\"), ('255.255.255.255', port))\n lifetime = time.time() + 10\n while time.time() < lifetime:\n try:\n message, address = sock.recvfrom(1024)\n message = message.decode(\"utf-8\")\n print(\"Message : %s from : %s\" % (message, str(address)))\n if message == \"IBORN\":\n sock.sendto(bytes(\"ILIVE\", \"utf-8\"), address)\n print(address)\n me = (socket.gethostbyname(socket.gethostname()), sock.getsockname()[1])\n if address != me:\n count += 1\n print(\"Current count of copies : %s\" % count)\n elif message == \"ILIVE\":\n if address != me:\n count += 1\n print(\"Current count of copies : %s\" % count)\n elif message == \"IEXIT\":\n if address != me:\n count -= 1\n print(\"Current count of copies : %s\" % count)\n except socket.timeout:\n print(\"No new messages in 2 seconds.\")\n time.sleep(1)\n sock.sendto(bytes(\"IEXIT\", \"utf-8\"), ('255.255.255.255', port))\n print(\"Count at exit : %s\" % count)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
###############################################################
# Yolanda Gunter
# Lab 4
# My program uses decisions, repetition, functions, files, lists
# and exception handling. It gets its input from a file: the program
# asks the User for the current date, reads a contact file that
# contains names with DOBs, and calculates each contact's age, the
# season each was born in, and whether each was born in a leap year.
# Then my program prints the calculated average age of the contacts.
###############################################################
########################################################
# Function name: main
# Input: contactsLab4.txt file
# Output: table of contact, age, birth season & if born leap yr or not
# Purpose: This function reads file, makes two lists, converts strings to
# integers, calculates ages, season born & if born leap yr or not
# then lastly calculates average age of contacts in entire file.
###############################################
def main():
# start exception handling
try:
        # Open a file named contactsLab4.txt
contacts = open('contactsLab4.txt', 'r')
# Create empty name list
names = []
# Create empty birthday list
birthdates = []
# Read file, establish records, strip \n, append to lists,
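        # The file alternates lines: a contact name, then that
        # contact's birthdate in m/d/yyyy format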
name = contacts.readline()
while name != '':
names.append(name.rstrip('\n'))
date = contacts.readline()
birthdates.append(date.rstrip('\n'))
name = contacts.readline()
# Close the file
contacts.close()
# Call display_contacts
display_contacts(names, birthdates)
# Simple exception if file is not found
except FileNotFoundError:
print("File was not found")
except Exception as err:
print("Error:", err)
###############################################
# Function name: find_season
# Input: birthdate
# Output: a string as a season
# Purpose: Determines which season the contact was born in
###############################################
def find_season(birthdates):
month = birthdates.split('/', 3)
month = int(month[0])
# Assign contact birth month to a season
if month == 12 or month == 1 or month == 2:
season = "winter"
elif month == 3 or month == 4 or month == 5:
season = "spring"
elif month == 6 or month == 7 or month == 8:
season = "summer"
elif month == 9 or month == 10 or month == 11:
season = "fall"
return season
###############################################
# Function name: is_leap_year
# Input: a birthdate string (m/d/yyyy)
# Output: value leap year (Yes) or not (No)
# Purpose: Determines if birth year is leap year or not.
###############################################
def is_leap_year(birthdates):
birthyear = birthdates.split('/')
birthyear = int(birthyear[2])
# Calculate if User's birth year is a leap year or not
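    # e.g. 2012 -> leap (divisible by 4, not by 100); 1900 -> not a leap
    # year (divisible by 100 but not 400); 2000 -> leap (divisible by 400)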
if birthyear % 4 == 0 and birthyear % 100 != 0 or \
birthyear % 400 == 0:
year = "Yes"
else:
year = "No"
return year
###############################################
# Function name: get_age
# Input: current date and a birthdate string
# Output: age of contact
# Purpose: Calculates age of contact
###############################################
def get_age(date, birthdates):
today = date.split('/')
todayMonth = int(today[0])
todayDay = int(today[1])
todayYear = int(today[2])
birthyear = birthdates.split('/')
birthMonth = int(birthyear[0])
birthDay = int(birthyear[1])
birthyear = int(birthyear[2])
    # Subtract a year if this year's birthday hasn't occurred yet
    if (todayMonth, todayDay) < (birthMonth, birthDay):
        age = todayYear - birthyear - 1
    else:
        age = todayYear - birthyear
return age
###############################################
# Function name: display_contacts
# Input: name and birthdate lists
# Output: formatted table of contact name, age, season & leap year
# Purpose: Displays each contact's details and the average age.
###############################################
def display_contacts(names, birthdates):
# Get current date
date = input('Enter current date in format m/d/yyyy: ')
# format display in table format with column headings
print(format("Name", '25'), format("Age", '6'),
format("Season", '8'), format("Leap Year", '10'))
print(format("----", '25'), format("---", '6'),
format("------", '8'), format("---------", '10'))
    # Call functions to build parallel lists for each contact
    ages = []
    seasons = []
    years = []
    for i in birthdates:
        ages.append(get_age(date, i))
        seasons.append(find_season(i))
        years.append(is_leap_year(i))
    # Print one table row per contact
    for i in range(len(names)):
        print(format(names[i], '25'), format(str(ages[i]), '6'),
              format(seasons[i], '8'), format(years[i], '10'))
    # Print the average age of all contacts in the file
    if ages:
        print('\nAverage age of contacts: %.1f' % (sum(ages) / len(ages)))
# Call the main function
main()
|
normal
|
{
"blob_id": "661f94f5770df1026352ee344d0006466662bb3c",
"index": 2537,
"step-1": "def main():\n try:\n contacts = open('contactsLab4.txt', 'r')\n names = []\n birthdates = []\n name = contacts.readline()\n while name != '':\n names.append(name.rstrip('\\n'))\n date = contacts.readline()\n birthdates.append(date.rstrip('\\n'))\n name = contacts.readline()\n contacts.close()\n display_contacts(names, birthdates)\n except FileNotFoundError:\n print('File was not found')\n except Exception as err:\n print('Error:', err)\n\n\ndef find_season(birthdates):\n month = birthdates.split('/', 3)\n month = int(month[0])\n if month == 12 or month == 1 or month == 2:\n season = 'winter'\n elif month == 3 or month == 4 or month == 5:\n season = 'spring'\n elif month == 6 or month == 7 or month == 8:\n season = 'summer'\n elif month == 9 or month == 10 or month == 11:\n season = 'fall'\n return season\n\n\n<mask token>\n",
"step-2": "def main():\n try:\n contacts = open('contactsLab4.txt', 'r')\n names = []\n birthdates = []\n name = contacts.readline()\n while name != '':\n names.append(name.rstrip('\\n'))\n date = contacts.readline()\n birthdates.append(date.rstrip('\\n'))\n name = contacts.readline()\n contacts.close()\n display_contacts(names, birthdates)\n except FileNotFoundError:\n print('File was not found')\n except Exception as err:\n print('Error:', err)\n\n\ndef find_season(birthdates):\n month = birthdates.split('/', 3)\n month = int(month[0])\n if month == 12 or month == 1 or month == 2:\n season = 'winter'\n elif month == 3 or month == 4 or month == 5:\n season = 'spring'\n elif month == 6 or month == 7 or month == 8:\n season = 'summer'\n elif month == 9 or month == 10 or month == 11:\n season = 'fall'\n return season\n\n\n<mask token>\n\n\ndef get_age(date, birthdates):\n today = date.split('/')\n todayMonth = int(today[0])\n todayDay = int(today[1])\n todayYear = int(today[2])\n birthyear = birthdates.split('/')\n birthMonth = int(birthyear[0])\n birthDay = int(birthyear[1])\n birthyear = int(birthyear[2])\n if todayMonth > birthMonth:\n age = todayYear - birthyear - 1\n else:\n age = todayYear - birthyear\n return age\n\n\n<mask token>\n",
"step-3": "def main():\n try:\n contacts = open('contactsLab4.txt', 'r')\n names = []\n birthdates = []\n name = contacts.readline()\n while name != '':\n names.append(name.rstrip('\\n'))\n date = contacts.readline()\n birthdates.append(date.rstrip('\\n'))\n name = contacts.readline()\n contacts.close()\n display_contacts(names, birthdates)\n except FileNotFoundError:\n print('File was not found')\n except Exception as err:\n print('Error:', err)\n\n\ndef find_season(birthdates):\n month = birthdates.split('/', 3)\n month = int(month[0])\n if month == 12 or month == 1 or month == 2:\n season = 'winter'\n elif month == 3 or month == 4 or month == 5:\n season = 'spring'\n elif month == 6 or month == 7 or month == 8:\n season = 'summer'\n elif month == 9 or month == 10 or month == 11:\n season = 'fall'\n return season\n\n\n<mask token>\n\n\ndef get_age(date, birthdates):\n today = date.split('/')\n todayMonth = int(today[0])\n todayDay = int(today[1])\n todayYear = int(today[2])\n birthyear = birthdates.split('/')\n birthMonth = int(birthyear[0])\n birthDay = int(birthyear[1])\n birthyear = int(birthyear[2])\n if todayMonth > birthMonth:\n age = todayYear - birthyear - 1\n else:\n age = todayYear - birthyear\n return age\n\n\ndef display_contacts(names, birthdates):\n date = input('Enter current date in format m/d/yyyy: ')\n print(format('Name', '25'), format('Age', '6'), format('Season', '8'),\n format('Leap Year', '10'))\n print(format('----', '25'), format('---', '6'), format('------', '8'),\n format('---------', '10'))\n for i in birthdates:\n age = get_age(date, birthdates)\n print(age)\n for i in birthdates:\n season = find_season(i)\n print(season)\n for i in birthdates:\n year = is_leap_year(i)\n print(year)\n for i in range(len(name)):\n print(format(name[i], '25'), format(str(age[i]), '6'), format(\n season[i], '8'), format(year[i], '10'))\n\n\n<mask token>\n",
"step-4": "def main():\n try:\n contacts = open('contactsLab4.txt', 'r')\n names = []\n birthdates = []\n name = contacts.readline()\n while name != '':\n names.append(name.rstrip('\\n'))\n date = contacts.readline()\n birthdates.append(date.rstrip('\\n'))\n name = contacts.readline()\n contacts.close()\n display_contacts(names, birthdates)\n except FileNotFoundError:\n print('File was not found')\n except Exception as err:\n print('Error:', err)\n\n\ndef find_season(birthdates):\n month = birthdates.split('/', 3)\n month = int(month[0])\n if month == 12 or month == 1 or month == 2:\n season = 'winter'\n elif month == 3 or month == 4 or month == 5:\n season = 'spring'\n elif month == 6 or month == 7 or month == 8:\n season = 'summer'\n elif month == 9 or month == 10 or month == 11:\n season = 'fall'\n return season\n\n\ndef is_leap_year(birthdates):\n birthyear = birthdates.split('/')\n birthyear = int(birthyear[2])\n if birthyear % 4 == 0 and birthyear % 100 != 0 or birthyear % 400 == 0:\n year = 'Yes'\n else:\n year = 'No'\n return year\n\n\ndef get_age(date, birthdates):\n today = date.split('/')\n todayMonth = int(today[0])\n todayDay = int(today[1])\n todayYear = int(today[2])\n birthyear = birthdates.split('/')\n birthMonth = int(birthyear[0])\n birthDay = int(birthyear[1])\n birthyear = int(birthyear[2])\n if todayMonth > birthMonth:\n age = todayYear - birthyear - 1\n else:\n age = todayYear - birthyear\n return age\n\n\ndef display_contacts(names, birthdates):\n date = input('Enter current date in format m/d/yyyy: ')\n print(format('Name', '25'), format('Age', '6'), format('Season', '8'),\n format('Leap Year', '10'))\n print(format('----', '25'), format('---', '6'), format('------', '8'),\n format('---------', '10'))\n for i in birthdates:\n age = get_age(date, birthdates)\n print(age)\n for i in birthdates:\n season = find_season(i)\n print(season)\n for i in birthdates:\n year = is_leap_year(i)\n print(year)\n for i in range(len(name)):\n print(format(name[i], '25'), format(str(age[i]), '6'), format(\n season[i], '8'), format(year[i], '10'))\n\n\nmain()\n",
"step-5": "###############################################################\n# Yolanda Gunter\n# Lab 4 \n# My program uses decisions, repetition, functions, files, lists\n# and exception handling that will get the input from a file to\n# run program that asks User for current date, reads a contact file\n# list that contains names with DOB, calculate each contact's age,\n# season born in and born in a leap year or not.\n# Then my program will print the calculated average age of contacts.\n###############################################################\n\n\n########################################################\n# Function name: main\n# Input: contactsLab4.txt file\n# Output: table of contact, age, birth season & if born leap yr or not \n# Purpose: This function reads file, makes two lists, converts strings to \n# integers, calculates ages, season born & if born leap yr or not \n# then lastly calculats average age of contacts in entire file.\n###############################################\ndef main():\n\n # start exception handling\n try:\n\n # Open a file named contactlab4.txt\n contacts = open('contactsLab4.txt', 'r')\n\n # Create empty name list\n names = []\n\n # Create empty birthday list\n birthdates = []\n\n # Read file, establish records, strip \\n, append to lists, \n name = contacts.readline()\n while name != '':\n names.append(name.rstrip('\\n'))\n date = contacts.readline()\n birthdates.append(date.rstrip('\\n')) \n name = contacts.readline()\n \n # Close the file \n contacts.close()\n\n # Call display_contacts\n display_contacts(names, birthdates)\n\n # Simple exception if file is not found\n except FileNotFoundError:\n print(\"File was not found\")\n except Exception as err:\n print(\"Error:\", err)\n \n\n###############################################\n# Function name: find_season\n# Input: birthdate\n# Output: a string as a season\n# Purpose: Determines which season contact is born \n###############################################\n\ndef find_season(birthdates):\n month = birthdates.split('/', 3)\n month = int(month[0])\n \n # Assign contact birth month to a season\n if month == 12 or month == 1 or month == 2:\n season = \"winter\"\n elif month == 3 or month == 4 or month == 5:\n season = \"spring\"\n elif month == 6 or month == 7 or month == 8:\n season = \"summer\"\n elif month == 9 or month == 10 or month == 11:\n season = \"fall\"\n return season\n \n \n\n###############################################\n# Function name: is_leap_year\n# Input: birthdate list\n# Output: value leap year (Yes) or not (No) \n# Purpose: Determines if birth year is leap year or not. 
\n###############################################\n\ndef is_leap_year(birthdates):\n birthyear = birthdates.split('/')\n birthyear = int(birthyear[2])\n \n # Calculate if User's birth year is a leap year or not\n if birthyear % 4 == 0 and birthyear % 100 != 0 or \\\n birthyear % 400 == 0:\n year = \"Yes\"\n else:\n year = \"No\"\n return year\n\n###############################################\n# Function name: get_age\n# Input: current date and birthdate list\n# Output: age of contact \n# Purpose: Caculates age of contact \n###############################################\n\ndef get_age(date, birthdates):\n today = date.split('/')\n todayMonth = int(today[0])\n todayDay = int(today[1])\n todayYear = int(today[2])\n \n \n birthyear = birthdates.split('/')\n birthMonth = int(birthyear[0])\n birthDay = int(birthyear[1])\n birthyear = int(birthyear[2])\n\n if todayMonth > birthMonth:\n age = todayYear - birthyear-1\n else:\n age = todayYear - birthyear\n return age\n \n\n###############################################\n# Function name: display_contacts\n# Input: name and birthdate lists\n# Output: value leap year (Yes) or not (No) \n# Purpose: Determines if birth year is leap year or not. \n###############################################\ndef display_contacts(names, birthdates):\n # Get current date\n date = input('Enter current date in format m/d/yyyy: ')\n\n # format display in table format with column headings\n print(format(\"Name\", '25'), format(\"Age\", '6'),\n format(\"Season\", '8'), format(\"Leap Year\", '10'))\n print(format(\"----\", '25'), format(\"---\", '6'),\n format(\"------\", '8'), format(\"---------\", '10'))\n\n # Call functions\n for i in birthdates:\n age = get_age(date, birthdates)\n print(age)\n \n for i in birthdates:\n season = find_season(i)\n print(season)\n for i in birthdates:\n year = is_leap_year(i)\n print(year)\n \n for i in range(len(name)):\n print(format(name[i], '25'), format(str(age[i]), '6'),\n format(season[i], '8'), format(year[i], '10'))\n\n# Call the main function\nmain()\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |