code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
a,b,c,d=map(int,input().split())
ans=0
if a>=0:
if c>=0:
ans=b*d
elif d>=0:
ans=b*d
else:
ans=a*d
elif b>=0:
if c>=0:
ans=b*d
elif d>=0:
ans=max(b*d,a*c)
else:
ans=a*c
else:
if c>=0:
ans=b*c
elif d>=0:
ans=a*c
else:
ans=a*c
print(ans)
|
normal
|
{
"blob_id": "be37a7596850050af58f735e60bdf13594715caf",
"index": 4928,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif a >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = b * d\n else:\n ans = a * d\nelif b >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = max(b * d, a * c)\n else:\n ans = a * c\nelif c >= 0:\n ans = b * c\nelif d >= 0:\n ans = a * c\nelse:\n ans = a * c\nprint(ans)\n",
"step-3": "a, b, c, d = map(int, input().split())\nans = 0\nif a >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = b * d\n else:\n ans = a * d\nelif b >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = max(b * d, a * c)\n else:\n ans = a * c\nelif c >= 0:\n ans = b * c\nelif d >= 0:\n ans = a * c\nelse:\n ans = a * c\nprint(ans)\n",
"step-4": "a,b,c,d=map(int,input().split())\nans=0\nif a>=0:\n if c>=0:\n ans=b*d\n elif d>=0:\n ans=b*d\n else:\n ans=a*d\nelif b>=0:\n if c>=0:\n ans=b*d\n elif d>=0:\n ans=max(b*d,a*c)\n else:\n ans=a*c\nelse:\n if c>=0:\n ans=b*c\n elif d>=0:\n ans=a*c\n else:\n ans=a*c\nprint(ans)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: UTF-8 -*-
import lava
from lava.api.constants.vk import QueueType
from lava.api.device import Device
from lava.api.util import Destroyable
__all__ = ["Session"]
sessions = set()
class Session(Destroyable):
def __init__(self, physical_device, queue_index=None):
super(Session, self).__init__()
self.instance = lava.instance() # validation level might has been changed
if physical_device not in lava.devices():
raise RuntimeError("Provided invalid / outdated device object")
self.queue_index = queue_index or physical_device.get_queue_indices(QueueType.COMPUTE)[0]
self.device = Device(physical_device, [(QueueType.COMPUTE, self.queue_index)],
validation_lvl=lava.VALIDATION_LEVEL)
self.buffers = set()
self.shaders = set()
self.stages = set()
sessions.add(self)
def _destroy(self):
for stage in self.stages:
stage.destroy()
for shader in self.shaders:
shader.destroy()
for buffer in self.buffers:
buffer.destroy()
self.device.destroy()
def register_buffer(self, buffer):
self.buffers.add(buffer)
def register_shader(self, shader):
self.shaders.add(shader)
def register_stage(self, stage):
self.stages.add(stage)
|
normal
|
{
"blob_id": "193dcf7bd658f88afe0a1f2fa28605f262e45bc2",
"index": 1554,
"step-1": "<mask token>\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n self.instance = lava.instance()\n if physical_device not in lava.devices():\n raise RuntimeError('Provided invalid / outdated device object')\n self.queue_index = queue_index or physical_device.get_queue_indices(\n QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.\n queue_index)], validation_lvl=lava.VALIDATION_LEVEL)\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n <mask token>\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-2": "<mask token>\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n self.instance = lava.instance()\n if physical_device not in lava.devices():\n raise RuntimeError('Provided invalid / outdated device object')\n self.queue_index = queue_index or physical_device.get_queue_indices(\n QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.\n queue_index)], validation_lvl=lava.VALIDATION_LEVEL)\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n\n def register_shader(self, shader):\n self.shaders.add(shader)\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-3": "<mask token>\n__all__ = ['Session']\nsessions = set()\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n self.instance = lava.instance()\n if physical_device not in lava.devices():\n raise RuntimeError('Provided invalid / outdated device object')\n self.queue_index = queue_index or physical_device.get_queue_indices(\n QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.\n queue_index)], validation_lvl=lava.VALIDATION_LEVEL)\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n\n def register_shader(self, shader):\n self.shaders.add(shader)\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-4": "import lava\nfrom lava.api.constants.vk import QueueType\nfrom lava.api.device import Device\nfrom lava.api.util import Destroyable\n__all__ = ['Session']\nsessions = set()\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n self.instance = lava.instance()\n if physical_device not in lava.devices():\n raise RuntimeError('Provided invalid / outdated device object')\n self.queue_index = queue_index or physical_device.get_queue_indices(\n QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.\n queue_index)], validation_lvl=lava.VALIDATION_LEVEL)\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n\n def register_shader(self, shader):\n self.shaders.add(shader)\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-5": "# -*- coding: UTF-8 -*-\n\nimport lava\nfrom lava.api.constants.vk import QueueType\nfrom lava.api.device import Device\nfrom lava.api.util import Destroyable\n\n__all__ = [\"Session\"]\n\nsessions = set()\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n\n self.instance = lava.instance() # validation level might has been changed\n if physical_device not in lava.devices():\n raise RuntimeError(\"Provided invalid / outdated device object\")\n\n self.queue_index = queue_index or physical_device.get_queue_indices(QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.queue_index)],\n validation_lvl=lava.VALIDATION_LEVEL)\n\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n\n def register_shader(self, shader):\n self.shaders.add(shader)\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_email():
assert email('barney@purpledino.com') == True
assert email('barney.10.WHATDINO@purple.com') == True
assert type(email('barney')) == str
assert type(email('barney@dino')) == str
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_url():
assert url('ixmat.us') == True
assert url('http://bleh.net') == True
assert type(url('://ixmat.us')) == str
assert type(url('ixmat')) == str
def test_email():
assert email('barney@purpledino.com') == True
assert email('barney.10.WHATDINO@purple.com') == True
assert type(email('barney')) == str
assert type(email('barney@dino')) == str
<|reserved_special_token_1|>
from colander_validators import email, url
def test_url():
assert url('ixmat.us') == True
assert url('http://bleh.net') == True
assert type(url('://ixmat.us')) == str
assert type(url('ixmat')) == str
def test_email():
assert email('barney@purpledino.com') == True
assert email('barney.10.WHATDINO@purple.com') == True
assert type(email('barney')) == str
assert type(email('barney@dino')) == str
<|reserved_special_token_1|>
from colander_validators import (
email,
url)
def test_url():
assert url("ixmat.us") == True
assert url("http://bleh.net") == True
assert type(url("://ixmat.us")) == str
assert type(url("ixmat")) == str
def test_email():
assert email("barney@purpledino.com") == True
assert email("barney.10.WHATDINO@purple.com") == True
assert type(email("barney")) == str
assert type(email("barney@dino")) == str
|
flexible
|
{
"blob_id": "40637c7a5e45d0fe4184478a1be2e08e5040c93b",
"index": 8931,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_email():\n assert email('barney@purpledino.com') == True\n assert email('barney.10.WHATDINO@purple.com') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-3": "<mask token>\n\n\ndef test_url():\n assert url('ixmat.us') == True\n assert url('http://bleh.net') == True\n assert type(url('://ixmat.us')) == str\n assert type(url('ixmat')) == str\n\n\ndef test_email():\n assert email('barney@purpledino.com') == True\n assert email('barney.10.WHATDINO@purple.com') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-4": "from colander_validators import email, url\n\n\ndef test_url():\n assert url('ixmat.us') == True\n assert url('http://bleh.net') == True\n assert type(url('://ixmat.us')) == str\n assert type(url('ixmat')) == str\n\n\ndef test_email():\n assert email('barney@purpledino.com') == True\n assert email('barney.10.WHATDINO@purple.com') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-5": "from colander_validators import (\n email,\n url)\n\n\ndef test_url():\n\n assert url(\"ixmat.us\") == True\n assert url(\"http://bleh.net\") == True\n assert type(url(\"://ixmat.us\")) == str\n assert type(url(\"ixmat\")) == str\n\n\ndef test_email():\n\n assert email(\"barney@purpledino.com\") == True\n assert email(\"barney.10.WHATDINO@purple.com\") == True\n assert type(email(\"barney\")) == str\n assert type(email(\"barney@dino\")) == str\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class CreateProjectForm(forms.ModelForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
<|reserved_special_token_0|>
class UpdateProjectForm(forms.ModelForm):
project_name = forms.CharField(label='项目名', widget=forms.TextInput(
attrs={'class': 'form-control'}))
project_desc = forms.CharField(label='项目说明', required=False, widget=
forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))
auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=
False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={
'class': 'form-control selectpicker', 'data-live-search': 'true',
'data-size': '5', 'data-width': '100%'}))
assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=
False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),
widget=forms.SelectMultiple(attrs={'class': 'selectpicker',
'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CreateProjectForm(forms.ModelForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
def clean_project_name(self):
pro_name = self.cleaned_data['project_name']
name = Project.get_by_name(pro_name)
if name:
raise forms.ValidationError('该项目已存在')
return pro_name
class UpdateProjectForm(forms.ModelForm):
project_name = forms.CharField(label='项目名', widget=forms.TextInput(
attrs={'class': 'form-control'}))
project_desc = forms.CharField(label='项目说明', required=False, widget=
forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))
auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=
False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={
'class': 'form-control selectpicker', 'data-live-search': 'true',
'data-size': '5', 'data-width': '100%'}))
assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=
False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),
widget=forms.SelectMultiple(attrs={'class': 'selectpicker',
'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CreateProjectForm(forms.ModelForm):
project_name = forms.CharField(label='项目名', widget=forms.TextInput(
attrs={'class': 'form-control'}))
project_desc = forms.CharField(label='项目说明', required=False, widget=
forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))
auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=
False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={
'class': 'form-control selectpicker', 'data-live-search': 'true',
'data-size': '5', 'data-width': '100%'}))
assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=
False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),
widget=forms.SelectMultiple(attrs={'class': 'selectpicker',
'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
def clean_project_name(self):
pro_name = self.cleaned_data['project_name']
name = Project.get_by_name(pro_name)
if name:
raise forms.ValidationError('该项目已存在')
return pro_name
class UpdateProjectForm(forms.ModelForm):
project_name = forms.CharField(label='项目名', widget=forms.TextInput(
attrs={'class': 'form-control'}))
project_desc = forms.CharField(label='项目说明', required=False, widget=
forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))
auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=
False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={
'class': 'form-control selectpicker', 'data-live-search': 'true',
'data-size': '5', 'data-width': '100%'}))
assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=
False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),
widget=forms.SelectMultiple(attrs={'class': 'selectpicker',
'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
<|reserved_special_token_1|>
from django import forms
from .models import Project
from user.models import User
from assets.models import Assets
class CreateProjectForm(forms.ModelForm):
project_name = forms.CharField(label='项目名', widget=forms.TextInput(
attrs={'class': 'form-control'}))
project_desc = forms.CharField(label='项目说明', required=False, widget=
forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))
auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=
False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={
'class': 'form-control selectpicker', 'data-live-search': 'true',
'data-size': '5', 'data-width': '100%'}))
assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=
False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),
widget=forms.SelectMultiple(attrs={'class': 'selectpicker',
'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
def clean_project_name(self):
pro_name = self.cleaned_data['project_name']
name = Project.get_by_name(pro_name)
if name:
raise forms.ValidationError('该项目已存在')
return pro_name
class UpdateProjectForm(forms.ModelForm):
project_name = forms.CharField(label='项目名', widget=forms.TextInput(
attrs={'class': 'form-control'}))
project_desc = forms.CharField(label='项目说明', required=False, widget=
forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))
auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=
False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={
'class': 'form-control selectpicker', 'data-live-search': 'true',
'data-size': '5', 'data-width': '100%'}))
assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=
False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),
widget=forms.SelectMultiple(attrs={'class': 'selectpicker',
'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
<|reserved_special_token_1|>
from django import forms
from .models import Project
from user.models import User
from assets.models import Assets
class CreateProjectForm(forms.ModelForm):
project_name = forms.CharField(
label='项目名',
widget=forms.TextInput(
attrs={"class": "form-control"}
)
)
project_desc = forms.CharField(
label='项目说明',
required=False,
widget=forms.Textarea(
attrs={"class": "form-control", "cols": 40, "rows": 5}
)
)
auth_users = forms.ModelMultipleChoiceField(
label='授权用户',
required=False,
queryset=User.get_all(),
widget=forms.SelectMultiple(
attrs={"class": "form-control selectpicker", "data-live-search": "true", "data-size": "5",
"data-width": "100%", }
)
)
assets_set = forms.ModelMultipleChoiceField(
label="旗下资产",
required=False,
help_text="如果你从资产创建打开此页面,晴忽略该项内容",
queryset=Assets.get_all(),
widget=forms.SelectMultiple(
attrs={
"class": "selectpicker", "data-live-search": "true", "data-size": "5",
"data-width": "100%",
}
)
)
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
def clean_project_name(self):
pro_name = self.cleaned_data['project_name']
name = Project.get_by_name(pro_name)
if name:
raise forms.ValidationError("该项目已存在")
return pro_name
class UpdateProjectForm(forms.ModelForm):
project_name = forms.CharField(
label='项目名',
widget=forms.TextInput(
attrs={"class": "form-control"}
)
)
project_desc = forms.CharField(
label='项目说明',
required=False,
widget=forms.Textarea(
attrs={"class": "form-control", "cols": 40, "rows": 5}
)
)
auth_users = forms.ModelMultipleChoiceField(
label='授权用户',
required=False,
queryset=User.get_all(),
widget=forms.SelectMultiple(
attrs={"class": "form-control selectpicker", "data-live-search": "true", "data-size": "5",
"data-width": "100%", }
)
)
assets_set = forms.ModelMultipleChoiceField(
label="旗下资产",
required=False,
help_text="如果你从资产创建打开此页面,晴忽略该项内容",
queryset=Assets.get_all(),
widget=forms.SelectMultiple(
attrs={
"class": "selectpicker", "data-live-search": "true", "data-size": "5",
"data-width": "100%",
}
)
)
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
|
flexible
|
{
"blob_id": "599c5c02397f283eb00f7343e65c5cb977442e38",
"index": 3848,
"step-1": "<mask token>\n\n\nclass CreateProjectForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n <mask token>\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-2": "<mask token>\n\n\nclass CreateProjectForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n\n def clean_project_name(self):\n pro_name = self.cleaned_data['project_name']\n name = Project.get_by_name(pro_name)\n if name:\n raise forms.ValidationError('该项目已存在')\n return pro_name\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-3": "<mask token>\n\n\nclass CreateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n\n def clean_project_name(self):\n pro_name = self.cleaned_data['project_name']\n name = Project.get_by_name(pro_name)\n if name:\n raise forms.ValidationError('该项目已存在')\n return pro_name\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = 
Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-4": "from django import forms\nfrom .models import Project\nfrom user.models import User\nfrom assets.models import Assets\n\n\nclass CreateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n\n def clean_project_name(self):\n pro_name = self.cleaned_data['project_name']\n name = Project.get_by_name(pro_name)\n if name:\n raise forms.ValidationError('该项目已存在')\n return pro_name\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 
'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-5": "from django import forms\nfrom .models import Project\nfrom user.models import User\nfrom assets.models import Assets\n\n\nclass CreateProjectForm(forms.ModelForm):\n project_name = forms.CharField(\n label='项目名',\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\"}\n )\n )\n project_desc = forms.CharField(\n label='项目说明',\n required=False,\n widget=forms.Textarea(\n attrs={\"class\": \"form-control\", \"cols\": 40, \"rows\": 5}\n )\n )\n auth_users = forms.ModelMultipleChoiceField(\n label='授权用户',\n required=False,\n queryset=User.get_all(),\n widget=forms.SelectMultiple(\n attrs={\"class\": \"form-control selectpicker\", \"data-live-search\": \"true\", \"data-size\": \"5\",\n \"data-width\": \"100%\", }\n )\n )\n assets_set = forms.ModelMultipleChoiceField(\n label=\"旗下资产\",\n required=False,\n help_text=\"如果你从资产创建打开此页面,晴忽略该项内容\",\n queryset=Assets.get_all(),\n widget=forms.SelectMultiple(\n attrs={\n \"class\": \"selectpicker\", \"data-live-search\": \"true\", \"data-size\": \"5\",\n \"data-width\": \"100%\",\n }\n )\n )\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n\n def clean_project_name(self):\n pro_name = self.cleaned_data['project_name']\n name = Project.get_by_name(pro_name)\n if name:\n raise forms.ValidationError(\"该项目已存在\")\n return pro_name\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(\n label='项目名',\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\"}\n )\n )\n project_desc = forms.CharField(\n label='项目说明',\n required=False,\n widget=forms.Textarea(\n attrs={\"class\": \"form-control\", \"cols\": 40, \"rows\": 5}\n )\n )\n auth_users = forms.ModelMultipleChoiceField(\n label='授权用户',\n required=False,\n queryset=User.get_all(),\n widget=forms.SelectMultiple(\n attrs={\"class\": \"form-control selectpicker\", \"data-live-search\": \"true\", \"data-size\": \"5\",\n \"data-width\": \"100%\", }\n )\n )\n assets_set = 
forms.ModelMultipleChoiceField(\n label=\"旗下资产\",\n required=False,\n help_text=\"如果你从资产创建打开此页面,晴忽略该项内容\",\n queryset=Assets.get_all(),\n widget=forms.SelectMultiple(\n attrs={\n \"class\": \"selectpicker\", \"data-live-search\": \"true\", \"data-size\": \"5\",\n \"data-width\": \"100%\",\n }\n )\n )\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/python3
max_integer = __import__('9-max_integer').max_integer
my_list = [1, 90, 2, 13, 34, 5, -13, 3]
my_list1 = []
my_list2 = [1, 90, 2, 13, 34, 100, -13, 3]
max_value = max_integer(my_list)
max_value1 = max_integer(my_list1)
max_value2 = max_integer(my_list2)
max_value3 = max_integer()
print("Max: {}".format(max_value))
print("Max: {}".format(max_value1))
print("Max: {}".format(max_value2))
print("Max: {}".format(max_value3))
|
normal
|
{
"blob_id": "f5b74ca95cb368d70139b5d36e3c8d553b8c5393",
"index": 1393,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Max: {}'.format(max_value))\nprint('Max: {}'.format(max_value1))\nprint('Max: {}'.format(max_value2))\nprint('Max: {}'.format(max_value3))\n",
"step-3": "max_integer = __import__('9-max_integer').max_integer\nmy_list = [1, 90, 2, 13, 34, 5, -13, 3]\nmy_list1 = []\nmy_list2 = [1, 90, 2, 13, 34, 100, -13, 3]\nmax_value = max_integer(my_list)\nmax_value1 = max_integer(my_list1)\nmax_value2 = max_integer(my_list2)\nmax_value3 = max_integer()\nprint('Max: {}'.format(max_value))\nprint('Max: {}'.format(max_value1))\nprint('Max: {}'.format(max_value2))\nprint('Max: {}'.format(max_value3))\n",
"step-4": "#!/usr/bin/python3\nmax_integer = __import__('9-max_integer').max_integer\n\nmy_list = [1, 90, 2, 13, 34, 5, -13, 3]\nmy_list1 = []\nmy_list2 = [1, 90, 2, 13, 34, 100, -13, 3]\nmax_value = max_integer(my_list)\nmax_value1 = max_integer(my_list1)\nmax_value2 = max_integer(my_list2)\nmax_value3 = max_integer()\nprint(\"Max: {}\".format(max_value))\nprint(\"Max: {}\".format(max_value1))\nprint(\"Max: {}\".format(max_value2))\nprint(\"Max: {}\".format(max_value3))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import sqlite3 as db
os.system('clear')
persons = []
class Person:
def __init__(self, name, surname, job, salary):
self.name = name
self.surname = surname
self.job = job
self.salary = salary
def create(name):
conn = db.connect(name + '.db')
c = conn.cursor()
c.execute("""CREATE TABLE first(
id integer PRIMARY KEY AUTOINCREMENT,
name text,
surname text
)""")
c.execute("""CREATE TABLE second(
id integer PRIMARY KEY AUTOINCREMENT,
surname text,
job text,
salary integer,
FOREIGN KEY(id) REFERENCES first(id),
FOREIGN KEY(surname) REFERENCES first(surname)
)""")
conn.commit()
conn.close()
def database(s):
conn = db.connect(sqldb+'.db')
c = conn.cursor()
c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.surname))
c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (s.surname, s.job, s.salary))
conn.commit()
conn.close()
def insert():
	"""Interactively collect one person and store them (employed people only)."""
	name = input('Enter your name: ')
	surname = input('Enter your surname: ')
	confirm = input('Have you got a job? ')
	# Anything without a 'y' counts as "no job" and is rejected.
	if 'y' not in confirm:
		print('We need a humans with job, bye')
		return
	job = input('What kind of job you have? ')
	salary = input('How much they pay for you? ')
	person = Person(name, surname, job, salary)
	persons.append(person)
	database(person)
# Minimal interactive REPL: dispatch on the typed command until the
# process is killed (there is no explicit quit command).
while True:
	command = input(">> ")
	if command == 'insert':
		# Collect a person and write them to the current database.
		insert()
	elif command == 'list':
		# Print the surname of every person added in this session.
		for i in persons:
			print(i.surname)
		continue
	elif command == 'create database':
		# NOTE(review): ``sqldb`` is a module-level name read later by
		# database() — running 'insert' before 'create database' raises
		# NameError inside database().
		sqldb = input('Enter the name of new database: ')
		create(sqldb)
	elif command == 'clear' or command == 'cls':
		# Crude OS detection via the drive-letter style of the cwd path.
		loc = os.getcwd()
		if 'C:' in loc or 'D:' in loc:
			os.system('cls')
		else:
			os.system('clear')
	else:
		print('No command found')
		continue
|
normal
|
{
"blob_id": "7ff19ee35422395f78dca1e17a736df20a40ea98",
"index": 7569,
"step-1": "<mask token>\n\n\nclass Person:\n\n def __init__(self, name, surname, job, salary):\n self.name = name\n self.surname = surname\n self.job = job\n self.salary = salary\n\n\ndef create(name):\n conn = db.connect(name + '.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\"\n )\n conn.commit()\n conn.close()\n\n\n<mask token>\n\n\ndef insert():\n name = input('Enter your name: ')\n surname = input('Enter your surname: ')\n confirm = input('Have you got a job? ')\n if 'y' in confirm:\n job = input('What kind of job you have? ')\n salary = input('How much they pay for you? ')\n surname = Person(name, surname, job, salary)\n persons.append(surname)\n database(surname)\n else:\n print('We need a humans with job, bye')\n\n\n<mask token>\n",
"step-2": "<mask token>\nos.system('clear')\n<mask token>\n\n\nclass Person:\n\n def __init__(self, name, surname, job, salary):\n self.name = name\n self.surname = surname\n self.job = job\n self.salary = salary\n\n\ndef create(name):\n conn = db.connect(name + '.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef database(s):\n conn = db.connect(sqldb + '.db')\n c = conn.cursor()\n c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.\n surname))\n c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (\n s.surname, s.job, s.salary))\n conn.commit()\n conn.close()\n\n\ndef insert():\n name = input('Enter your name: ')\n surname = input('Enter your surname: ')\n confirm = input('Have you got a job? ')\n if 'y' in confirm:\n job = input('What kind of job you have? ')\n salary = input('How much they pay for you? ')\n surname = Person(name, surname, job, salary)\n persons.append(surname)\n database(surname)\n else:\n print('We need a humans with job, bye')\n\n\nwhile True:\n command = input('>> ')\n if command == 'insert':\n insert()\n elif command == 'list':\n for i in persons:\n print(i.surname)\n continue\n elif command == 'create database':\n sqldb = input('Enter the name of new database: ')\n create(sqldb)\n elif command == 'clear' or command == 'cls':\n loc = os.getcwd()\n if 'C:' in loc or 'D:' in loc:\n os.system('cls')\n else:\n os.system('clear')\n else:\n print('No command found')\n continue\n",
"step-3": "<mask token>\nos.system('clear')\npersons = []\n\n\nclass Person:\n\n def __init__(self, name, surname, job, salary):\n self.name = name\n self.surname = surname\n self.job = job\n self.salary = salary\n\n\ndef create(name):\n conn = db.connect(name + '.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef database(s):\n conn = db.connect(sqldb + '.db')\n c = conn.cursor()\n c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.\n surname))\n c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (\n s.surname, s.job, s.salary))\n conn.commit()\n conn.close()\n\n\ndef insert():\n name = input('Enter your name: ')\n surname = input('Enter your surname: ')\n confirm = input('Have you got a job? ')\n if 'y' in confirm:\n job = input('What kind of job you have? ')\n salary = input('How much they pay for you? ')\n surname = Person(name, surname, job, salary)\n persons.append(surname)\n database(surname)\n else:\n print('We need a humans with job, bye')\n\n\nwhile True:\n command = input('>> ')\n if command == 'insert':\n insert()\n elif command == 'list':\n for i in persons:\n print(i.surname)\n continue\n elif command == 'create database':\n sqldb = input('Enter the name of new database: ')\n create(sqldb)\n elif command == 'clear' or command == 'cls':\n loc = os.getcwd()\n if 'C:' in loc or 'D:' in loc:\n os.system('cls')\n else:\n os.system('clear')\n else:\n print('No command found')\n continue\n",
"step-4": "import os\nimport sqlite3 as db\nos.system('clear')\npersons = []\n\n\nclass Person:\n\n def __init__(self, name, surname, job, salary):\n self.name = name\n self.surname = surname\n self.job = job\n self.salary = salary\n\n\ndef create(name):\n conn = db.connect(name + '.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef database(s):\n conn = db.connect(sqldb + '.db')\n c = conn.cursor()\n c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.\n surname))\n c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (\n s.surname, s.job, s.salary))\n conn.commit()\n conn.close()\n\n\ndef insert():\n name = input('Enter your name: ')\n surname = input('Enter your surname: ')\n confirm = input('Have you got a job? ')\n if 'y' in confirm:\n job = input('What kind of job you have? ')\n salary = input('How much they pay for you? ')\n surname = Person(name, surname, job, salary)\n persons.append(surname)\n database(surname)\n else:\n print('We need a humans with job, bye')\n\n\nwhile True:\n command = input('>> ')\n if command == 'insert':\n insert()\n elif command == 'list':\n for i in persons:\n print(i.surname)\n continue\n elif command == 'create database':\n sqldb = input('Enter the name of new database: ')\n create(sqldb)\n elif command == 'clear' or command == 'cls':\n loc = os.getcwd()\n if 'C:' in loc or 'D:' in loc:\n os.system('cls')\n else:\n os.system('clear')\n else:\n print('No command found')\n continue\n",
"step-5": "import os\nimport sqlite3 as db\n\nos.system('clear')\npersons = []\n\nclass Person:\n\tdef __init__(self, name, surname, job, salary):\n\t\tself.name = name\n\t\tself.surname = surname\n\t\tself.job = job\n\t\tself.salary = salary\n\ndef create(name):\n\tconn = db.connect(name + '.db')\n\tc = conn.cursor()\n\n\tc.execute(\"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\")\n\n\tc.execute(\"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\")\n\n\tconn.commit()\n\tconn.close()\t\n\ndef database(s):\n\tconn = db.connect(sqldb+'.db')\n\tc = conn.cursor()\n\tc.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.surname))\n\tc.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (s.surname, s.job, s.salary))\n\tconn.commit()\n\tconn.close()\n\ndef insert():\n\tname = input('Enter your name: ')\n\tsurname = input('Enter your surname: ')\n\tconfirm = input('Have you got a job? ')\n\tif 'y' in confirm:\n\t\tjob = input('What kind of job you have? ')\n\t\tsalary = input('How much they pay for you? ')\n\t\tsurname = Person(name, surname, job, salary)\n\t\tpersons.append(surname)\n\t\tdatabase(surname)\n\telse:\n\t\tprint('We need a humans with job, bye')\n\n\nwhile True:\n\tcommand = input(\">> \")\n\tif command == 'insert':\n\t\tinsert()\n\telif command == 'list':\n\t\tfor i in persons:\n\t\t\tprint(i.surname)\n\t\tcontinue\n\telif command == 'create database':\n\t\tsqldb = input('Enter the name of new database: ')\n\t\tcreate(sqldb)\n\telif command == 'clear' or command == 'cls':\n\t\tloc = os.getcwd()\n\t\tif 'C:' in loc or 'D:' in loc:\n\t\t\tos.system('cls')\n\t\telse:\n\t\t\tos.system('clear')\n\telse:\n\t\tprint('No command found')\n\t\tcontinue",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
from typing import Dict, Any
from urllib import request
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Product
from cart.forms import CartAddProductForm
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from .forms import UserForm, UserLogInForm
from django.views import generic
from django.views.generic import View
def product_list(request):
    """Render the catalogue page with all products flagged as available."""
    available = Product.objects.filter(available=True)
    return render(request, 'shop/product/list.html',
                  {'products': available, 'user': request.user})
def product_detail(request, id, slug):
    """Render one available product's page with an add-to-cart form.

    404s when no available product matches both *id* and *slug*.
    """
    product = get_object_or_404(Product, id=id, slug=slug, available=True)
    return render(request, 'shop/product/detail.html',
                  {'product': product,
                   'cart_product_form': CartAddProductForm()})
class UserFormView(View):
    """Sign-up view: shows a blank UserForm and registers the user on POST."""
    form_class = UserForm
    template_name = 'shop/signup.html'

    def get(self, request):
        """Display a blank registration form."""
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        """Validate the form, create the user, log them in and redirect."""
        form = self.form_class(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            # Hash the password properly instead of storing the raw value.
            user.set_password(password)
            user.save()
            # Re-authenticate so the auth backend is attached to the user.
            user = authenticate(username=username, password=password)
            if user is not None and user.is_active:
                login(request, user)
                return redirect('/shop/')
        # Invalid form (or authentication failed / inactive account):
        # redisplay the form.
        return render(request, self.template_name, {'form': form})
def user_login(request):
    """Authenticate a user from POSTed credentials; GET shows the form."""
    context = {'form': UserLogInForm}
    if request.method != "POST":
        return render(request, 'shop/login.html', context)
    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(request, username=username, password=password)
    if user:
        login(request, user)
        return redirect('/shop/')
    # Authentication failed: redisplay the form with an error message.
    context['error'] = "Provide valid credentials"
    return render(request, 'shop/login.html', context)
def user_logout(request):
    """Terminate the session on POST, then show the login page.

    A GET request simply redisplays the page without logging anyone out.
    """
    if request.method == 'POST':
        logout(request)
    return render(request, "shop/login.html")
|
normal
|
{
"blob_id": "1d72a9882aea1e0f808969828ed2e69ecd79ac71",
"index": 7522,
"step-1": "<mask token>\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/shop/')\n return render(request, self.template_name, {'form': form})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = CartAddProductForm()\n context = {'product': product, 'cart_product_form': cart_product_form}\n return render(request, 'shop/product/detail.html', context)\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/shop/')\n return render(request, self.template_name, {'form': form})\n\n\n<mask token>\n\n\ndef user_logout(request):\n if request.method == 'POST':\n logout(request)\n return render(request, 'shop/login.html')\n",
"step-3": "<mask token>\n\n\ndef product_list(request):\n products = Product.objects.filter(available=True)\n context = {'products': products, 'user': request.user}\n return render(request, 'shop/product/list.html', context)\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = CartAddProductForm()\n context = {'product': product, 'cart_product_form': cart_product_form}\n return render(request, 'shop/product/detail.html', context)\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/shop/')\n return render(request, self.template_name, {'form': form})\n\n\ndef user_login(request):\n context = {'form': UserLogInForm}\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('/shop/')\n else:\n context['error'] = 'Provide valid credentials'\n return render(request, 'shop/login.html', context)\n else:\n return render(request, 'shop/login.html', context)\n\n\ndef user_logout(request):\n if request.method == 'POST':\n logout(request)\n return render(request, 'shop/login.html')\n",
"step-4": "from typing import Dict, Any\nfrom urllib import request\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\nfrom .models import Product\nfrom cart.forms import CartAddProductForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom .forms import UserForm, UserLogInForm\nfrom django.views import generic\nfrom django.views.generic import View\n\n\ndef product_list(request):\n products = Product.objects.filter(available=True)\n context = {'products': products, 'user': request.user}\n return render(request, 'shop/product/list.html', context)\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = CartAddProductForm()\n context = {'product': product, 'cart_product_form': cart_product_form}\n return render(request, 'shop/product/detail.html', context)\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/shop/')\n return render(request, self.template_name, {'form': form})\n\n\ndef user_login(request):\n context = {'form': UserLogInForm}\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('/shop/')\n else:\n context['error'] = 'Provide valid credentials'\n 
return render(request, 'shop/login.html', context)\n else:\n return render(request, 'shop/login.html', context)\n\n\ndef user_logout(request):\n if request.method == 'POST':\n logout(request)\n return render(request, 'shop/login.html')\n",
"step-5": "from typing import Dict, Any\nfrom urllib import request\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\n\nfrom .models import Product\nfrom cart.forms import CartAddProductForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom .forms import UserForm, UserLogInForm\nfrom django.views import generic\nfrom django.views.generic import View\n\n\ndef product_list(request):\n products = Product.objects.filter(available=True)\n\n context = {'products': products,\n 'user': request.user}\n return render(request, 'shop/product/list.html', context)\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = CartAddProductForm()\n context = {'product': product,\n 'cart_product_form': cart_product_form}\n return render(request, 'shop/product/detail.html', context)\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n # display blank form\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n # process form data\n def post(self, request):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n\n user = form.save(commit=False)\n\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n\n user = authenticate(username=username, password=password)\n\n if user is not None:\n if user.is_active:\n login(request, user)\n #print(request.user.is_authenticated())\n return redirect('/shop/')\n\n return render(request, self.template_name, {'form': form})\n\n\ndef user_login(request):\n context = {\n 'form': UserLogInForm\n }\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if 
user:\n login(request, user)\n return redirect('/shop/')\n else:\n context['error'] = \"Provide valid credentials\"\n return render(request, 'shop/login.html', context)\n else:\n return render(request, 'shop/login.html', context)\n\n\ndef user_logout(request):\n if request.method == 'POST':\n logout(request)\n\n return render(request, \"shop/login.html\")\n\n\n\n\n\n\n",
"step-ids": [
4,
6,
8,
9,
10
]
}
|
[
4,
6,
8,
9,
10
] |
import numpy as np
import math
# Symmetric 3x3 coefficient matrix of the linear system a @ x = b.
a = [
    [0.54, -0.04, 0.10],
    [-0.04, 0.50, 0.12],
    [0.10, 0.12, 0.71]
]
# Right-hand-side vector of the system.
b = [0.33, -0.05, 0.28]
def gauss(left, right, prec=3):
    """Solve left @ x = right by Gauss-Jordan elimination with partial pivoting.

    Returns a dict {'x1': ..., 'x2': ...} of solution components formatted
    to *prec* decimal places, or an error string for an inconsistent system.
    """
    # Build the augmented matrix [left | right] in floating point so the
    # in-place row operations are not truncated for integer inputs.
    arr = np.concatenate((np.array(left, dtype=float),
                          np.array([right], dtype=float).T), axis=1)
    print('\nИсходная матрица:')
    print(arr)
    # Rouché–Capelli: the system is consistent iff rank(A) == rank([A|b]).
    if np.linalg.matrix_rank(left) != np.linalg.matrix_rank(arr):
        return 'Решений нет!'
    # Forward elimination to row-echelon form.
    for j in range(len(arr)):
        # Partial pivoting: pick the row with the largest |pivot|. The
        # original compared signed values (arr[i][j] > arr[lead][j]), which
        # could keep a zero pivot when all candidates were negative and
        # wrongly skip the column.
        lead = j
        for i in range(j, len(arr)):
            if abs(arr[i][j]) > abs(arr[lead][j]):
                lead = i
        # Whole column is zero on/below the diagonal: nothing to eliminate.
        if arr[lead][j] == 0:
            continue
        # Swap the pivot row into place.
        arr[[j, lead]] = arr[[lead, j]]
        # Normalize the pivot row and zero the entries below it.
        arr[j] = arr[j] / arr[j][j]
        for i in range(j + 1, len(arr)):
            arr[i] = arr[i] - arr[j] * arr[i][j]
        print('\nШаг ', j)
        print(arr)
    # Back substitution: reduce to the identity (Gauss-Jordan).
    for j in reversed(range(len(arr))):
        for i in reversed(range(j)):
            arr[i] = arr[i] - arr[j] * arr[i][j]
    print('\nМатрица в единичном виде')
    print(arr)
    # The last augmented column now holds the solution vector.
    answer = {('x' + str(i + 1))
              : format(arr[:, -1][i], f'.{prec}f') for i in range(len(arr))}
    return answer
def norm_1(matrix):
    """Infinity norm: the largest sum of absolute values along the first axis."""
    rows = np.array(matrix)
    return max(np.sum(np.abs(row)) for row in rows)
def norm_2(matrix):
    """1-norm: the largest absolute column sum of *matrix*."""
    columns = np.array(matrix).T
    return max(np.sum(np.abs(column)) for column in columns)
def norm_3(matrix):
    """Euclidean (Frobenius) norm: sqrt of the sum of squared entries."""
    flat = np.asarray(matrix).ravel()
    return math.sqrt(np.sum(flat ** 2))
def converges(matrix):
    """True when at least one of the three matrix norms is below 1,
    i.e. the fixed-point iteration is guaranteed to converge."""
    return min(norm_1(matrix), norm_2(matrix), norm_3(matrix)) < 1
def iteration(left, right, eps=0.0001, prec=5):
    """Solve left @ x = right by fixed-point iteration (Jacobi form).

    Returns a dict {'x1': ..., ...} of solution components formatted to
    *prec* decimal places, or an error string when no matrix norm of the
    iteration matrix is below 1 (no convergence guarantee).
    """
    # alpha[i][j] = -a_ij / a_ii (zero on the diagonal): iteration matrix
    # of the equivalent system x = alpha @ x + beta.
    alpha = [[(-left[i][j] / left[i][i]) if (i != j)
              else 0 for j in range(len(left))] for i in range(len(left[0]))]
    # beta_i = b_i / a_ii: the constant term.
    beta = np.array([right[i] / left[i][i] for i in range(len(left))])
    # Check convergence BEFORE computing the a-priori bound: the original
    # evaluated 1 / (1 - ||alpha||) first, which divides by zero when
    # ||alpha|| == 1 exactly.
    if not converges(alpha):
        return 'Решение не сходится!'
    norm_alpha = min(norm_1(alpha), norm_2(alpha), norm_3(alpha))
    norm_beta = norm_1(beta)
    # A-priori estimate of the error after the first step.
    cur_eps = norm_alpha / (1 - norm_alpha) * norm_beta
    # Start from beta and iterate x <- alpha @ x + beta until the bound
    # drops below the requested eps.
    x = np.copy(beta)
    it = 0
    while cur_eps > eps:
        x = np.dot(alpha, x) + beta
        # Each step shrinks the error bound by a factor of ||alpha||.
        cur_eps = cur_eps * norm_alpha
        it += 1
        print('Итерация', it, ': X =', x)
    answer = {('x' + str(i + 1))
              : format(x[i], f'.{prec}f') for i in range(len(x))}
    return answer
# Demonstration: solve the same system with both methods and print results.
print('Метод Гаусса')
res = gauss(a, b, prec=5)
print('Решение:', res)
print('\nМетод простой итерации')
res = iteration(a, b, eps=0.01, prec=5)
print('Решение:', res)
|
normal
|
{
"blob_id": "bd0530b6f3f7b1a5d72a5b11803d5bb82f85105d",
"index": 6587,
"step-1": "<mask token>\n\n\ndef gauss(left, right, prec=3):\n arr = np.concatenate((np.array(left), np.array([right]).T), axis=1)\n print('\\nИсходная матрица:')\n print(arr)\n if np.linalg.matrix_rank(left) != np.linalg.matrix_rank(arr):\n return 'Решений нет!'\n for j in range(len(arr)):\n lead = j\n for i in range(j, len(arr)):\n if arr[i][j] > arr[lead][j] and arr[i][j] != 0:\n lead = i\n if arr[lead][j] == 0:\n continue\n arr[[j, lead]] = arr[[lead, j]]\n arr[j] = arr[j] / arr[j][j]\n for i in range(j + 1, len(arr)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nШаг ', j)\n print(arr)\n for j in reversed(range(len(arr))):\n for i in reversed(range(j)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nМатрица в единичном виде')\n print(arr)\n answer = {('x' + str(i + 1)): format(arr[:, -1][i], f'.{prec}f') for i in\n range(len(arr))}\n return answer\n\n\n<mask token>\n\n\ndef norm_2(matrix):\n data = np.array(matrix).T\n data = np.array(data)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_3(matrix):\n data = np.square(np.array(matrix).flatten())\n return math.sqrt(np.sum(data))\n\n\n<mask token>\n\n\ndef iteration(left, right, eps=0.0001, prec=5):\n alpha = [[(-left[i][j] / left[i][i] if i != j else 0) for j in range(\n len(left))] for i in range(len(left[0]))]\n beta = np.array([(right[i] / left[i][i]) for i in range(len(left))])\n norm_alpha = min(norm_1(alpha), norm_2(alpha), norm_3(alpha))\n norm_beta = norm_1(beta)\n cur_eps = norm_alpha / (1 - norm_alpha) * norm_beta\n if converges(alpha):\n x = np.copy(beta)\n it = 0\n while cur_eps > eps:\n prev_x = np.copy(x)\n x = np.dot(alpha, prev_x) + beta\n cur_eps = cur_eps * norm_alpha\n it += 1\n print('Итерация', it, ': X =', x)\n answer = {('x' + str(i + 1)): format(x[i], f'.{prec}f') for i in\n range(len(x))}\n return answer\n else:\n return 'Решение не сходится!'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gauss(left, right, prec=3):\n arr = np.concatenate((np.array(left), np.array([right]).T), axis=1)\n print('\\nИсходная матрица:')\n print(arr)\n if np.linalg.matrix_rank(left) != np.linalg.matrix_rank(arr):\n return 'Решений нет!'\n for j in range(len(arr)):\n lead = j\n for i in range(j, len(arr)):\n if arr[i][j] > arr[lead][j] and arr[i][j] != 0:\n lead = i\n if arr[lead][j] == 0:\n continue\n arr[[j, lead]] = arr[[lead, j]]\n arr[j] = arr[j] / arr[j][j]\n for i in range(j + 1, len(arr)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nШаг ', j)\n print(arr)\n for j in reversed(range(len(arr))):\n for i in reversed(range(j)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nМатрица в единичном виде')\n print(arr)\n answer = {('x' + str(i + 1)): format(arr[:, -1][i], f'.{prec}f') for i in\n range(len(arr))}\n return answer\n\n\ndef norm_1(matrix):\n data = np.array(matrix)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_2(matrix):\n data = np.array(matrix).T\n data = np.array(data)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_3(matrix):\n data = np.square(np.array(matrix).flatten())\n return math.sqrt(np.sum(data))\n\n\ndef converges(matrix):\n return norm_1(matrix) < 1 or norm_2(matrix) < 1 or norm_3(matrix) < 1\n\n\ndef iteration(left, right, eps=0.0001, prec=5):\n alpha = [[(-left[i][j] / left[i][i] if i != j else 0) for j in range(\n len(left))] for i in range(len(left[0]))]\n beta = np.array([(right[i] / left[i][i]) for i in range(len(left))])\n norm_alpha = min(norm_1(alpha), norm_2(alpha), norm_3(alpha))\n norm_beta = norm_1(beta)\n cur_eps = norm_alpha / (1 - norm_alpha) * norm_beta\n if converges(alpha):\n x = np.copy(beta)\n it = 0\n while cur_eps > eps:\n prev_x = np.copy(x)\n x = np.dot(alpha, prev_x) + beta\n cur_eps = cur_eps * norm_alpha\n it += 1\n print('Итерация', it, ': X =', x)\n answer = {('x' + str(i + 1)): format(x[i], 
f'.{prec}f') for i in\n range(len(x))}\n return answer\n else:\n return 'Решение не сходится!'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gauss(left, right, prec=3):\n arr = np.concatenate((np.array(left), np.array([right]).T), axis=1)\n print('\\nИсходная матрица:')\n print(arr)\n if np.linalg.matrix_rank(left) != np.linalg.matrix_rank(arr):\n return 'Решений нет!'\n for j in range(len(arr)):\n lead = j\n for i in range(j, len(arr)):\n if arr[i][j] > arr[lead][j] and arr[i][j] != 0:\n lead = i\n if arr[lead][j] == 0:\n continue\n arr[[j, lead]] = arr[[lead, j]]\n arr[j] = arr[j] / arr[j][j]\n for i in range(j + 1, len(arr)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nШаг ', j)\n print(arr)\n for j in reversed(range(len(arr))):\n for i in reversed(range(j)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nМатрица в единичном виде')\n print(arr)\n answer = {('x' + str(i + 1)): format(arr[:, -1][i], f'.{prec}f') for i in\n range(len(arr))}\n return answer\n\n\ndef norm_1(matrix):\n data = np.array(matrix)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_2(matrix):\n data = np.array(matrix).T\n data = np.array(data)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_3(matrix):\n data = np.square(np.array(matrix).flatten())\n return math.sqrt(np.sum(data))\n\n\ndef converges(matrix):\n return norm_1(matrix) < 1 or norm_2(matrix) < 1 or norm_3(matrix) < 1\n\n\ndef iteration(left, right, eps=0.0001, prec=5):\n alpha = [[(-left[i][j] / left[i][i] if i != j else 0) for j in range(\n len(left))] for i in range(len(left[0]))]\n beta = np.array([(right[i] / left[i][i]) for i in range(len(left))])\n norm_alpha = min(norm_1(alpha), norm_2(alpha), norm_3(alpha))\n norm_beta = norm_1(beta)\n cur_eps = norm_alpha / (1 - norm_alpha) * norm_beta\n if converges(alpha):\n x = np.copy(beta)\n it = 0\n while cur_eps > eps:\n prev_x = np.copy(x)\n x = np.dot(alpha, prev_x) + beta\n cur_eps = cur_eps * norm_alpha\n it += 1\n print('Итерация', it, ': X =', x)\n answer = {('x' + str(i + 1)): format(x[i], 
f'.{prec}f') for i in\n range(len(x))}\n return answer\n else:\n return 'Решение не сходится!'\n\n\nprint('Метод Гаусса')\n<mask token>\nprint('Решение:', res)\nprint(\"\"\"\nМетод простой итерации\"\"\")\n<mask token>\nprint('Решение:', res)\n",
"step-4": "import numpy as np\nimport math\na = [[0.54, -0.04, 0.1], [-0.04, 0.5, 0.12], [0.1, 0.12, 0.71]]\nb = [0.33, -0.05, 0.28]\n\n\ndef gauss(left, right, prec=3):\n arr = np.concatenate((np.array(left), np.array([right]).T), axis=1)\n print('\\nИсходная матрица:')\n print(arr)\n if np.linalg.matrix_rank(left) != np.linalg.matrix_rank(arr):\n return 'Решений нет!'\n for j in range(len(arr)):\n lead = j\n for i in range(j, len(arr)):\n if arr[i][j] > arr[lead][j] and arr[i][j] != 0:\n lead = i\n if arr[lead][j] == 0:\n continue\n arr[[j, lead]] = arr[[lead, j]]\n arr[j] = arr[j] / arr[j][j]\n for i in range(j + 1, len(arr)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nШаг ', j)\n print(arr)\n for j in reversed(range(len(arr))):\n for i in reversed(range(j)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nМатрица в единичном виде')\n print(arr)\n answer = {('x' + str(i + 1)): format(arr[:, -1][i], f'.{prec}f') for i in\n range(len(arr))}\n return answer\n\n\ndef norm_1(matrix):\n data = np.array(matrix)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_2(matrix):\n data = np.array(matrix).T\n data = np.array(data)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_3(matrix):\n data = np.square(np.array(matrix).flatten())\n return math.sqrt(np.sum(data))\n\n\ndef converges(matrix):\n return norm_1(matrix) < 1 or norm_2(matrix) < 1 or norm_3(matrix) < 1\n\n\ndef iteration(left, right, eps=0.0001, prec=5):\n alpha = [[(-left[i][j] / left[i][i] if i != j else 0) for j in range(\n len(left))] for i in range(len(left[0]))]\n beta = np.array([(right[i] / left[i][i]) for i in range(len(left))])\n norm_alpha = min(norm_1(alpha), norm_2(alpha), norm_3(alpha))\n norm_beta = norm_1(beta)\n cur_eps = norm_alpha / (1 - norm_alpha) * norm_beta\n if converges(alpha):\n x = np.copy(beta)\n it = 0\n while cur_eps > eps:\n prev_x = np.copy(x)\n x = np.dot(alpha, prev_x) + beta\n cur_eps = cur_eps * 
norm_alpha\n it += 1\n print('Итерация', it, ': X =', x)\n answer = {('x' + str(i + 1)): format(x[i], f'.{prec}f') for i in\n range(len(x))}\n return answer\n else:\n return 'Решение не сходится!'\n\n\nprint('Метод Гаусса')\nres = gauss(a, b, prec=5)\nprint('Решение:', res)\nprint(\"\"\"\nМетод простой итерации\"\"\")\nres = iteration(a, b, eps=0.01, prec=5)\nprint('Решение:', res)\n",
"step-5": "import numpy as np\nimport math\n\n\na = [\n [0.54, -0.04, 0.10],\n [-0.04, 0.50, 0.12],\n [0.10, 0.12, 0.71]\n]\nb = [0.33, -0.05, 0.28]\n\n# Метод Гаусса\ndef gauss(left, right, prec=3):\n # Создаем расширенную матрицу\n arr = np.concatenate((np.array(left), np.array([right]).T), axis=1)\n print('\\nИсходная матрица:')\n print(arr)\n # Проверка совместности\n if np.linalg.matrix_rank(left) != np.linalg.matrix_rank(arr):\n return 'Решений нет!'\n # Приводим к ступенчатому виду\n for j in range(len(arr)):\n # Находим ведущий элемент\n lead = j\n for i in range(j, len(arr)):\n if (arr[i][j] > arr[lead][j] and arr[i][j] != 0):\n lead = i\n # Если все элементы строки - 0, пропускаем итерацию\n if arr[lead][j] == 0:\n continue\n # Выносим строку с ведущим элементом вверх\n arr[[j, lead]] = arr[[lead, j]]\n # Обнуляем нижестоящие элементы\n arr[j] = arr[j] / arr[j][j]\n for i in range(j + 1, len(arr)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nШаг ', j)\n print(arr)\n # Приводим матрицу к единичной\n for j in reversed(range(len(arr))):\n for i in reversed(range(j)):\n arr[i] = arr[i] - arr[j] * arr[i][j]\n print('\\nМатрица в единичном виде')\n print(arr)\n # Формируем и возвращаем результат\n answer = {('x' + str(i + 1))\n : format(arr[:, -1][i], f'.{prec}f') for i in range(len(arr))}\n return answer\n\n\ndef norm_1(matrix):\n data = np.array(matrix)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_2(matrix):\n data = np.array(matrix).T\n data = np.array(data)\n return max([np.sum(np.absolute(data[i])) for i in range(len(data))])\n\n\ndef norm_3(matrix):\n data = np.square(np.array(matrix).flatten())\n return math.sqrt(np.sum(data))\n\n\ndef converges(matrix):\n return norm_1(matrix) < 1 or norm_2(matrix) < 1 or norm_3(matrix) < 1\n\n# Метод простой итерации\ndef iteration(left, right, eps=0.0001, prec=5):\n # Формируем матрицу Альфа\n alpha = [[(-left[i][j] / left[i][i]) if (i != j)\n else 0 for j in 
range(len(left))] for i in range(len(left[0]))]\n # Формируем вектор Бета\n beta = np.array([right[i] / left[i][i] for i in range(len(left))])\n # Задаем текущую точность\n norm_alpha = min(norm_1(alpha), norm_2(alpha), norm_3(alpha))\n norm_beta = norm_1(beta)\n cur_eps = norm_alpha / (1 - norm_alpha) * norm_beta\n # Если решение сходится\n if converges(alpha):\n # Выбираем за начальное приближение вектор Бэта\n x = np.copy(beta)\n it = 0\n # Выходим из цикла при достижении указанной точности\n while cur_eps > eps:\n # Запоминаем предыдущее значение\n prev_x = np.copy(x)\n # Считаем следующее приблеженное значение\n x = np.dot(alpha, prev_x) + beta\n # Считаем точность\n cur_eps = cur_eps * norm_alpha\n it += 1\n print('Итерация', it, ': X =', x)\n # Формируем и возвращаем результат\n answer = {('x' + str(i + 1))\n : format(x[i], f'.{prec}f') for i in range(len(x))}\n return answer\n # Если решение не сходится - ошибка\n else:\n return 'Решение не сходится!'\n\nprint('Метод Гаусса')\nres = gauss(a, b, prec=5)\nprint('Решение:', res)\nprint('\\nМетод простой итерации')\nres = iteration(a, b, eps=0.01, prec=5)\nprint('Решение:', res)\n\n",
"step-ids": [
4,
6,
7,
9,
10
]
}
|
[
4,
6,
7,
9,
10
] |
# 6. Evaluate Classifier: you can use any metric you choose for this assignment
# (accuracy is the easiest one). Feel free to evaluate it on the same data you
# built the model on (this is not a good idea in general but for this assignment,
# it is fine). We haven't covered models and evaluation yet, so don't worry about
# creating validation sets or cross-validation.
import pandas as pd
import matplotlib.pyplot as plt
import pylab as pl
from sklearn.metrics import roc_curve, auc, classification_report, confusion_matrix
# credits to https://github.com/yhat/DataGotham2013/blob/master/notebooks/8%20-%20Fitting%20and%20Evaluating%20Your%20Model.ipynb
def evaluate(model, X_te, y_te):
'''
Given the model and independent and dependent testing data,
print out statements that evaluate classifier
'''
probs = model.predict_proba(X_te)
plt.hist(probs[:,1])
plt.xlabel('Likelihood of Significant Financial')
plt.ylabel('Frequency')
# We should also look at Accuracy
print("Accuracy = " + str(model.score(X_te, y_te)))
# Finally -- Precision & Recall
y_hat = model.predict(X_te)
print(classification_report(y_te, y_hat, labels=[0, 1]))
y_hat = model.predict(X_te)
confusion_matrix = pd.crosstab(y_hat,
y_te,
rownames=["Actual"],
colnames=["Predicted"])
print(confusion_matrix)
def plot_roc(probs, y_te):
'''
Plots ROC curve.
'''
plt.figure()
fpr, tpr, thresholds = roc_curve(y_te, probs)
roc_auc = auc(fpr, tpr)
pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
pl.plot([0, 1], [0, 1], 'k--')
pl.xlim([0.0, 1.05])
pl.ylim([0.0, 1.05])
pl.xlabel('False Positive Rate')
pl.ylabel('True Positive Rate')
pl.title("ROC Curve")
pl.legend(loc="lower right")
pl.show()
|
normal
|
{
"blob_id": "62de629d8f28435ea8dc3dc093cac95e7cedf128",
"index": 7859,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef evaluate(model, X_te, y_te):\n \"\"\"\n Given the model and independent and dependent testing data,\n print out statements that evaluate classifier\n \"\"\"\n probs = model.predict_proba(X_te)\n plt.hist(probs[:, 1])\n plt.xlabel('Likelihood of Significant Financial')\n plt.ylabel('Frequency')\n print('Accuracy = ' + str(model.score(X_te, y_te)))\n y_hat = model.predict(X_te)\n print(classification_report(y_te, y_hat, labels=[0, 1]))\n y_hat = model.predict(X_te)\n confusion_matrix = pd.crosstab(y_hat, y_te, rownames=['Actual'],\n colnames=['Predicted'])\n print(confusion_matrix)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef evaluate(model, X_te, y_te):\n \"\"\"\n Given the model and independent and dependent testing data,\n print out statements that evaluate classifier\n \"\"\"\n probs = model.predict_proba(X_te)\n plt.hist(probs[:, 1])\n plt.xlabel('Likelihood of Significant Financial')\n plt.ylabel('Frequency')\n print('Accuracy = ' + str(model.score(X_te, y_te)))\n y_hat = model.predict(X_te)\n print(classification_report(y_te, y_hat, labels=[0, 1]))\n y_hat = model.predict(X_te)\n confusion_matrix = pd.crosstab(y_hat, y_te, rownames=['Actual'],\n colnames=['Predicted'])\n print(confusion_matrix)\n\n\ndef plot_roc(probs, y_te):\n \"\"\"\n Plots ROC curve.\n \"\"\"\n plt.figure()\n fpr, tpr, thresholds = roc_curve(y_te, probs)\n roc_auc = auc(fpr, tpr)\n pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n pl.plot([0, 1], [0, 1], 'k--')\n pl.xlim([0.0, 1.05])\n pl.ylim([0.0, 1.05])\n pl.xlabel('False Positive Rate')\n pl.ylabel('True Positive Rate')\n pl.title('ROC Curve')\n pl.legend(loc='lower right')\n pl.show()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport pylab as pl\nfrom sklearn.metrics import roc_curve, auc, classification_report, confusion_matrix\n\n\ndef evaluate(model, X_te, y_te):\n \"\"\"\n Given the model and independent and dependent testing data,\n print out statements that evaluate classifier\n \"\"\"\n probs = model.predict_proba(X_te)\n plt.hist(probs[:, 1])\n plt.xlabel('Likelihood of Significant Financial')\n plt.ylabel('Frequency')\n print('Accuracy = ' + str(model.score(X_te, y_te)))\n y_hat = model.predict(X_te)\n print(classification_report(y_te, y_hat, labels=[0, 1]))\n y_hat = model.predict(X_te)\n confusion_matrix = pd.crosstab(y_hat, y_te, rownames=['Actual'],\n colnames=['Predicted'])\n print(confusion_matrix)\n\n\ndef plot_roc(probs, y_te):\n \"\"\"\n Plots ROC curve.\n \"\"\"\n plt.figure()\n fpr, tpr, thresholds = roc_curve(y_te, probs)\n roc_auc = auc(fpr, tpr)\n pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n pl.plot([0, 1], [0, 1], 'k--')\n pl.xlim([0.0, 1.05])\n pl.ylim([0.0, 1.05])\n pl.xlabel('False Positive Rate')\n pl.ylabel('True Positive Rate')\n pl.title('ROC Curve')\n pl.legend(loc='lower right')\n pl.show()\n",
"step-5": "# 6. Evaluate Classifier: you can use any metric you choose for this assignment \n# (accuracy is the easiest one). Feel free to evaluate it on the same data you \n# built the model on (this is not a good idea in general but for this assignment, \n# it is fine). We haven't covered models and evaluation yet, so don't worry about \n# creating validation sets or cross-validation. \n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pylab as pl\nfrom sklearn.metrics import roc_curve, auc, classification_report, confusion_matrix\n\n# credits to https://github.com/yhat/DataGotham2013/blob/master/notebooks/8%20-%20Fitting%20and%20Evaluating%20Your%20Model.ipynb\n\ndef evaluate(model, X_te, y_te):\n '''\n Given the model and independent and dependent testing data,\n print out statements that evaluate classifier\n '''\n probs = model.predict_proba(X_te)\n \n plt.hist(probs[:,1])\n plt.xlabel('Likelihood of Significant Financial')\n plt.ylabel('Frequency')\n\n # We should also look at Accuracy\n print(\"Accuracy = \" + str(model.score(X_te, y_te)))\n\n # Finally -- Precision & Recall\n y_hat = model.predict(X_te)\n print(classification_report(y_te, y_hat, labels=[0, 1]))\n \n y_hat = model.predict(X_te) \n confusion_matrix = pd.crosstab(y_hat, \n y_te, \n rownames=[\"Actual\"], \n colnames=[\"Predicted\"])\n print(confusion_matrix)\n\ndef plot_roc(probs, y_te):\n '''\n Plots ROC curve.\n '''\n plt.figure()\n fpr, tpr, thresholds = roc_curve(y_te, probs)\n roc_auc = auc(fpr, tpr)\n pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n pl.plot([0, 1], [0, 1], 'k--')\n pl.xlim([0.0, 1.05])\n pl.ylim([0.0, 1.05])\n pl.xlabel('False Positive Rate')\n pl.ylabel('True Positive Rate')\n pl.title(\"ROC Curve\")\n pl.legend(loc=\"lower right\")\n pl.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class ModelInfo:
def __init__(self, name: str, path: str, filter: str):
self.name: str = name
self.path: str = path
self.filter: str = filter
|
normal
|
{
"blob_id": "def089c2749444797ac3079809c082dacab08554",
"index": 1167,
"step-1": "<mask token>\n",
"step-2": "class ModelInfo:\n <mask token>\n",
"step-3": "class ModelInfo:\n\n def __init__(self, name: str, path: str, filter: str):\n self.name: str = name\n self.path: str = path\n self.filter: str = filter\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import tensorflow as tf
from tfrecords_handler.moving_window.tfrecord_mean_reader import TFRecordReader
from configs.global_configs import training_data_configs
class StackingModelTester:
def __init__(self, **kwargs):
self.__use_bias = kwargs["use_bias"]
self.__use_peepholes = kwargs["use_peepholes"]
self.__input_size = kwargs["input_size"]
self.__output_size = kwargs["output_size"]
self.__binary_train_file_path = kwargs["binary_train_file_path"]
self.__binary_test_file_path = kwargs["binary_test_file_path"]
self.__seed = kwargs["seed"]
self.__cell_type = kwargs["cell_type"]
def __l1_loss(self, z, t):
loss = tf.reduce_mean(tf.abs(t - z))
return loss
def __l2_loss(selfself, z, t):
loss = tf.losses.mean_squared_error(labels=t, predictions=z)
return loss
# Training the time series
def test_model(self, **kwargs):
# extract the parameters from the kwargs
num_hidden_layers = kwargs['num_hidden_layers']
cell_dimension = kwargs['cell_dimension']
minibatch_size = kwargs['minibatch_size']
max_epoch_size = kwargs['max_epoch_size']
max_num_epochs = kwargs['max_num_epochs']
l2_regularization = kwargs['l2_regularization']
gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
optimizer_fn = kwargs['optimizer_fn']
random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
# reset the tensorflow graph
tf.reset_default_graph()
tf.set_random_seed(self.__seed)
# declare the input and output placeholders
input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])
noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
training_input = input + noise
testing_input = input
# output format [batch_size, sequence_length, dimension]
true_output = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])
sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])
weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)
# RNN with the layer of cells
def cell():
if self.__cell_type == "LSTM":
cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
initializer=weight_initializer)
elif self.__cell_type == "GRU":
cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
elif self.__cell_type == "RNN":
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
return cell
multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for _ in range(int(num_hidden_layers))])
with tf.variable_scope('train_scope') as train_scope:
training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
inputs=training_input,
sequence_length=sequence_lengths,
dtype=tf.float32)
# connect the dense layer to the RNN
training_prediction_output = tf.layers.dense(
inputs=tf.convert_to_tensor(value=training_rnn_outputs, dtype=tf.float32),
units=self.__output_size,
use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer')
with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE) as inference_scope:
inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
inputs=testing_input,
sequence_length=sequence_lengths,
dtype=tf.float32)
# connect the dense layer to the RNN
inference_prediction_output = tf.layers.dense(
inputs=tf.convert_to_tensor(value=inference_rnn_outputs, dtype=tf.float32),
units=self.__output_size,
use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer', reuse=True)
# error that should be minimized in the training process
error = self.__l1_loss(training_prediction_output, true_output)
# l2 regularization of the trainable model parameters
l2_loss = 0.0
for var in tf.trainable_variables():
l2_loss += tf.nn.l2_loss(var)
l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))
total_loss = tf.cast(error, dtype=tf.float64) + l2_loss
# create the adagrad optimizer
optimizer = optimizer_fn(total_loss)
# create the Dataset objects for the training and test data
training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type="ZLIB")
test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type="ZLIB")
# parse the records
tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)
# prepare the training data into batches
# randomly shuffle the time series within the dataset
shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
# training_dataset = training_dataset.apply(
# tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
# count=int(max_epoch_size), seed=shuffle_seed))
training_dataset = training_dataset.repeat(count=int(max_epoch_size))
training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)
# create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches
padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
padded_shapes=(
[], [tf.Dimension(None), self.__input_size],
[tf.Dimension(None), self.__output_size],
[tf.Dimension(None), self.__output_size + 2]))
# get an iterator to the batches
training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()
# access each batch using the iterator
next_training_data_batch = training_data_batch_iterator.get_next()
# preparing the test data
test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)
# create a single batch from all the test time series by padding the datasets to make the variable sequence lengths fixed
padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size),
padded_shapes=([], [tf.Dimension(None), self.__input_size],
[tf.Dimension(None), self.__output_size + 2]))
# get an iterator to the test input data batch
test_input_iterator = padded_test_input_data.make_one_shot_iterator()
# access the test input batch using the iterator
test_input_data_batch = test_input_iterator.get_next()
# setup variable initialization
init_op = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init_op)
for epoch in range(int(max_num_epochs)):
print("Epoch->", epoch)
session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
while True:
try:
training_data_batch_value = session.run(next_training_data_batch,
feed_dict={shuffle_seed: epoch})
session.run(optimizer,
feed_dict={input: training_data_batch_value[1],
true_output: training_data_batch_value[2],
sequence_lengths: training_data_batch_value[0]})
except tf.errors.OutOfRangeError:
break
# applying the model to the test data
list_of_forecasts = []
while True:
try:
# get the batch of test inputs
test_input_batch_value = session.run(test_input_data_batch)
# get the output of the network for the test input data batch
test_output = session.run(inference_prediction_output,
feed_dict={input: test_input_batch_value[1],
sequence_lengths: test_input_batch_value[0]})
last_output_index = test_input_batch_value[0] - 1
array_first_dimension = np.array(range(0, test_input_batch_value[0].shape[0]))
forecasts = test_output[array_first_dimension, last_output_index]
list_of_forecasts.extend(forecasts.tolist())
except tf.errors.OutOfRangeError:
break
session.close()
return list_of_forecasts
|
normal
|
{
"blob_id": "3b7839347f24d39904d29d40e688a5dfd63534d7",
"index": 3560,
"step-1": "<mask token>\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs['use_bias']\n self.__use_peepholes = kwargs['use_peepholes']\n self.__input_size = kwargs['input_size']\n self.__output_size = kwargs['output_size']\n self.__binary_train_file_path = kwargs['binary_train_file_path']\n self.__binary_test_file_path = kwargs['binary_test_file_path']\n self.__seed = kwargs['seed']\n self.__cell_type = kwargs['cell_type']\n <mask token>\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs['use_bias']\n self.__use_peepholes = kwargs['use_peepholes']\n self.__input_size = kwargs['input_size']\n self.__output_size = kwargs['output_size']\n self.__binary_train_file_path = kwargs['binary_train_file_path']\n self.__binary_test_file_path = kwargs['binary_test_file_path']\n self.__seed = kwargs['seed']\n self.__cell_type = kwargs['cell_type']\n\n def __l1_loss(self, z, t):\n loss = tf.reduce_mean(tf.abs(t - z))\n return loss\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs['use_bias']\n self.__use_peepholes = kwargs['use_peepholes']\n self.__input_size = kwargs['input_size']\n self.__output_size = kwargs['output_size']\n self.__binary_train_file_path = kwargs['binary_train_file_path']\n self.__binary_test_file_path = kwargs['binary_test_file_path']\n self.__seed = kwargs['seed']\n self.__cell_type = kwargs['cell_type']\n\n def __l1_loss(self, z, t):\n loss = tf.reduce_mean(tf.abs(t - z))\n return loss\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n\n def test_model(self, **kwargs):\n num_hidden_layers = kwargs['num_hidden_layers']\n cell_dimension = kwargs['cell_dimension']\n minibatch_size = kwargs['minibatch_size']\n max_epoch_size = kwargs['max_epoch_size']\n max_num_epochs = kwargs['max_num_epochs']\n l2_regularization = kwargs['l2_regularization']\n gaussian_noise_stdev = kwargs['gaussian_noise_stdev']\n optimizer_fn = kwargs['optimizer_fn']\n random_normal_initializer_stdev = kwargs[\n 'random_normal_initializer_stdev']\n tf.reset_default_graph()\n tf.set_random_seed(self.__seed)\n input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.\n __input_size])\n noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=\n gaussian_noise_stdev, dtype=tf.float32)\n training_input = input + noise\n testing_input = input\n true_output = tf.placeholder(dtype=tf.float32, shape=[None, None,\n self.__output_size])\n sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])\n weight_initializer = tf.truncated_normal_initializer(stddev=\n random_normal_initializer_stdev)\n\n def cell():\n if self.__cell_type == 'LSTM':\n cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension\n ), use_peepholes=self.__use_peepholes, initializer=\n weight_initializer)\n elif self.__cell_type == 'GRU':\n cell = 
tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension),\n kernel_initializer=weight_initializer)\n elif self.__cell_type == 'RNN':\n cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(\n cell_dimension))\n return cell\n multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for\n _ in range(int(num_hidden_layers))])\n with tf.variable_scope('train_scope') as train_scope:\n training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell\n =multi_layered_cell, inputs=training_input, sequence_length\n =sequence_lengths, dtype=tf.float32)\n training_prediction_output = tf.layers.dense(inputs=tf.\n convert_to_tensor(value=training_rnn_outputs, dtype=tf.\n float32), units=self.__output_size, use_bias=self.\n __use_bias, kernel_initializer=weight_initializer, name=\n 'dense_layer')\n with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE\n ) as inference_scope:\n inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(\n cell=multi_layered_cell, inputs=testing_input,\n sequence_length=sequence_lengths, dtype=tf.float32)\n inference_prediction_output = tf.layers.dense(inputs=tf.\n convert_to_tensor(value=inference_rnn_outputs, dtype=tf.\n float32), units=self.__output_size, use_bias=self.\n __use_bias, kernel_initializer=weight_initializer, name=\n 'dense_layer', reuse=True)\n error = self.__l1_loss(training_prediction_output, true_output)\n l2_loss = 0.0\n for var in tf.trainable_variables():\n l2_loss += tf.nn.l2_loss(var)\n l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64),\n tf.cast(l2_loss, dtype=tf.float64))\n total_loss = tf.cast(error, dtype=tf.float64) + l2_loss\n optimizer = optimizer_fn(total_loss)\n training_dataset = tf.data.TFRecordDataset(filenames=[self.\n __binary_train_file_path], compression_type='ZLIB')\n test_dataset = tf.data.TFRecordDataset([self.\n __binary_test_file_path], compression_type='ZLIB')\n tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)\n shuffle_seed = 
tf.placeholder(dtype=tf.int64, shape=[])\n training_dataset = training_dataset.repeat(count=int(max_epoch_size))\n training_dataset = training_dataset.map(tfrecord_reader.\n validation_data_parser)\n padded_training_data_batches = training_dataset.padded_batch(batch_size\n =int(minibatch_size), padded_shapes=([], [tf.Dimension(None),\n self.__input_size], [tf.Dimension(None), self.__output_size], [\n tf.Dimension(None), self.__output_size + 2]))\n training_data_batch_iterator = (padded_training_data_batches.\n make_initializable_iterator())\n next_training_data_batch = training_data_batch_iterator.get_next()\n test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)\n padded_test_input_data = test_dataset.padded_batch(batch_size=int(\n minibatch_size), padded_shapes=([], [tf.Dimension(None), self.\n __input_size], [tf.Dimension(None), self.__output_size + 2]))\n test_input_iterator = padded_test_input_data.make_one_shot_iterator()\n test_input_data_batch = test_input_iterator.get_next()\n init_op = tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init_op)\n for epoch in range(int(max_num_epochs)):\n print('Epoch->', epoch)\n session.run(training_data_batch_iterator.initializer,\n feed_dict={shuffle_seed: epoch})\n while True:\n try:\n training_data_batch_value = session.run(\n next_training_data_batch, feed_dict={\n shuffle_seed: epoch})\n session.run(optimizer, feed_dict={input:\n training_data_batch_value[1], true_output:\n training_data_batch_value[2], sequence_lengths:\n training_data_batch_value[0]})\n except tf.errors.OutOfRangeError:\n break\n list_of_forecasts = []\n while True:\n try:\n test_input_batch_value = session.run(test_input_data_batch)\n test_output = session.run(inference_prediction_output,\n feed_dict={input: test_input_batch_value[1],\n sequence_lengths: test_input_batch_value[0]})\n last_output_index = test_input_batch_value[0] - 1\n array_first_dimension = np.array(range(0,\n 
test_input_batch_value[0].shape[0]))\n forecasts = test_output[array_first_dimension,\n last_output_index]\n list_of_forecasts.extend(forecasts.tolist())\n except tf.errors.OutOfRangeError:\n break\n session.close()\n return list_of_forecasts\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nfrom tfrecords_handler.moving_window.tfrecord_mean_reader import TFRecordReader\nfrom configs.global_configs import training_data_configs\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs['use_bias']\n self.__use_peepholes = kwargs['use_peepholes']\n self.__input_size = kwargs['input_size']\n self.__output_size = kwargs['output_size']\n self.__binary_train_file_path = kwargs['binary_train_file_path']\n self.__binary_test_file_path = kwargs['binary_test_file_path']\n self.__seed = kwargs['seed']\n self.__cell_type = kwargs['cell_type']\n\n def __l1_loss(self, z, t):\n loss = tf.reduce_mean(tf.abs(t - z))\n return loss\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n\n def test_model(self, **kwargs):\n num_hidden_layers = kwargs['num_hidden_layers']\n cell_dimension = kwargs['cell_dimension']\n minibatch_size = kwargs['minibatch_size']\n max_epoch_size = kwargs['max_epoch_size']\n max_num_epochs = kwargs['max_num_epochs']\n l2_regularization = kwargs['l2_regularization']\n gaussian_noise_stdev = kwargs['gaussian_noise_stdev']\n optimizer_fn = kwargs['optimizer_fn']\n random_normal_initializer_stdev = kwargs[\n 'random_normal_initializer_stdev']\n tf.reset_default_graph()\n tf.set_random_seed(self.__seed)\n input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.\n __input_size])\n noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=\n gaussian_noise_stdev, dtype=tf.float32)\n training_input = input + noise\n testing_input = input\n true_output = tf.placeholder(dtype=tf.float32, shape=[None, None,\n self.__output_size])\n sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])\n weight_initializer = tf.truncated_normal_initializer(stddev=\n random_normal_initializer_stdev)\n\n def cell():\n if self.__cell_type == 'LSTM':\n cell = 
tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension\n ), use_peepholes=self.__use_peepholes, initializer=\n weight_initializer)\n elif self.__cell_type == 'GRU':\n cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension),\n kernel_initializer=weight_initializer)\n elif self.__cell_type == 'RNN':\n cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(\n cell_dimension))\n return cell\n multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for\n _ in range(int(num_hidden_layers))])\n with tf.variable_scope('train_scope') as train_scope:\n training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell\n =multi_layered_cell, inputs=training_input, sequence_length\n =sequence_lengths, dtype=tf.float32)\n training_prediction_output = tf.layers.dense(inputs=tf.\n convert_to_tensor(value=training_rnn_outputs, dtype=tf.\n float32), units=self.__output_size, use_bias=self.\n __use_bias, kernel_initializer=weight_initializer, name=\n 'dense_layer')\n with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE\n ) as inference_scope:\n inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(\n cell=multi_layered_cell, inputs=testing_input,\n sequence_length=sequence_lengths, dtype=tf.float32)\n inference_prediction_output = tf.layers.dense(inputs=tf.\n convert_to_tensor(value=inference_rnn_outputs, dtype=tf.\n float32), units=self.__output_size, use_bias=self.\n __use_bias, kernel_initializer=weight_initializer, name=\n 'dense_layer', reuse=True)\n error = self.__l1_loss(training_prediction_output, true_output)\n l2_loss = 0.0\n for var in tf.trainable_variables():\n l2_loss += tf.nn.l2_loss(var)\n l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64),\n tf.cast(l2_loss, dtype=tf.float64))\n total_loss = tf.cast(error, dtype=tf.float64) + l2_loss\n optimizer = optimizer_fn(total_loss)\n training_dataset = tf.data.TFRecordDataset(filenames=[self.\n __binary_train_file_path], compression_type='ZLIB')\n test_dataset = tf.data.TFRecordDataset([self.\n 
__binary_test_file_path], compression_type='ZLIB')\n tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)\n shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])\n training_dataset = training_dataset.repeat(count=int(max_epoch_size))\n training_dataset = training_dataset.map(tfrecord_reader.\n validation_data_parser)\n padded_training_data_batches = training_dataset.padded_batch(batch_size\n =int(minibatch_size), padded_shapes=([], [tf.Dimension(None),\n self.__input_size], [tf.Dimension(None), self.__output_size], [\n tf.Dimension(None), self.__output_size + 2]))\n training_data_batch_iterator = (padded_training_data_batches.\n make_initializable_iterator())\n next_training_data_batch = training_data_batch_iterator.get_next()\n test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)\n padded_test_input_data = test_dataset.padded_batch(batch_size=int(\n minibatch_size), padded_shapes=([], [tf.Dimension(None), self.\n __input_size], [tf.Dimension(None), self.__output_size + 2]))\n test_input_iterator = padded_test_input_data.make_one_shot_iterator()\n test_input_data_batch = test_input_iterator.get_next()\n init_op = tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init_op)\n for epoch in range(int(max_num_epochs)):\n print('Epoch->', epoch)\n session.run(training_data_batch_iterator.initializer,\n feed_dict={shuffle_seed: epoch})\n while True:\n try:\n training_data_batch_value = session.run(\n next_training_data_batch, feed_dict={\n shuffle_seed: epoch})\n session.run(optimizer, feed_dict={input:\n training_data_batch_value[1], true_output:\n training_data_batch_value[2], sequence_lengths:\n training_data_batch_value[0]})\n except tf.errors.OutOfRangeError:\n break\n list_of_forecasts = []\n while True:\n try:\n test_input_batch_value = session.run(test_input_data_batch)\n test_output = session.run(inference_prediction_output,\n feed_dict={input: test_input_batch_value[1],\n sequence_lengths: 
test_input_batch_value[0]})\n last_output_index = test_input_batch_value[0] - 1\n array_first_dimension = np.array(range(0,\n test_input_batch_value[0].shape[0]))\n forecasts = test_output[array_first_dimension,\n last_output_index]\n list_of_forecasts.extend(forecasts.tolist())\n except tf.errors.OutOfRangeError:\n break\n session.close()\n return list_of_forecasts\n",
"step-5": "import numpy as np\nimport tensorflow as tf\nfrom tfrecords_handler.moving_window.tfrecord_mean_reader import TFRecordReader\nfrom configs.global_configs import training_data_configs\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs[\"use_bias\"]\n self.__use_peepholes = kwargs[\"use_peepholes\"]\n self.__input_size = kwargs[\"input_size\"]\n self.__output_size = kwargs[\"output_size\"]\n self.__binary_train_file_path = kwargs[\"binary_train_file_path\"]\n self.__binary_test_file_path = kwargs[\"binary_test_file_path\"]\n self.__seed = kwargs[\"seed\"]\n self.__cell_type = kwargs[\"cell_type\"]\n\n def __l1_loss(self, z, t):\n loss = tf.reduce_mean(tf.abs(t - z))\n return loss\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n\n # Training the time series\n def test_model(self, **kwargs):\n\n # extract the parameters from the kwargs\n num_hidden_layers = kwargs['num_hidden_layers']\n cell_dimension = kwargs['cell_dimension']\n minibatch_size = kwargs['minibatch_size']\n max_epoch_size = kwargs['max_epoch_size']\n max_num_epochs = kwargs['max_num_epochs']\n l2_regularization = kwargs['l2_regularization']\n gaussian_noise_stdev = kwargs['gaussian_noise_stdev']\n optimizer_fn = kwargs['optimizer_fn']\n random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']\n\n # reset the tensorflow graph\n tf.reset_default_graph()\n\n tf.set_random_seed(self.__seed)\n\n # declare the input and output placeholders\n input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])\n noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)\n training_input = input + noise\n\n testing_input = input\n\n # output format [batch_size, sequence_length, dimension]\n true_output = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])\n sequence_lengths = 
tf.placeholder(dtype=tf.int64, shape=[None])\n\n weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)\n\n # RNN with the layer of cells\n def cell():\n if self.__cell_type == \"LSTM\":\n cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,\n initializer=weight_initializer)\n elif self.__cell_type == \"GRU\":\n cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)\n elif self.__cell_type == \"RNN\":\n cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))\n return cell\n\n multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for _ in range(int(num_hidden_layers))])\n\n with tf.variable_scope('train_scope') as train_scope:\n training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,\n inputs=training_input,\n sequence_length=sequence_lengths,\n dtype=tf.float32)\n\n # connect the dense layer to the RNN\n training_prediction_output = tf.layers.dense(\n inputs=tf.convert_to_tensor(value=training_rnn_outputs, dtype=tf.float32),\n units=self.__output_size,\n use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer')\n\n with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE) as inference_scope:\n inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,\n inputs=testing_input,\n sequence_length=sequence_lengths,\n dtype=tf.float32)\n # connect the dense layer to the RNN\n inference_prediction_output = tf.layers.dense(\n inputs=tf.convert_to_tensor(value=inference_rnn_outputs, dtype=tf.float32),\n units=self.__output_size,\n use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer', reuse=True)\n\n # error that should be minimized in the training process\n error = self.__l1_loss(training_prediction_output, true_output)\n\n # l2 regularization of the trainable model parameters\n l2_loss = 0.0\n for var in 
tf.trainable_variables():\n l2_loss += tf.nn.l2_loss(var)\n\n l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))\n\n total_loss = tf.cast(error, dtype=tf.float64) + l2_loss\n\n # create the adagrad optimizer\n optimizer = optimizer_fn(total_loss)\n\n # create the Dataset objects for the training and test data\n training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type=\"ZLIB\")\n test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type=\"ZLIB\")\n\n # parse the records\n tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)\n\n # prepare the training data into batches\n # randomly shuffle the time series within the dataset\n shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])\n # training_dataset = training_dataset.apply(\n # tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,\n # count=int(max_epoch_size), seed=shuffle_seed))\n training_dataset = training_dataset.repeat(count=int(max_epoch_size))\n training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)\n\n # create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches\n padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),\n padded_shapes=(\n [], [tf.Dimension(None), self.__input_size],\n [tf.Dimension(None), self.__output_size],\n [tf.Dimension(None), self.__output_size + 2]))\n\n # get an iterator to the batches\n training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()\n\n # access each batch using the iterator\n next_training_data_batch = training_data_batch_iterator.get_next()\n\n # preparing the test data\n test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)\n\n # create a single batch from all the test time series by padding the datasets to make the 
variable sequence lengths fixed\n padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size),\n padded_shapes=([], [tf.Dimension(None), self.__input_size],\n [tf.Dimension(None), self.__output_size + 2]))\n\n # get an iterator to the test input data batch\n test_input_iterator = padded_test_input_data.make_one_shot_iterator()\n\n # access the test input batch using the iterator\n test_input_data_batch = test_input_iterator.get_next()\n\n # setup variable initialization\n init_op = tf.global_variables_initializer()\n\n with tf.Session() as session:\n session.run(init_op)\n\n for epoch in range(int(max_num_epochs)):\n print(\"Epoch->\", epoch)\n session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})\n while True:\n try:\n training_data_batch_value = session.run(next_training_data_batch,\n feed_dict={shuffle_seed: epoch})\n\n session.run(optimizer,\n feed_dict={input: training_data_batch_value[1],\n true_output: training_data_batch_value[2],\n sequence_lengths: training_data_batch_value[0]})\n\n except tf.errors.OutOfRangeError:\n break\n\n # applying the model to the test data\n\n list_of_forecasts = []\n while True:\n try:\n\n # get the batch of test inputs\n test_input_batch_value = session.run(test_input_data_batch)\n\n # get the output of the network for the test input data batch\n test_output = session.run(inference_prediction_output,\n feed_dict={input: test_input_batch_value[1],\n sequence_lengths: test_input_batch_value[0]})\n\n last_output_index = test_input_batch_value[0] - 1\n array_first_dimension = np.array(range(0, test_input_batch_value[0].shape[0]))\n forecasts = test_output[array_first_dimension, last_output_index]\n list_of_forecasts.extend(forecasts.tolist())\n\n except tf.errors.OutOfRangeError:\n break\n\n session.close()\n return list_of_forecasts\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from odoo import models,fields, api
class director(models.Model):
#Clasica
_inherit = 'base.entidad'
_name = 'cinemateca.director'
name = fields.Char(string="name", required=True, help="Nombre del director")
apellidos = fields.Char(string="apellidos", required=True, help="Apellidos del director")
pelicula_ids = fields.One2many("cinemateca.pelicula", "director_id", string="sesion")
|
normal
|
{
"blob_id": "006f499eed7cd5d73bb0cb9b242c90726fff35c1",
"index": 3185,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass director(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass director(models.Model):\n _inherit = 'base.entidad'\n _name = 'cinemateca.director'\n name = fields.Char(string='name', required=True, help='Nombre del director'\n )\n apellidos = fields.Char(string='apellidos', required=True, help=\n 'Apellidos del director')\n pelicula_ids = fields.One2many('cinemateca.pelicula', 'director_id',\n string='sesion')\n",
"step-4": "from odoo import models, fields, api\n\n\nclass director(models.Model):\n _inherit = 'base.entidad'\n _name = 'cinemateca.director'\n name = fields.Char(string='name', required=True, help='Nombre del director'\n )\n apellidos = fields.Char(string='apellidos', required=True, help=\n 'Apellidos del director')\n pelicula_ids = fields.One2many('cinemateca.pelicula', 'director_id',\n string='sesion')\n",
"step-5": "from odoo import models,fields, api\n\nclass director(models.Model):\n #Clasica\n _inherit = 'base.entidad'\n _name = 'cinemateca.director'\n name = fields.Char(string=\"name\", required=True, help=\"Nombre del director\")\n apellidos = fields.Char(string=\"apellidos\", required=True, help=\"Apellidos del director\")\n pelicula_ids = fields.One2many(\"cinemateca.pelicula\", \"director_id\", string=\"sesion\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
def predict(tag):
test = getPIData(tag, '2019-11-05', '2019-11-06')
test_arg = addFeature(test)
test_norm = normalize(test_arg)
X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)
model = loadModel(tag)
return model.predict(X_test)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
def predict(tag):
test = getPIData(tag, '2019-11-05', '2019-11-06')
test_arg = addFeature(test)
test_norm = normalize(test_arg)
X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)
model = loadModel(tag)
return model.predict(X_test)
print(predict('USG60_eth0_ifInOctets'))
<|reserved_special_token_1|>
from keras.models import load_model
from DataManager import *
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
def predict(tag):
test = getPIData(tag, '2019-11-05', '2019-11-06')
test_arg = addFeature(test)
test_norm = normalize(test_arg)
X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)
model = loadModel(tag)
return model.predict(X_test)
print(predict('USG60_eth0_ifInOctets'))
|
flexible
|
{
"blob_id": "a6154c5d855dc53d73db08bbb5b5d7437056e156",
"index": 1566,
"step-1": "<mask token>\n\n\ndef loadModel(name):\n model = load_model('./Model/%s.h5' % name)\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadModel(name):\n model = load_model('./Model/%s.h5' % name)\n return model\n\n\ndef predict(tag):\n test = getPIData(tag, '2019-11-05', '2019-11-06')\n test_arg = addFeature(test)\n test_norm = normalize(test_arg)\n X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)\n model = loadModel(tag)\n return model.predict(X_test)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loadModel(name):\n model = load_model('./Model/%s.h5' % name)\n return model\n\n\ndef predict(tag):\n test = getPIData(tag, '2019-11-05', '2019-11-06')\n test_arg = addFeature(test)\n test_norm = normalize(test_arg)\n X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)\n model = loadModel(tag)\n return model.predict(X_test)\n\n\nprint(predict('USG60_eth0_ifInOctets'))\n",
"step-4": "from keras.models import load_model\nfrom DataManager import *\n\n\ndef loadModel(name):\n model = load_model('./Model/%s.h5' % name)\n return model\n\n\ndef predict(tag):\n test = getPIData(tag, '2019-11-05', '2019-11-06')\n test_arg = addFeature(test)\n test_norm = normalize(test_arg)\n X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)\n model = loadModel(tag)\n return model.predict(X_test)\n\n\nprint(predict('USG60_eth0_ifInOctets'))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
c = Client()
<|reserved_special_token_1|>
from end import Client
c = Client()
|
flexible
|
{
"blob_id": "1be510e6715d21e814c48fe05496704e9a65d554",
"index": 308,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nc = Client()\n",
"step-3": "from end import Client\nc = Client()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import base
import telebot
import markups
from starter import start_bot, bot
@bot.message_handler(commands=['start'])
def start(message):
chat = message.chat
# welcome(msg)
msg = bot.send_message(chat.id, "Select a language in the list", reply_markup=markups.language())
bot.register_next_step_handler(msg, llanguage)
# base.create_user(chat.id)
def llanguage(msg):
chat = msg.chat
base.create_user(msg.chat.id, msg.text)
markup = telebot.types.ReplyKeyboardMarkup(True, True)
markup.row("ok")
str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,"confirm"), reply_markup=markup)
bot.register_next_step_handler(str, welcome)
def welcome(msg):
bot.send_message(msg.chat.id, "Чат-поддержка", reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') % msg.from_user.first_name,
reply_markup=markups.welcome(), parse_mode='html')
@bot.callback_query_handler(func=lambda call: call.data == 'currency')
def select_currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id,'currency'), chat.id, call.message.message_id, reply_markup=markups.currency())
@bot.message_handler(regexp="Выбор валюты")
def select_currency(msg):
chat = msg.chat
bot.send_message(chat.id, base.get_text(chat.id,'currency'), reply_markup=markups.currency())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'ccur')
def currency(call):
current_currency = call.data[4:] # Выбранная валюта
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
def langg():
markup = telebot.types.InlineKeyboardMarkup()
bt_eng = telebot.types.InlineKeyboardButton(text="English", callback_data="langeng")
bt_rus = telebot.types.InlineKeyboardButton(text="Русский", callback_data="langrus")
bt_ukr = telebot.types.InlineKeyboardButton(text="Украiнський", callback_data="langukr")
markup.add(bt_eng)
markup.add(bt_rus)
markup.add(bt_ukr)
return markup
@bot.callback_query_handler(func=lambda call: call.data[:4] == "lang")
def lan(call):
chat = call.message.chat
new_lan = call.data[4:]
bot.edit_message_text( "Вы выбрали язык",chat.id,call.message.message_id,reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'requests')
def my_requests(call):
text = base.get_text(call.message.chat.id, 'no_req')
bot.edit_message_text(text, call.message.chat.id, call.message.message_id)
bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,
reply_markup=markups.add_request(call.message.chat.id))
@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')
def currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
@bot.message_handler(regexp="Назад")
def back(msg):
bot.send_message(msg.chat.id, "Операции покупки или продажи", reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,"operations"), reply_markup=markups.menu())
@bot.message_handler(regexp="Обменные операции")
def exchange(msg):
bot.send_message(msg.chat.id, "Купить/Продать", reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,"exchamge"), reply_markup=markups.exchangeI())
@bot.callback_query_handler(func=lambda call: call.data == 'buy')
def buy(call):
chat = call.message.chat
bot.send_message(chat.id, "Покупка", reply_markup=markups.exchangeR())
bot.send_message(chat.id, base.get_text(chat.id,'buycur'), reply_markup=markups.buyI_sellI())
@bot.callback_query_handler(func=lambda call: call.data == 'monero')
def monero(call):
chat = call.message.chat
bot.send_message(chat.id, "Покупка/Продажа Monero", reply_markup=markups.payments())
@bot.callback_query_handler(func=lambda call: call.data == 'sell')
def sell(call):
chat = call.message.chat
bot.send_message(chat.id, "Продажа", reply_markup=markups.exchangeR())
bot.send_message(chat.id, base.get_text(chat.id,'sellcur'), reply_markup=markups.buyI_sellI())
@bot.message_handler(regexp="Кошелёк")
def wallet(msg):
bot.send_message(msg.chat.id, "Кошелёк", reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'wallet'), reply_markup=markups.wallet())
@bot.callback_query_handler(func=lambda call: call.data == 'bringin')
def bring_in(call):
msg = call.message
bot.edit_message_text("Выберете валюту на счёт которой придут бабосы", msg.chat.id,
msg.message_id, reply_markup=markups.bringin())
@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')
def bbring(call):
msg = call.message
bot.edit_message_text("Внесите " + call.data[6:], msg.chat.id, msg.message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')
def withdraw(call):
msg=call.message
bot.edit_message_text("С какой валюты списать бобосы",msg.chat.id,msg.message_id,reply_markup=markups.withdraw())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')
def wwithdraw(call):
msg=call.message
bot.edit_message_text("Введите сколько вывести" + call.data[5:],msg.chat.id,msg.message_id)
@bot.callback_query_handler(func=lambda call: call.data == "my requests")
def user_requests(call):
bot.send_message(call.message.chat.id, "Если нужно,то просто раскомменти")
# markup = telebot.types.InlineKeyboardMarkup()
# data = base.get_user_requests(call.message.chat.id)
# val = base.get_user_value(call.message.chat.id)
# if not data:
# btn_add = telebot.types.InlineKeyboardButton("📝 Добавить объявление", callback_data='add request')
# back = telebot.types.InlineKeyboardButton(text="Назад",
# callback_data='exchange')
# markup.row(btn_add, back)
# bot.edit_message_text("У вас нет объявлений", call.message.chat.id, call.message.message_id)
# bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,
# reply_markup=markup)
#
#
# else:
# for each in data:
# btn = telebot.types.InlineKeyboardButton(
# text=each.rType + ", " + each.paymentMethod + ", " + each.rate + " " + each.currency,
# callback_data=each.currency + "->" + each.rid)
# markup.row(btn)
# btn_add = telebot.types.InlineKeyboardButton("📝 Добавить объявление", callback_data='add request')
# back = telebot.types.InlineKeyboardButton(text="Назад",
# callback_data='exchange')
# markup.row(btn_add, back)
# bot.edit_message_text("Что-то там про объявления",
# call.message.chat.id, call.message.message_id, parse_mode="markdown")
# bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id, reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data == 'add request')
def add_request(call):
msg = call.message
bot.edit_message_text("Выберите валюту", msg.chat.id, msg.message_id, reply_markup=markups.request_curr())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')
def req_cur(call):
cur = call.data[4:]
msg = call.message
bot.edit_message_text("Выберите тип объявления", msg.chat.id, msg.message_id, reply_markup=markups.request_type())
@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')
@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')
def req_buy(call):
msg = call.message
ms = bot.send_message(msg.chat.id, "Метод оплаты", reply_markup=markups.pay_method())
bot.register_next_step_handler(ms, rate)
def rate(msg):
bot.send_message(msg.chat.id, "Курс")
@bot.message_handler(regexp="Настройки")
def settings(msg):
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'settings'), reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'settings')
def setings(call):
msg = call.message
bot.edit_message_text(base.get_text(msg.chat.id,'settings'), msg.chat.id, msg.message_id, reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == "chooselanguage")
def lang(call):
chat = call.message.chat
bot.edit_message_text( "Выберите язык",chat.id,call.message.message_id, reply_markup=langg())
@bot.callback_query_handler(func=lambda call: call.data == 'rate')
def rat(call):
msg = call.message
bot.edit_message_text("Выберите источник актульного курса", msg.chat.id, msg.message_id,
reply_markup=markups.rate())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')
def burses(call):
number_of_burse = call.data[5:]
msg = call.message
markup = telebot.types.InlineKeyboardMarkup()
bt_back_to_rates = telebot.types.InlineKeyboardButton(text="Вернуться к выбору биржы", callback_data='rate')
markup.add(bt_back_to_rates)
bot.edit_message_text("Для пары BTC/RUB теперь используются котировки биржи ...название...", msg.chat.id,
msg.message_id, reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data == 'address')
def address_cur(call):
msg = call.message
bot.edit_message_text("Выберите валюту", msg.chat.id, msg.message_id, reply_markup=markups.address())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')
def address(call):
msg = call.message
mes = bot.edit_message_text("Введите адрес", msg.chat.id, msg.message_id)
bot.register_next_step_handler(mes, enter_address)
def enter_address(msg):
new_address = msg
bot.send_message(msg.chat.id, "Информация сохранена")
@bot.message_handler(regexp="О сервисе")
def service(msg):
    """Placeholder reply for the 'О сервисе' (about) keyboard button."""
    chat_id = msg.chat.id
    bot.send_message(chat_id, "Нужно придумать")
if __name__ == "__main__":
    # Start the bot's blocking long-polling loop when run as a script.
    bot.polling()
    # start_bot()
|
normal
|
{
"blob_id": "7cc77de31adff5b4a394f117fc743cd6dd4bc06c",
"index": 6065,
"step-1": "<mask token>\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row('ok')\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\n 'confirm'), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.\n addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %\n msg.from_user.first_name, reply_markup=markups.welcome(),\n parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call\n .message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp='Выбор валюты')\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id, 'currency'),\n reply_markup=markups.currency())\n\n\n<mask token>\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text='English',\n callback_data='langeng')\n bt_rus = telebot.types.InlineKeyboardButton(text='Русский',\n callback_data='langrus')\n bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',\n callback_data='langukr')\n markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=\n markups.payments())\n\n\n<mask 
token>\n\n\n@bot.message_handler(regexp='Кошелёк')\ndef wallet(msg):\n bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),\n reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',\n msg.chat.id, msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.\n message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg = call.message\n bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg\n .message_id, reply_markup=markups.withdraw())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = call.data[4:]\n msg = call.message\n bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.\n message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups\n .pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, 'Курс')\n\n\n@bot.message_handler(regexp='Настройки')\ndef settings(msg):\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),\n 
reply_markup=markups.settings())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,\n reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,\n msg.message_id, reply_markup=markups.rate())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row('ok')\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\n 'confirm'), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.\n addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %\n msg.from_user.first_name, reply_markup=markups.welcome(),\n parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call\n .message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp='Выбор валюты')\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id, 'currency'),\n reply_markup=markups.currency())\n\n\n<mask token>\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text='English',\n callback_data='langeng')\n bt_rus = telebot.types.InlineKeyboardButton(text='Русский',\n callback_data='langrus')\n bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',\n callback_data='langukr')\n markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Назад')\ndef back(msg):\n bot.send_message(msg.chat.id, 'Операции покупки или продажи',\n reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),\n 
reply_markup=markups.menu())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=\n markups.payments())\n\n\n<mask token>\n\n\n@bot.message_handler(regexp='Кошелёк')\ndef wallet(msg):\n bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),\n reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',\n msg.chat.id, msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.\n message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg = call.message\n bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg\n .message_id, reply_markup=markups.withdraw())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = call.data[4:]\n msg = call.message\n bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.\n message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups\n 
.pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, 'Курс')\n\n\n@bot.message_handler(regexp='Настройки')\ndef settings(msg):\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),\n reply_markup=markups.settings())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,\n reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,\n msg.message_id, reply_markup=markups.rate())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n chat = message.chat\n msg = bot.send_message(chat.id, 'Select a language in the list',\n reply_markup=markups.language())\n bot.register_next_step_handler(msg, llanguage)\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row('ok')\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\n 'confirm'), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.\n addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %\n msg.from_user.first_name, reply_markup=markups.welcome(),\n parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call\n .message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp='Выбор валюты')\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id, 'currency'),\n reply_markup=markups.currency())\n\n\n<mask token>\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text='English',\n callback_data='langeng')\n bt_rus = telebot.types.InlineKeyboardButton(text='Русский',\n callback_data='langrus')\n bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',\n callback_data='langukr')\n markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'requests')\ndef my_requests(call):\n text = base.get_text(call.message.chat.id, 'no_req')\n bot.edit_message_text(text, call.message.chat.id, call.message.message_id)\n 
bot.edit_message_reply_markup(call.message.chat.id, call.message.\n message_id, reply_markup=markups.add_request(call.message.chat.id))\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Назад')\ndef back(msg):\n bot.send_message(msg.chat.id, 'Операции покупки или продажи',\n reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),\n reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Обменные операции')\ndef exchange(msg):\n bot.send_message(msg.chat.id, 'Купить/Продать', reply_markup=markups.\n exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'exchamge'),\n reply_markup=markups.exchangeI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'buy')\ndef buy(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка', reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id, 'buycur'),\n reply_markup=markups.buyI_sellI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=\n markups.payments())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'sell')\ndef sell(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Продажа', reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id, 'sellcur'),\n reply_markup=markups.buyI_sellI())\n\n\n@bot.message_handler(regexp='Кошелёк')\ndef wallet(msg):\n bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),\n reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: 
call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',\n msg.chat.id, msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.\n message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg = call.message\n bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg\n .message_id, reply_markup=markups.withdraw())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')\ndef wwithdraw(call):\n msg = call.message\n bot.edit_message_text('Введите сколько вывести' + call.data[5:], msg.\n chat.id, msg.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'my requests')\ndef user_requests(call):\n bot.send_message(call.message.chat.id, 'Если нужно,то просто раскомменти')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = call.data[4:]\n msg = call.message\n bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.\n message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups\n .pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, 'Курс')\n\n\n@bot.message_handler(regexp='Настройки')\ndef settings(msg):\n 
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),\n reply_markup=markups.settings())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,\n reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,\n msg.message_id, reply_markup=markups.rate())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')\ndef burses(call):\n number_of_burse = call.data[5:]\n msg = call.message\n markup = telebot.types.InlineKeyboardMarkup()\n bt_back_to_rates = telebot.types.InlineKeyboardButton(text=\n 'Вернуться к выбору биржы', callback_data='rate')\n markup.add(bt_back_to_rates)\n bot.edit_message_text(\n 'Для пары BTC/RUB теперь используются котировки биржи ...название...',\n msg.chat.id, msg.message_id, reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'address')\ndef address_cur(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.address())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\n<mask token>\n\n\n@bot.message_handler(regexp='О сервисе')\ndef service(msg):\n bot.send_message(msg.chat.id, 'Нужно придумать')\n\n\n<mask token>\n",
"step-4": "import base\nimport telebot\nimport markups\nfrom starter import start_bot, bot\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n chat = message.chat\n msg = bot.send_message(chat.id, 'Select a language in the list',\n reply_markup=markups.language())\n bot.register_next_step_handler(msg, llanguage)\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row('ok')\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\n 'confirm'), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.\n addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %\n msg.from_user.first_name, reply_markup=markups.welcome(),\n parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call\n .message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp='Выбор валюты')\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id, 'currency'),\n reply_markup=markups.currency())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'ccur')\ndef currency(call):\n current_currency = call.data[4:]\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text='English',\n callback_data='langeng')\n bt_rus = telebot.types.InlineKeyboardButton(text='Русский',\n callback_data='langrus')\n bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',\n callback_data='langukr')\n 
markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'lang')\ndef lan(call):\n chat = call.message.chat\n new_lan = call.data[4:]\n bot.edit_message_text('Вы выбрали язык', chat.id, call.message.\n message_id, reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'requests')\ndef my_requests(call):\n text = base.get_text(call.message.chat.id, 'no_req')\n bot.edit_message_text(text, call.message.chat.id, call.message.message_id)\n bot.edit_message_reply_markup(call.message.chat.id, call.message.\n message_id, reply_markup=markups.add_request(call.message.chat.id))\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Назад')\ndef back(msg):\n bot.send_message(msg.chat.id, 'Операции покупки или продажи',\n reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),\n reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Обменные операции')\ndef exchange(msg):\n bot.send_message(msg.chat.id, 'Купить/Продать', reply_markup=markups.\n exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'exchamge'),\n reply_markup=markups.exchangeI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'buy')\ndef buy(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка', reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id, 'buycur'),\n reply_markup=markups.buyI_sellI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=\n 
markups.payments())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'sell')\ndef sell(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Продажа', reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id, 'sellcur'),\n reply_markup=markups.buyI_sellI())\n\n\n@bot.message_handler(regexp='Кошелёк')\ndef wallet(msg):\n bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),\n reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',\n msg.chat.id, msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.\n message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg = call.message\n bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg\n .message_id, reply_markup=markups.withdraw())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')\ndef wwithdraw(call):\n msg = call.message\n bot.edit_message_text('Введите сколько вывести' + call.data[5:], msg.\n chat.id, msg.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'my requests')\ndef user_requests(call):\n bot.send_message(call.message.chat.id, 'Если нужно,то просто раскомменти')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = 
call.data[4:]\n msg = call.message\n bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.\n message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups\n .pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, 'Курс')\n\n\n@bot.message_handler(regexp='Настройки')\ndef settings(msg):\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),\n reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'settings')\ndef setings(call):\n msg = call.message\n bot.edit_message_text(base.get_text(msg.chat.id, 'settings'), msg.chat.\n id, msg.message_id, reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,\n reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,\n msg.message_id, reply_markup=markups.rate())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')\ndef burses(call):\n number_of_burse = call.data[5:]\n msg = call.message\n markup = telebot.types.InlineKeyboardMarkup()\n bt_back_to_rates = telebot.types.InlineKeyboardButton(text=\n 'Вернуться к выбору биржы', callback_data='rate')\n markup.add(bt_back_to_rates)\n bot.edit_message_text(\n 'Для пары BTC/RUB теперь используются котировки биржи ...название...',\n msg.chat.id, msg.message_id, reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'address')\ndef 
address_cur(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.address())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\ndef enter_address(msg):\n new_address = msg\n bot.send_message(msg.chat.id, 'Информация сохранена')\n\n\n@bot.message_handler(regexp='О сервисе')\ndef service(msg):\n bot.send_message(msg.chat.id, 'Нужно придумать')\n\n\nif __name__ == '__main__':\n bot.polling()\n",
"step-5": "import base\nimport telebot\nimport markups\nfrom starter import start_bot, bot\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n chat = message.chat\n # welcome(msg)\n msg = bot.send_message(chat.id, \"Select a language in the list\", reply_markup=markups.language())\n bot.register_next_step_handler(msg, llanguage)\n # base.create_user(chat.id)\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row(\"ok\")\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\"confirm\"), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, \"Чат-поддержка\", reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') % msg.from_user.first_name,\n reply_markup=markups.welcome(), parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id,'currency'), chat.id, call.message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp=\"Выбор валюты\")\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id,'currency'), reply_markup=markups.currency())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'ccur')\ndef currency(call):\n current_currency = call.data[4:] # Выбранная валюта\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text=\"English\", callback_data=\"langeng\")\n bt_rus = telebot.types.InlineKeyboardButton(text=\"Русский\", callback_data=\"langrus\")\n bt_ukr = 
telebot.types.InlineKeyboardButton(text=\"Украiнський\", callback_data=\"langukr\")\n markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == \"lang\")\ndef lan(call):\n chat = call.message.chat\n new_lan = call.data[4:]\n bot.edit_message_text( \"Вы выбрали язык\",chat.id,call.message.message_id,reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'requests')\ndef my_requests(call):\n text = base.get_text(call.message.chat.id, 'no_req')\n bot.edit_message_text(text, call.message.chat.id, call.message.message_id)\n bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,\n reply_markup=markups.add_request(call.message.chat.id))\n\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp=\"Назад\")\ndef back(msg):\n bot.send_message(msg.chat.id, \"Операции покупки или продажи\", reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\"operations\"), reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp=\"Обменные операции\")\ndef exchange(msg):\n bot.send_message(msg.chat.id, \"Купить/Продать\", reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\"exchamge\"), reply_markup=markups.exchangeI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'buy')\ndef buy(call):\n chat = call.message.chat\n bot.send_message(chat.id, \"Покупка\", reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id,'buycur'), reply_markup=markups.buyI_sellI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n 
bot.send_message(chat.id, \"Покупка/Продажа Monero\", reply_markup=markups.payments())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'sell')\ndef sell(call):\n chat = call.message.chat\n bot.send_message(chat.id, \"Продажа\", reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id,'sellcur'), reply_markup=markups.buyI_sellI())\n\n\n@bot.message_handler(regexp=\"Кошелёк\")\ndef wallet(msg):\n bot.send_message(msg.chat.id, \"Кошелёк\", reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'wallet'), reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text(\"Выберете валюту на счёт которой придут бабосы\", msg.chat.id,\n msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text(\"Внесите \" + call.data[6:], msg.chat.id, msg.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg=call.message\n bot.edit_message_text(\"С какой валюты списать бобосы\",msg.chat.id,msg.message_id,reply_markup=markups.withdraw())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')\ndef wwithdraw(call):\n msg=call.message\n bot.edit_message_text(\"Введите сколько вывести\" + call.data[5:],msg.chat.id,msg.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"my requests\")\ndef user_requests(call):\n bot.send_message(call.message.chat.id, \"Если нужно,то просто раскомменти\")\n # markup = telebot.types.InlineKeyboardMarkup()\n # data = base.get_user_requests(call.message.chat.id)\n # val = base.get_user_value(call.message.chat.id)\n # if not data:\n # btn_add = telebot.types.InlineKeyboardButton(\"📝 Добавить объявление\", callback_data='add request')\n # back = 
telebot.types.InlineKeyboardButton(text=\"Назад\",\n # callback_data='exchange')\n # markup.row(btn_add, back)\n # bot.edit_message_text(\"У вас нет объявлений\", call.message.chat.id, call.message.message_id)\n # bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,\n # reply_markup=markup)\n #\n #\n # else:\n # for each in data:\n # btn = telebot.types.InlineKeyboardButton(\n # text=each.rType + \", \" + each.paymentMethod + \", \" + each.rate + \" \" + each.currency,\n # callback_data=each.currency + \"->\" + each.rid)\n # markup.row(btn)\n # btn_add = telebot.types.InlineKeyboardButton(\"📝 Добавить объявление\", callback_data='add request')\n # back = telebot.types.InlineKeyboardButton(text=\"Назад\",\n # callback_data='exchange')\n # markup.row(btn_add, back)\n # bot.edit_message_text(\"Что-то там про объявления\",\n # call.message.chat.id, call.message.message_id, parse_mode=\"markdown\")\n # bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id, reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text(\"Выберите валюту\", msg.chat.id, msg.message_id, reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = call.data[4:]\n msg = call.message\n bot.edit_message_text(\"Выберите тип объявления\", msg.chat.id, msg.message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, \"Метод оплаты\", reply_markup=markups.pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, \"Курс\")\n\n\n@bot.message_handler(regexp=\"Настройки\")\ndef settings(msg):\n 
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'settings'), reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'settings')\ndef setings(call):\n msg = call.message\n bot.edit_message_text(base.get_text(msg.chat.id,'settings'), msg.chat.id, msg.message_id, reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"chooselanguage\")\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text( \"Выберите язык\",chat.id,call.message.message_id, reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text(\"Выберите источник актульного курса\", msg.chat.id, msg.message_id,\n reply_markup=markups.rate())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')\ndef burses(call):\n number_of_burse = call.data[5:]\n msg = call.message\n markup = telebot.types.InlineKeyboardMarkup()\n bt_back_to_rates = telebot.types.InlineKeyboardButton(text=\"Вернуться к выбору биржы\", callback_data='rate')\n markup.add(bt_back_to_rates)\n bot.edit_message_text(\"Для пары BTC/RUB теперь используются котировки биржи ...название...\", msg.chat.id,\n msg.message_id, reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'address')\ndef address_cur(call):\n msg = call.message\n bot.edit_message_text(\"Выберите валюту\", msg.chat.id, msg.message_id, reply_markup=markups.address())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text(\"Введите адрес\", msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\ndef enter_address(msg):\n new_address = msg\n bot.send_message(msg.chat.id, \"Информация сохранена\")\n\n\n@bot.message_handler(regexp=\"О сервисе\")\ndef service(msg):\n bot.send_message(msg.chat.id,\"Нужно 
придумать\")\n\n\nif __name__ == \"__main__\":\n bot.polling()\n # start_bot()\n",
"step-ids": [
19,
20,
30,
36,
37
]
}
|
[
19,
20,
30,
36,
37
] |
<|reserved_special_token_0|>
class Dscanner(Linter):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Dscanner(Linter):
<|reserved_special_token_0|>
cmd = 'dscanner', '-S', '${file}'
regex = (
'^.+?\\((?P<line>\\d+):(?P<col>\\d+)\\)\\[((?P<warning>warn)|(?P<error>error))\\]: (?P<message>.+)$'
)
multiline = False
tempfile_suffix = '-'
word_re = None
defaults = {'selector': 'source.d'}
name = 'D-Scanner'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Dscanner(Linter):
"""Provides an interface to dscanner."""
cmd = 'dscanner', '-S', '${file}'
regex = (
'^.+?\\((?P<line>\\d+):(?P<col>\\d+)\\)\\[((?P<warning>warn)|(?P<error>error))\\]: (?P<message>.+)$'
)
multiline = False
tempfile_suffix = '-'
word_re = None
defaults = {'selector': 'source.d'}
name = 'D-Scanner'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from SublimeLinter.lint import Linter, STREAM_STDOUT
class Dscanner(Linter):
"""Provides an interface to dscanner."""
cmd = 'dscanner', '-S', '${file}'
regex = (
'^.+?\\((?P<line>\\d+):(?P<col>\\d+)\\)\\[((?P<warning>warn)|(?P<error>error))\\]: (?P<message>.+)$'
)
multiline = False
tempfile_suffix = '-'
word_re = None
defaults = {'selector': 'source.d'}
name = 'D-Scanner'
<|reserved_special_token_1|>
#
# linter.py
# Linter for SublimeLinter version 4.
#
# Written by Brian Schott (Hackerpilot)
# Copyright © 2014-2019 Economic Modeling Specialists, Intl.
#
# License: MIT
#
"""This module exports the D-Scanner plugin class."""
from SublimeLinter.lint import Linter, STREAM_STDOUT
class Dscanner(Linter):
"""Provides an interface to dscanner."""
cmd = ("dscanner", "-S", "${file}")
regex = r'^.+?\((?P<line>\d+):(?P<col>\d+)\)\[((?P<warning>warn)|(?P<error>error))\]: (?P<message>.+)$'
multiline = False
tempfile_suffix = "-"
word_re = None
defaults = {
"selector": "source.d"
}
name = "D-Scanner"
|
flexible
|
{
"blob_id": "fda73b5dac038f077da460d6ebfb432b756909d9",
"index": 3125,
"step-1": "<mask token>\n\n\nclass Dscanner(Linter):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Dscanner(Linter):\n <mask token>\n cmd = 'dscanner', '-S', '${file}'\n regex = (\n '^.+?\\\\((?P<line>\\\\d+):(?P<col>\\\\d+)\\\\)\\\\[((?P<warning>warn)|(?P<error>error))\\\\]: (?P<message>.+)$'\n )\n multiline = False\n tempfile_suffix = '-'\n word_re = None\n defaults = {'selector': 'source.d'}\n name = 'D-Scanner'\n",
"step-3": "<mask token>\n\n\nclass Dscanner(Linter):\n \"\"\"Provides an interface to dscanner.\"\"\"\n cmd = 'dscanner', '-S', '${file}'\n regex = (\n '^.+?\\\\((?P<line>\\\\d+):(?P<col>\\\\d+)\\\\)\\\\[((?P<warning>warn)|(?P<error>error))\\\\]: (?P<message>.+)$'\n )\n multiline = False\n tempfile_suffix = '-'\n word_re = None\n defaults = {'selector': 'source.d'}\n name = 'D-Scanner'\n",
"step-4": "<mask token>\nfrom SublimeLinter.lint import Linter, STREAM_STDOUT\n\n\nclass Dscanner(Linter):\n \"\"\"Provides an interface to dscanner.\"\"\"\n cmd = 'dscanner', '-S', '${file}'\n regex = (\n '^.+?\\\\((?P<line>\\\\d+):(?P<col>\\\\d+)\\\\)\\\\[((?P<warning>warn)|(?P<error>error))\\\\]: (?P<message>.+)$'\n )\n multiline = False\n tempfile_suffix = '-'\n word_re = None\n defaults = {'selector': 'source.d'}\n name = 'D-Scanner'\n",
"step-5": "#\n# linter.py\n# Linter for SublimeLinter version 4.\n#\n# Written by Brian Schott (Hackerpilot)\n# Copyright © 2014-2019 Economic Modeling Specialists, Intl.\n#\n# License: MIT\n#\n\n\"\"\"This module exports the D-Scanner plugin class.\"\"\"\n\nfrom SublimeLinter.lint import Linter, STREAM_STDOUT\n\n\nclass Dscanner(Linter):\n\n \"\"\"Provides an interface to dscanner.\"\"\"\n\n cmd = (\"dscanner\", \"-S\", \"${file}\")\n regex = r'^.+?\\((?P<line>\\d+):(?P<col>\\d+)\\)\\[((?P<warning>warn)|(?P<error>error))\\]: (?P<message>.+)$'\n multiline = False\n tempfile_suffix = \"-\"\n word_re = None\n defaults = {\n \"selector\": \"source.d\"\n }\n name = \"D-Scanner\"\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def buzz(pitch, duration):
peroid = 1.0 / pitch
delay = peroid / 2.0
cycles = int(duration * pitch)
for i in range(cycles):
gpio.output(buzzer_pin, True)
sleep(delay)
gpio.output(buzzer_pin, False)
sleep(delay)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
gpio.setmode(gpio.BCM)
gpio.setup(buzzer_pin, gpio.OUT)
def buzz(pitch, duration):
peroid = 1.0 / pitch
delay = peroid / 2.0
cycles = int(duration * pitch)
for i in range(cycles):
gpio.output(buzzer_pin, True)
sleep(delay)
gpio.output(buzzer_pin, False)
sleep(delay)
<|reserved_special_token_0|>
buzz(pitch, duration)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
buzzer_pin = 18
gpio.setmode(gpio.BCM)
gpio.setup(buzzer_pin, gpio.OUT)
def buzz(pitch, duration):
peroid = 1.0 / pitch
delay = peroid / 2.0
cycles = int(duration * pitch)
for i in range(cycles):
gpio.output(buzzer_pin, True)
sleep(delay)
gpio.output(buzzer_pin, False)
sleep(delay)
pitch = float(1000)
duration = float(2)
buzz(pitch, duration)
<|reserved_special_token_1|>
from time import sleep
import RPi.GPIO as gpio
buzzer_pin = 18
gpio.setmode(gpio.BCM)
gpio.setup(buzzer_pin, gpio.OUT)
def buzz(pitch, duration):
peroid = 1.0 / pitch
delay = peroid / 2.0
cycles = int(duration * pitch)
for i in range(cycles):
gpio.output(buzzer_pin, True)
sleep(delay)
gpio.output(buzzer_pin, False)
sleep(delay)
pitch = float(1000)
duration = float(2)
buzz(pitch, duration)
|
flexible
|
{
"blob_id": "149ac778a552fac4499d7146db8600c91c68c60e",
"index": 4479,
"step-1": "<mask token>\n\n\ndef buzz(pitch, duration):\n peroid = 1.0 / pitch\n delay = peroid / 2.0\n cycles = int(duration * pitch)\n for i in range(cycles):\n gpio.output(buzzer_pin, True)\n sleep(delay)\n gpio.output(buzzer_pin, False)\n sleep(delay)\n\n\n<mask token>\n",
"step-2": "<mask token>\ngpio.setmode(gpio.BCM)\ngpio.setup(buzzer_pin, gpio.OUT)\n\n\ndef buzz(pitch, duration):\n peroid = 1.0 / pitch\n delay = peroid / 2.0\n cycles = int(duration * pitch)\n for i in range(cycles):\n gpio.output(buzzer_pin, True)\n sleep(delay)\n gpio.output(buzzer_pin, False)\n sleep(delay)\n\n\n<mask token>\nbuzz(pitch, duration)\n",
"step-3": "<mask token>\nbuzzer_pin = 18\ngpio.setmode(gpio.BCM)\ngpio.setup(buzzer_pin, gpio.OUT)\n\n\ndef buzz(pitch, duration):\n peroid = 1.0 / pitch\n delay = peroid / 2.0\n cycles = int(duration * pitch)\n for i in range(cycles):\n gpio.output(buzzer_pin, True)\n sleep(delay)\n gpio.output(buzzer_pin, False)\n sleep(delay)\n\n\npitch = float(1000)\nduration = float(2)\nbuzz(pitch, duration)\n",
"step-4": "from time import sleep\nimport RPi.GPIO as gpio\nbuzzer_pin = 18\ngpio.setmode(gpio.BCM)\ngpio.setup(buzzer_pin, gpio.OUT)\n\n\ndef buzz(pitch, duration):\n peroid = 1.0 / pitch\n delay = peroid / 2.0\n cycles = int(duration * pitch)\n for i in range(cycles):\n gpio.output(buzzer_pin, True)\n sleep(delay)\n gpio.output(buzzer_pin, False)\n sleep(delay)\n\n\npitch = float(1000)\nduration = float(2)\nbuzz(pitch, duration)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class CopyResAction:
<|reserved_special_token_0|>
default_option = None
res_root = None
packing_root = None
ignore_list = []
def setResRoot(self, root):
self.res_root = root
pass
def setPackingRoot(self, root):
self.packing_root = root
pass
def setDefaultOption(self, option):
self.default_option = option
pass
def go(self, config):
ext_list = []
input_list = config['input']
if not config['options']['cpall']:
if 'cpextlist' in config['options']:
ext_list = config['options']['cpextlist'].split(',')
for input_file_path in input_list:
basedir, filename = os.path.split(input_file_path)
name, fext = os.path.splitext(filename)
for ext in ext_list:
if ext == fext:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],
os.path.relpath(input_file_dir, config[
'config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'
], d_dir, os.path.relpath(
input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path +
' to ' + dest_dir)
shutil.copy2(input_file_path, dest_dir)
if 'filenames' in config['options']:
filenames_list = config['options']['filenames'].split(',')
for filename in filenames_list:
for input_file_path in input_list:
dirname, input_file_name = os.path.split(
input_file_path)
if filename == input_file_name:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],
os.path.relpath(input_file_dir, config[
'config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'
], d_dir, os.path.relpath(
input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path +
' to ' + dest_dir)
shutil.copy2(input_file_path, dest_dir)
else:
for input_file_path in input_list:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'], os.path.
relpath(input_file_dir, config['config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'], d_dir, os
.path.relpath(input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +
dest_dir)
shutil.copy2(input_file_path, dest_dir)
pass
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run():
logger.debug('CopyRes')
pass
<|reserved_special_token_0|>
class CopyResAction:
"""根据资源配置文件直接复制资源到目标目录"""
default_option = None
res_root = None
packing_root = None
ignore_list = []
def setResRoot(self, root):
self.res_root = root
pass
def setPackingRoot(self, root):
self.packing_root = root
pass
def setDefaultOption(self, option):
self.default_option = option
pass
def go(self, config):
ext_list = []
input_list = config['input']
if not config['options']['cpall']:
if 'cpextlist' in config['options']:
ext_list = config['options']['cpextlist'].split(',')
for input_file_path in input_list:
basedir, filename = os.path.split(input_file_path)
name, fext = os.path.splitext(filename)
for ext in ext_list:
if ext == fext:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],
os.path.relpath(input_file_dir, config[
'config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'
], d_dir, os.path.relpath(
input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path +
' to ' + dest_dir)
shutil.copy2(input_file_path, dest_dir)
if 'filenames' in config['options']:
filenames_list = config['options']['filenames'].split(',')
for filename in filenames_list:
for input_file_path in input_list:
dirname, input_file_name = os.path.split(
input_file_path)
if filename == input_file_name:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],
os.path.relpath(input_file_dir, config[
'config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'
], d_dir, os.path.relpath(
input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path +
' to ' + dest_dir)
shutil.copy2(input_file_path, dest_dir)
else:
for input_file_path in input_list:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'], os.path.
relpath(input_file_dir, config['config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'], d_dir, os
.path.relpath(input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +
dest_dir)
shutil.copy2(input_file_path, dest_dir)
pass
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
<|reserved_special_token_0|>
logger = utils.getLogger('CopyRes')
def run():
logger.debug('CopyRes')
pass
def run_with_configs(configs, tp=None):
logger.debug('Executing NCopyRes')
apaction = CopyResAction()
apaction.go(configs)
pass
def safeRemoveDir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
pass
def clean_output(configs):
default_output_path = configs['output-root']
safeRemoveDir(default_output_path)
pass
class CopyResAction:
"""根据资源配置文件直接复制资源到目标目录"""
default_option = None
res_root = None
packing_root = None
ignore_list = []
def setResRoot(self, root):
self.res_root = root
pass
def setPackingRoot(self, root):
self.packing_root = root
pass
def setDefaultOption(self, option):
self.default_option = option
pass
def go(self, config):
ext_list = []
input_list = config['input']
if not config['options']['cpall']:
if 'cpextlist' in config['options']:
ext_list = config['options']['cpextlist'].split(',')
for input_file_path in input_list:
basedir, filename = os.path.split(input_file_path)
name, fext = os.path.splitext(filename)
for ext in ext_list:
if ext == fext:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],
os.path.relpath(input_file_dir, config[
'config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'
], d_dir, os.path.relpath(
input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path +
' to ' + dest_dir)
shutil.copy2(input_file_path, dest_dir)
if 'filenames' in config['options']:
filenames_list = config['options']['filenames'].split(',')
for filename in filenames_list:
for input_file_path in input_list:
dirname, input_file_name = os.path.split(
input_file_path)
if filename == input_file_name:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],
os.path.relpath(input_file_dir, config[
'config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'
], d_dir, os.path.relpath(
input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path +
' to ' + dest_dir)
shutil.copy2(input_file_path, dest_dir)
else:
for input_file_path in input_list:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'], os.path.
relpath(input_file_dir, config['config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'], d_dir, os
.path.relpath(input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +
dest_dir)
shutil.copy2(input_file_path, dest_dir)
pass
pass
<|reserved_special_token_1|>
import yaml
import os
import os.path
import shutil
import json
import subprocess
import sys
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
import rtool.taskplugin.plugin.MultiProcessRunner as MultiProcessRunner
import rtool.utils as utils
logger = utils.getLogger('CopyRes')
def run():
logger.debug('CopyRes')
pass
def run_with_configs(configs, tp=None):
logger.debug('Executing NCopyRes')
apaction = CopyResAction()
apaction.go(configs)
pass
def safeRemoveDir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
pass
def clean_output(configs):
default_output_path = configs['output-root']
safeRemoveDir(default_output_path)
pass
class CopyResAction:
"""根据资源配置文件直接复制资源到目标目录"""
default_option = None
res_root = None
packing_root = None
ignore_list = []
def setResRoot(self, root):
self.res_root = root
pass
def setPackingRoot(self, root):
self.packing_root = root
pass
def setDefaultOption(self, option):
self.default_option = option
pass
def go(self, config):
ext_list = []
input_list = config['input']
if not config['options']['cpall']:
if 'cpextlist' in config['options']:
ext_list = config['options']['cpextlist'].split(',')
for input_file_path in input_list:
basedir, filename = os.path.split(input_file_path)
name, fext = os.path.splitext(filename)
for ext in ext_list:
if ext == fext:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],
os.path.relpath(input_file_dir, config[
'config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'
], d_dir, os.path.relpath(
input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path +
' to ' + dest_dir)
shutil.copy2(input_file_path, dest_dir)
if 'filenames' in config['options']:
filenames_list = config['options']['filenames'].split(',')
for filename in filenames_list:
for input_file_path in input_list:
dirname, input_file_name = os.path.split(
input_file_path)
if filename == input_file_name:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],
os.path.relpath(input_file_dir, config[
'config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'
], d_dir, os.path.relpath(
input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path +
' to ' + dest_dir)
shutil.copy2(input_file_path, dest_dir)
else:
for input_file_path in input_list:
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'], os.path.
relpath(input_file_dir, config['config-root']))
dest_dir = config['output-root']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'], d_dir, os
.path.relpath(input_file_dir, config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +
dest_dir)
shutil.copy2(input_file_path, dest_dir)
pass
pass
<|reserved_special_token_1|>
#coding=utf-8
import yaml
import os
import os.path
import shutil
import json
import subprocess
import sys
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
import rtool.taskplugin.plugin.MultiProcessRunner as MultiProcessRunner
import rtool.utils as utils
logger = utils.getLogger('CopyRes')
def run():
logger.debug("CopyRes")
pass
def run_with_configs(configs,tp=None):
logger.debug("Executing NCopyRes")
apaction = CopyResAction()
apaction.go(configs)
pass
def safeRemoveDir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
pass
def clean_output(configs):
default_output_path = configs["output-root"]
safeRemoveDir(default_output_path)
pass
class CopyResAction:
"""根据资源配置文件直接复制资源到目标目录"""
default_option = None
res_root = None
packing_root = None
ignore_list=[]
def setResRoot(self,root):
self.res_root = root
pass
def setPackingRoot(self,root):
self.packing_root = root
pass
def setDefaultOption(self,option):
self.default_option = option
pass
def go(self,config):
ext_list = []
input_list = config['input']
if not config['options']['cpall']:
if 'cpextlist' in config['options']:
ext_list = config['options']['cpextlist'].split(',')
for input_file_path in input_list:
basedir,filename = os.path.split(input_file_path)
name,fext = os.path.splitext(filename)
for ext in ext_list:
if ext == fext:
# 保留目录结构的为相对于配置项根目录的层级
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
dest_dir = config['output-root']
# d_dir = config['output']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
shutil.copy2(input_file_path,dest_dir)
if 'filenames' in config['options']:
filenames_list = config['options']['filenames'].split(',')
for filename in filenames_list:
for input_file_path in input_list:
dirname,input_file_name = os.path.split(input_file_path)
if filename==input_file_name:
# 保留目录结构的为相对于配置项根目录的层级
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
dest_dir = config['output-root']
# d_dir = config['output']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
shutil.copy2(input_file_path,dest_dir)
else:
for input_file_path in input_list:
# 保留目录结构的为相对于配置项根目录的层级
input_file_dir = os.path.dirname(input_file_path)
dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
dest_dir = config['output-root']
# d_dir = config['output']
if 'dst' in config['options']:
d_dir = config['options']['dst']
dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
shutil.copy2(input_file_path,dest_dir)
pass
pass
|
flexible
|
{
"blob_id": "364150d6f37329c43bead0d18da90f0f6ce9cd1b",
"index": 4886,
"step-1": "<mask token>\n\n\nclass CopyResAction:\n <mask token>\n default_option = None\n res_root = None\n packing_root = None\n ignore_list = []\n\n def setResRoot(self, root):\n self.res_root = root\n pass\n\n def setPackingRoot(self, root):\n self.packing_root = root\n pass\n\n def setDefaultOption(self, option):\n self.default_option = option\n pass\n\n def go(self, config):\n ext_list = []\n input_list = config['input']\n if not config['options']['cpall']:\n if 'cpextlist' in config['options']:\n ext_list = config['options']['cpextlist'].split(',')\n for input_file_path in input_list:\n basedir, filename = os.path.split(input_file_path)\n name, fext = os.path.splitext(filename)\n for ext in ext_list:\n if ext == fext:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n if 'filenames' in config['options']:\n filenames_list = config['options']['filenames'].split(',')\n for filename in filenames_list:\n for input_file_path in input_list:\n dirname, input_file_name = os.path.split(\n input_file_path)\n if filename == input_file_name:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n 
logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n else:\n for input_file_path in input_list:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'], os.path.\n relpath(input_file_dir, config['config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'], d_dir, os\n .path.relpath(input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +\n dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n pass\n pass\n",
"step-2": "<mask token>\n\n\ndef run():\n logger.debug('CopyRes')\n pass\n\n\n<mask token>\n\n\nclass CopyResAction:\n \"\"\"根据资源配置文件直接复制资源到目标目录\"\"\"\n default_option = None\n res_root = None\n packing_root = None\n ignore_list = []\n\n def setResRoot(self, root):\n self.res_root = root\n pass\n\n def setPackingRoot(self, root):\n self.packing_root = root\n pass\n\n def setDefaultOption(self, option):\n self.default_option = option\n pass\n\n def go(self, config):\n ext_list = []\n input_list = config['input']\n if not config['options']['cpall']:\n if 'cpextlist' in config['options']:\n ext_list = config['options']['cpextlist'].split(',')\n for input_file_path in input_list:\n basedir, filename = os.path.split(input_file_path)\n name, fext = os.path.splitext(filename)\n for ext in ext_list:\n if ext == fext:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n if 'filenames' in config['options']:\n filenames_list = config['options']['filenames'].split(',')\n for filename in filenames_list:\n for input_file_path in input_list:\n dirname, input_file_name = os.path.split(\n input_file_path)\n if filename == input_file_name:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n 
input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n else:\n for input_file_path in input_list:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'], os.path.\n relpath(input_file_dir, config['config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'], d_dir, os\n .path.relpath(input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +\n dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n pass\n pass\n",
"step-3": "<mask token>\nsys.path.append(os.path.split(os.path.realpath(__file__))[0])\n<mask token>\nlogger = utils.getLogger('CopyRes')\n\n\ndef run():\n logger.debug('CopyRes')\n pass\n\n\ndef run_with_configs(configs, tp=None):\n logger.debug('Executing NCopyRes')\n apaction = CopyResAction()\n apaction.go(configs)\n pass\n\n\ndef safeRemoveDir(dir_path):\n if os.path.exists(dir_path):\n shutil.rmtree(dir_path)\n pass\n\n\ndef clean_output(configs):\n default_output_path = configs['output-root']\n safeRemoveDir(default_output_path)\n pass\n\n\nclass CopyResAction:\n \"\"\"根据资源配置文件直接复制资源到目标目录\"\"\"\n default_option = None\n res_root = None\n packing_root = None\n ignore_list = []\n\n def setResRoot(self, root):\n self.res_root = root\n pass\n\n def setPackingRoot(self, root):\n self.packing_root = root\n pass\n\n def setDefaultOption(self, option):\n self.default_option = option\n pass\n\n def go(self, config):\n ext_list = []\n input_list = config['input']\n if not config['options']['cpall']:\n if 'cpextlist' in config['options']:\n ext_list = config['options']['cpextlist'].split(',')\n for input_file_path in input_list:\n basedir, filename = os.path.split(input_file_path)\n name, fext = os.path.splitext(filename)\n for ext in ext_list:\n if ext == fext:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n if 'filenames' in config['options']:\n filenames_list = config['options']['filenames'].split(',')\n for filename in filenames_list:\n for input_file_path in 
input_list:\n dirname, input_file_name = os.path.split(\n input_file_path)\n if filename == input_file_name:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n else:\n for input_file_path in input_list:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'], os.path.\n relpath(input_file_dir, config['config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'], d_dir, os\n .path.relpath(input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +\n dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n pass\n pass\n",
"step-4": "import yaml\nimport os\nimport os.path\nimport shutil\nimport json\nimport subprocess\nimport sys\nsys.path.append(os.path.split(os.path.realpath(__file__))[0])\nimport rtool.taskplugin.plugin.MultiProcessRunner as MultiProcessRunner\nimport rtool.utils as utils\nlogger = utils.getLogger('CopyRes')\n\n\ndef run():\n logger.debug('CopyRes')\n pass\n\n\ndef run_with_configs(configs, tp=None):\n logger.debug('Executing NCopyRes')\n apaction = CopyResAction()\n apaction.go(configs)\n pass\n\n\ndef safeRemoveDir(dir_path):\n if os.path.exists(dir_path):\n shutil.rmtree(dir_path)\n pass\n\n\ndef clean_output(configs):\n default_output_path = configs['output-root']\n safeRemoveDir(default_output_path)\n pass\n\n\nclass CopyResAction:\n \"\"\"根据资源配置文件直接复制资源到目标目录\"\"\"\n default_option = None\n res_root = None\n packing_root = None\n ignore_list = []\n\n def setResRoot(self, root):\n self.res_root = root\n pass\n\n def setPackingRoot(self, root):\n self.packing_root = root\n pass\n\n def setDefaultOption(self, option):\n self.default_option = option\n pass\n\n def go(self, config):\n ext_list = []\n input_list = config['input']\n if not config['options']['cpall']:\n if 'cpextlist' in config['options']:\n ext_list = config['options']['cpextlist'].split(',')\n for input_file_path in input_list:\n basedir, filename = os.path.split(input_file_path)\n name, fext = os.path.splitext(filename)\n for ext in ext_list:\n if ext == fext:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, 
dest_dir)\n if 'filenames' in config['options']:\n filenames_list = config['options']['filenames'].split(',')\n for filename in filenames_list:\n for input_file_path in input_list:\n dirname, input_file_name = os.path.split(\n input_file_path)\n if filename == input_file_name:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n else:\n for input_file_path in input_list:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'], os.path.\n relpath(input_file_dir, config['config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'], d_dir, os\n .path.relpath(input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +\n dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n pass\n pass\n",
"step-5": "#coding=utf-8\nimport yaml\nimport os\nimport os.path\nimport shutil\nimport json\nimport subprocess\nimport sys\nsys.path.append(os.path.split(os.path.realpath(__file__))[0])\nimport rtool.taskplugin.plugin.MultiProcessRunner as MultiProcessRunner\nimport rtool.utils as utils\n\nlogger = utils.getLogger('CopyRes')\n\ndef run():\n\tlogger.debug(\"CopyRes\")\n\tpass\n\ndef run_with_configs(configs,tp=None):\n\tlogger.debug(\"Executing NCopyRes\")\n\tapaction = CopyResAction()\n\tapaction.go(configs)\n\tpass\n\ndef safeRemoveDir(dir_path):\n\tif os.path.exists(dir_path):\n\t\tshutil.rmtree(dir_path)\n\tpass\n\ndef clean_output(configs):\n\tdefault_output_path = configs[\"output-root\"]\n\tsafeRemoveDir(default_output_path)\n\tpass\n\nclass CopyResAction:\n\t\"\"\"根据资源配置文件直接复制资源到目标目录\"\"\"\n\t\n\tdefault_option = None\n\n\tres_root = None\n\tpacking_root = None\n\tignore_list=[]\n\n\tdef setResRoot(self,root):\n\t\tself.res_root = root\n\t\tpass\n\tdef setPackingRoot(self,root):\n\t\tself.packing_root = root\n\t\tpass\n\tdef setDefaultOption(self,option):\n\t\tself.default_option = option\n\t\tpass\n\n\tdef go(self,config):\n\n\t\text_list = []\n\t\tinput_list = config['input']\n\t\tif not config['options']['cpall']:\n\t\t\tif 'cpextlist' in config['options']:\n\t\t\t\text_list = config['options']['cpextlist'].split(',')\n\t\t\t\tfor input_file_path in input_list:\n\t\t\t\t\tbasedir,filename = os.path.split(input_file_path)\n\t\t\t\t\tname,fext = os.path.splitext(filename)\n\t\t\t\t\tfor ext in ext_list:\t\t\t\t\t\t\n\t\t\t\t\t\tif ext == fext:\n\t\t\t\t\t\t\t# 保留目录结构的为相对于配置项根目录的层级\n\t\t\t\t\t\t\tinput_file_dir = os.path.dirname(input_file_path)\n\t\t\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\t\t\t\tdest_dir = config['output-root']\n\t\t\t\t\t\t\t# d_dir = config['output']\n\t\t\t\t\t\t\tif 'dst' in config['options']:\n\t\t\t\t\t\t\t\td_dir = 
config['options']['dst']\n\t\t\t\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\t\t\t\tif not os.path.exists(dest_dir):\n\t\t\t\t\t\t\t\tos.makedirs(dest_dir)\n\t\t\t\t\t\t\tlogger.debug(\"[CopyRes]copy \"+input_file_path+\" to \"+dest_dir)\n\t\t\t\t\t\t\tshutil.copy2(input_file_path,dest_dir)\n\t\t\tif 'filenames' in config['options']:\n\t\t\t\tfilenames_list = config['options']['filenames'].split(',')\n\t\t\t\tfor filename in filenames_list:\n\t\t\t\t\tfor input_file_path in input_list:\n\t\t\t\t\t\tdirname,input_file_name = os.path.split(input_file_path)\n\t\t\t\t\t\tif filename==input_file_name:\n\t\t\t\t\t\t\t# 保留目录结构的为相对于配置项根目录的层级\n\t\t\t\t\t\t\tinput_file_dir = os.path.dirname(input_file_path)\n\t\t\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\t\t\t\tdest_dir = config['output-root']\n\t\t\t\t\t\t\t# d_dir = config['output']\n\t\t\t\t\t\t\tif 'dst' in config['options']:\n\t\t\t\t\t\t\t\td_dir = config['options']['dst']\n\t\t\t\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\t\t\t\tif not os.path.exists(dest_dir):\n\t\t\t\t\t\t\t\tos.makedirs(dest_dir)\n\t\t\t\t\t\t\tlogger.debug(\"[CopyRes]copy \"+input_file_path+\" to \"+dest_dir)\n\t\t\t\t\t\t\tshutil.copy2(input_file_path,dest_dir)\n\t\telse:\n\t\t\tfor input_file_path in input_list:\n\t\t\t\t# 保留目录结构的为相对于配置项根目录的层级\n\t\t\t\tinput_file_dir = os.path.dirname(input_file_path)\n\t\t\t\tdest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\tdest_dir = config['output-root']\n\t\t\t\t# d_dir = config['output']\n\t\t\t\tif 'dst' in config['options']:\n\t\t\t\t\td_dir = config['options']['dst']\n\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\tif not 
os.path.exists(dest_dir):\n\t\t\t\t\tos.makedirs(dest_dir)\n\t\t\t\tlogger.debug(\"[CopyRes]copy \"+input_file_path+\" to \"+dest_dir)\n\t\t\t\tshutil.copy2(input_file_path,dest_dir)\n\t\t\tpass\n\t\tpass",
"step-ids": [
6,
8,
13,
14,
15
]
}
|
[
6,
8,
13,
14,
15
] |
<|reserved_special_token_0|>
def getMFCC(rate, sig):
mfcc_feat = mfcc(sig, rate)
return numpy.concatenate(getQuartileMeans(mfcc_feat))
def getLogFBank(rate, sig):
logfbank_feat = logfbank(sig, rate)
return numpy.concatenate(getQuartileMeans(logfbank_feat))
def getData(filename, outdir=None):
if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext
(os.path.basename(filename))[0] + '.csv'):
rate, sig = wav.read(filename)
return getMFCC(rate, sig)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(wd + '/python_speech_features')
<|reserved_special_token_0|>
def getQuartileMeans(values):
l = len(values) / 4
quartileMean1 = numpy.mean(values[:l], axis=0)
quartileMean2 = numpy.mean(values[l:2 * l], axis=0)
quartileMean3 = numpy.mean(values[2 * l:3 * l], axis=0)
quartileMean4 = numpy.mean(values[3 * l:], axis=0)
return [quartileMean1, quartileMean2, quartileMean3, quartileMean4]
def getMFCC(rate, sig):
mfcc_feat = mfcc(sig, rate)
return numpy.concatenate(getQuartileMeans(mfcc_feat))
def getLogFBank(rate, sig):
logfbank_feat = logfbank(sig, rate)
return numpy.concatenate(getQuartileMeans(logfbank_feat))
def getData(filename, outdir=None):
if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext
(os.path.basename(filename))[0] + '.csv'):
rate, sig = wav.read(filename)
return getMFCC(rate, sig)
def writeData(filename, outdir, values):
if not os.path.exists(outdir + '/' + os.path.splitext(os.path.basename(
filename))[0] + '.csv'):
with open(outdir + '/' + os.path.splitext(os.path.basename(filename
))[0] + '.csv', 'w') as f:
addComma = False
for val in values:
if addComma:
f.write(',')
f.write(str(val))
addComma = True
f.write('\n')
def generateMFCCData(indir, outdir):
for f in glob.glob(outdir + '/*.csv'):
os.remove(f)
for f in glob.glob(indir + '/*.wav'):
try:
writeData(f, outdir, getData(f, outdir))
newfilename = os.path.splitext(os.path.basename(f))[0]
print('YES: ' + newfilename)
if 'classify-me' not in indir:
os.rename(f, indir + '/classify-me/' + newfilename + '.wav')
os.rename(indir + '/' + newfilename + '.mp3', indir +
'/classify-me/' + newfilename + '.mp3')
except:
print('NO: ' + f)
if __name__ == '__main__':
generateMFCCData(DIR, OUTDIR)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
wd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(wd + '/python_speech_features')
<|reserved_special_token_0|>
DIR = '/home/quiggles/Desktop/513music/single-genre/classify-me/subset'
OUTDIR = wd + '/songdata/subset'
def getQuartileMeans(values):
l = len(values) / 4
quartileMean1 = numpy.mean(values[:l], axis=0)
quartileMean2 = numpy.mean(values[l:2 * l], axis=0)
quartileMean3 = numpy.mean(values[2 * l:3 * l], axis=0)
quartileMean4 = numpy.mean(values[3 * l:], axis=0)
return [quartileMean1, quartileMean2, quartileMean3, quartileMean4]
def getMFCC(rate, sig):
mfcc_feat = mfcc(sig, rate)
return numpy.concatenate(getQuartileMeans(mfcc_feat))
def getLogFBank(rate, sig):
logfbank_feat = logfbank(sig, rate)
return numpy.concatenate(getQuartileMeans(logfbank_feat))
def getData(filename, outdir=None):
if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext
(os.path.basename(filename))[0] + '.csv'):
rate, sig = wav.read(filename)
return getMFCC(rate, sig)
def writeData(filename, outdir, values):
if not os.path.exists(outdir + '/' + os.path.splitext(os.path.basename(
filename))[0] + '.csv'):
with open(outdir + '/' + os.path.splitext(os.path.basename(filename
))[0] + '.csv', 'w') as f:
addComma = False
for val in values:
if addComma:
f.write(',')
f.write(str(val))
addComma = True
f.write('\n')
def generateMFCCData(indir, outdir):
for f in glob.glob(outdir + '/*.csv'):
os.remove(f)
for f in glob.glob(indir + '/*.wav'):
try:
writeData(f, outdir, getData(f, outdir))
newfilename = os.path.splitext(os.path.basename(f))[0]
print('YES: ' + newfilename)
if 'classify-me' not in indir:
os.rename(f, indir + '/classify-me/' + newfilename + '.wav')
os.rename(indir + '/' + newfilename + '.mp3', indir +
'/classify-me/' + newfilename + '.mp3')
except:
print('NO: ' + f)
if __name__ == '__main__':
generateMFCCData(DIR, OUTDIR)
<|reserved_special_token_1|>
import sys, os, glob, numpy
wd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(wd + '/python_speech_features')
from features import mfcc, logfbank
import scipy.io.wavfile as wav
DIR = '/home/quiggles/Desktop/513music/single-genre/classify-me/subset'
OUTDIR = wd + '/songdata/subset'
def getQuartileMeans(values):
l = len(values) / 4
quartileMean1 = numpy.mean(values[:l], axis=0)
quartileMean2 = numpy.mean(values[l:2 * l], axis=0)
quartileMean3 = numpy.mean(values[2 * l:3 * l], axis=0)
quartileMean4 = numpy.mean(values[3 * l:], axis=0)
return [quartileMean1, quartileMean2, quartileMean3, quartileMean4]
def getMFCC(rate, sig):
mfcc_feat = mfcc(sig, rate)
return numpy.concatenate(getQuartileMeans(mfcc_feat))
def getLogFBank(rate, sig):
logfbank_feat = logfbank(sig, rate)
return numpy.concatenate(getQuartileMeans(logfbank_feat))
def getData(filename, outdir=None):
if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext
(os.path.basename(filename))[0] + '.csv'):
rate, sig = wav.read(filename)
return getMFCC(rate, sig)
def writeData(filename, outdir, values):
if not os.path.exists(outdir + '/' + os.path.splitext(os.path.basename(
filename))[0] + '.csv'):
with open(outdir + '/' + os.path.splitext(os.path.basename(filename
))[0] + '.csv', 'w') as f:
addComma = False
for val in values:
if addComma:
f.write(',')
f.write(str(val))
addComma = True
f.write('\n')
def generateMFCCData(indir, outdir):
for f in glob.glob(outdir + '/*.csv'):
os.remove(f)
for f in glob.glob(indir + '/*.wav'):
try:
writeData(f, outdir, getData(f, outdir))
newfilename = os.path.splitext(os.path.basename(f))[0]
print('YES: ' + newfilename)
if 'classify-me' not in indir:
os.rename(f, indir + '/classify-me/' + newfilename + '.wav')
os.rename(indir + '/' + newfilename + '.mp3', indir +
'/classify-me/' + newfilename + '.mp3')
except:
print('NO: ' + f)
if __name__ == '__main__':
generateMFCCData(DIR, OUTDIR)
<|reserved_special_token_1|>
#!/usr/bin/python
import sys, os, glob, numpy
wd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(wd + '/python_speech_features')
from features import mfcc, logfbank
import scipy.io.wavfile as wav
DIR = '/home/quiggles/Desktop/513music/single-genre/classify-me/subset'
OUTDIR = wd + '/songdata/subset'
# def getMFCC(filename):
# (rate,sig) = wav.read(filename)
# mfcc_feat = mfcc(sig,rate)
# l = len(mfcc_feat)/4
# quartileMean1 = numpy.mean(mfcc_feat[:l], axis=0)
# quartileMean2 = numpy.mean(mfcc_feat[l:2*l], axis=0)
# quartileMean3 = numpy.mean(mfcc_feat[2*l:3*l], axis=0)
# quartileMean4 = numpy.mean(mfcc_feat[3*l:], axis=0)
# return numpy.concatenate([quartileMean1, quartileMean2, quartileMean3, quartileMean4])
# def getLogFBank(filename):
# (rate,sig) = wav.read(filename)
# logfbank_feat = logfbank(sig,rate)
# l = len(logfbank_feat)/4
# quartileMean1 = numpy.mean(logfbank_feat[:l], axis=0)
# quartileMean2 = numpy.mean(logfbank_feat[l:2*l], axis=0)
# quartileMean3 = numpy.mean(logfbank_feat[2*l:3*l], axis=0)
# quartileMean4 = numpy.mean(logfbank_feat[3*l:], axis=0)
# return numpy.concatenate([quartileMean1, quartileMean2, quartileMean3, quartileMean4])
def getQuartileMeans(values):
    """Split *values* into four consecutive quarters and return their column means.

    values: 2-D sequence (frames x features), e.g. an MFCC feature matrix.
    Returns a list of four per-feature mean vectors (numpy arrays); the last
    quarter absorbs any remainder rows.
    """
    # Integer division: a float slice index raises TypeError on Python 3
    # (and is rejected by modern numpy as well).
    l = len(values) // 4
    quartileMean1 = numpy.mean(values[:l], axis=0)
    quartileMean2 = numpy.mean(values[l:2 * l], axis=0)
    quartileMean3 = numpy.mean(values[2 * l:3 * l], axis=0)
    quartileMean4 = numpy.mean(values[3 * l:], axis=0)
    return [quartileMean1, quartileMean2, quartileMean3, quartileMean4]
def getMFCC(rate, sig):
    """Return the quartile-averaged MFCC features of *sig* as one flat vector."""
    return numpy.concatenate(getQuartileMeans(mfcc(sig, rate)))
def getLogFBank(rate, sig):
    """Return the quartile-averaged log filterbank features of *sig* as one flat vector."""
    return numpy.concatenate(getQuartileMeans(logfbank(sig, rate)))
def getData(filename, outdir=None):
    """Compute the feature vector for a WAV file.

    When *outdir* is given and already holds a CSV for this song, the file
    is skipped and None is returned; otherwise the WAV is read and its
    MFCC feature vector is returned.
    """
    if outdir is not None:
        csv_path = outdir + '/' + os.path.splitext(os.path.basename(filename))[0] + ".csv"
        if os.path.exists(csv_path):
            return None
    rate, sig = wav.read(filename)
    return getMFCC(rate, sig)
def writeData(filename, outdir, values):
    """Write *values* as one comma-separated line to <outdir>/<song>.csv.

    Does nothing when the CSV already exists, so re-runs never clobber
    previously extracted features.
    """
    out_path = outdir + '/' + os.path.splitext(os.path.basename(filename))[0] + ".csv"
    if os.path.exists(out_path):
        return
    with open(out_path, 'w') as f:
        f.write(','.join(str(v) for v in values))
        f.write('\n')
def generateMFCCData(indir, outdir):
    """Extract features for every WAV in *indir* into per-song CSVs in *outdir*.

    Existing CSVs in *outdir* are removed first. Each successfully processed
    song (WAV plus its matching MP3) is moved into <indir>/classify-me,
    unless *indir* is already a classify-me directory. Songs that fail are
    reported and skipped.
    """
    for f in glob.glob(outdir + '/*.csv'):
        os.remove(f)
    for f in glob.glob(indir + '/*.wav'):
        try:
            writeData(f, outdir, getData(f, outdir))
            newfilename = os.path.splitext(os.path.basename(f))[0]
            print('YES: ' + newfilename)
            if 'classify-me' not in indir:
                os.rename(f, indir + "/classify-me/" + newfilename + ".wav")
                os.rename(indir + '/' + newfilename + ".mp3",
                          indir + "/classify-me/" + newfilename + ".mp3")
        except Exception:
            # The original bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort skip but let real
            # interrupts propagate.
            print('NO: ' + f)
if __name__ == '__main__':
    # Script entry point: featurize every WAV under DIR, writing CSVs to OUTDIR.
    generateMFCCData(DIR, OUTDIR)
|
flexible
|
{
"blob_id": "cca1a491e2a48b4b0c7099a6c54e528158ef30bb",
"index": 5189,
"step-1": "<mask token>\n\n\ndef getMFCC(rate, sig):\n mfcc_feat = mfcc(sig, rate)\n return numpy.concatenate(getQuartileMeans(mfcc_feat))\n\n\ndef getLogFBank(rate, sig):\n logfbank_feat = logfbank(sig, rate)\n return numpy.concatenate(getQuartileMeans(logfbank_feat))\n\n\ndef getData(filename, outdir=None):\n if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext\n (os.path.basename(filename))[0] + '.csv'):\n rate, sig = wav.read(filename)\n return getMFCC(rate, sig)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(wd + '/python_speech_features')\n<mask token>\n\n\ndef getQuartileMeans(values):\n l = len(values) / 4\n quartileMean1 = numpy.mean(values[:l], axis=0)\n quartileMean2 = numpy.mean(values[l:2 * l], axis=0)\n quartileMean3 = numpy.mean(values[2 * l:3 * l], axis=0)\n quartileMean4 = numpy.mean(values[3 * l:], axis=0)\n return [quartileMean1, quartileMean2, quartileMean3, quartileMean4]\n\n\ndef getMFCC(rate, sig):\n mfcc_feat = mfcc(sig, rate)\n return numpy.concatenate(getQuartileMeans(mfcc_feat))\n\n\ndef getLogFBank(rate, sig):\n logfbank_feat = logfbank(sig, rate)\n return numpy.concatenate(getQuartileMeans(logfbank_feat))\n\n\ndef getData(filename, outdir=None):\n if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext\n (os.path.basename(filename))[0] + '.csv'):\n rate, sig = wav.read(filename)\n return getMFCC(rate, sig)\n\n\ndef writeData(filename, outdir, values):\n if not os.path.exists(outdir + '/' + os.path.splitext(os.path.basename(\n filename))[0] + '.csv'):\n with open(outdir + '/' + os.path.splitext(os.path.basename(filename\n ))[0] + '.csv', 'w') as f:\n addComma = False\n for val in values:\n if addComma:\n f.write(',')\n f.write(str(val))\n addComma = True\n f.write('\\n')\n\n\ndef generateMFCCData(indir, outdir):\n for f in glob.glob(outdir + '/*.csv'):\n os.remove(f)\n for f in glob.glob(indir + '/*.wav'):\n try:\n writeData(f, outdir, getData(f, outdir))\n newfilename = os.path.splitext(os.path.basename(f))[0]\n print('YES: ' + newfilename)\n if 'classify-me' not in indir:\n os.rename(f, indir + '/classify-me/' + newfilename + '.wav')\n os.rename(indir + '/' + newfilename + '.mp3', indir +\n '/classify-me/' + newfilename + '.mp3')\n except:\n print('NO: ' + f)\n\n\nif __name__ == '__main__':\n generateMFCCData(DIR, OUTDIR)\n",
"step-3": "<mask token>\nwd = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(wd + '/python_speech_features')\n<mask token>\nDIR = '/home/quiggles/Desktop/513music/single-genre/classify-me/subset'\nOUTDIR = wd + '/songdata/subset'\n\n\ndef getQuartileMeans(values):\n l = len(values) / 4\n quartileMean1 = numpy.mean(values[:l], axis=0)\n quartileMean2 = numpy.mean(values[l:2 * l], axis=0)\n quartileMean3 = numpy.mean(values[2 * l:3 * l], axis=0)\n quartileMean4 = numpy.mean(values[3 * l:], axis=0)\n return [quartileMean1, quartileMean2, quartileMean3, quartileMean4]\n\n\ndef getMFCC(rate, sig):\n mfcc_feat = mfcc(sig, rate)\n return numpy.concatenate(getQuartileMeans(mfcc_feat))\n\n\ndef getLogFBank(rate, sig):\n logfbank_feat = logfbank(sig, rate)\n return numpy.concatenate(getQuartileMeans(logfbank_feat))\n\n\ndef getData(filename, outdir=None):\n if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext\n (os.path.basename(filename))[0] + '.csv'):\n rate, sig = wav.read(filename)\n return getMFCC(rate, sig)\n\n\ndef writeData(filename, outdir, values):\n if not os.path.exists(outdir + '/' + os.path.splitext(os.path.basename(\n filename))[0] + '.csv'):\n with open(outdir + '/' + os.path.splitext(os.path.basename(filename\n ))[0] + '.csv', 'w') as f:\n addComma = False\n for val in values:\n if addComma:\n f.write(',')\n f.write(str(val))\n addComma = True\n f.write('\\n')\n\n\ndef generateMFCCData(indir, outdir):\n for f in glob.glob(outdir + '/*.csv'):\n os.remove(f)\n for f in glob.glob(indir + '/*.wav'):\n try:\n writeData(f, outdir, getData(f, outdir))\n newfilename = os.path.splitext(os.path.basename(f))[0]\n print('YES: ' + newfilename)\n if 'classify-me' not in indir:\n os.rename(f, indir + '/classify-me/' + newfilename + '.wav')\n os.rename(indir + '/' + newfilename + '.mp3', indir +\n '/classify-me/' + newfilename + '.mp3')\n except:\n print('NO: ' + f)\n\n\nif __name__ == '__main__':\n generateMFCCData(DIR, OUTDIR)\n",
"step-4": "import sys, os, glob, numpy\nwd = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(wd + '/python_speech_features')\nfrom features import mfcc, logfbank\nimport scipy.io.wavfile as wav\nDIR = '/home/quiggles/Desktop/513music/single-genre/classify-me/subset'\nOUTDIR = wd + '/songdata/subset'\n\n\ndef getQuartileMeans(values):\n l = len(values) / 4\n quartileMean1 = numpy.mean(values[:l], axis=0)\n quartileMean2 = numpy.mean(values[l:2 * l], axis=0)\n quartileMean3 = numpy.mean(values[2 * l:3 * l], axis=0)\n quartileMean4 = numpy.mean(values[3 * l:], axis=0)\n return [quartileMean1, quartileMean2, quartileMean3, quartileMean4]\n\n\ndef getMFCC(rate, sig):\n mfcc_feat = mfcc(sig, rate)\n return numpy.concatenate(getQuartileMeans(mfcc_feat))\n\n\ndef getLogFBank(rate, sig):\n logfbank_feat = logfbank(sig, rate)\n return numpy.concatenate(getQuartileMeans(logfbank_feat))\n\n\ndef getData(filename, outdir=None):\n if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext\n (os.path.basename(filename))[0] + '.csv'):\n rate, sig = wav.read(filename)\n return getMFCC(rate, sig)\n\n\ndef writeData(filename, outdir, values):\n if not os.path.exists(outdir + '/' + os.path.splitext(os.path.basename(\n filename))[0] + '.csv'):\n with open(outdir + '/' + os.path.splitext(os.path.basename(filename\n ))[0] + '.csv', 'w') as f:\n addComma = False\n for val in values:\n if addComma:\n f.write(',')\n f.write(str(val))\n addComma = True\n f.write('\\n')\n\n\ndef generateMFCCData(indir, outdir):\n for f in glob.glob(outdir + '/*.csv'):\n os.remove(f)\n for f in glob.glob(indir + '/*.wav'):\n try:\n writeData(f, outdir, getData(f, outdir))\n newfilename = os.path.splitext(os.path.basename(f))[0]\n print('YES: ' + newfilename)\n if 'classify-me' not in indir:\n os.rename(f, indir + '/classify-me/' + newfilename + '.wav')\n os.rename(indir + '/' + newfilename + '.mp3', indir +\n '/classify-me/' + newfilename + '.mp3')\n except:\n print('NO: ' + 
f)\n\n\nif __name__ == '__main__':\n generateMFCCData(DIR, OUTDIR)\n",
"step-5": "#!/usr/bin/python\n\nimport sys, os, glob, numpy\nwd = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(wd + '/python_speech_features')\nfrom features import mfcc, logfbank\nimport scipy.io.wavfile as wav\n\nDIR = '/home/quiggles/Desktop/513music/single-genre/classify-me/subset'\nOUTDIR = wd + '/songdata/subset'\n\n\n# def getMFCC(filename):\n# (rate,sig) = wav.read(filename)\n# mfcc_feat = mfcc(sig,rate)\n# l = len(mfcc_feat)/4\n# quartileMean1 = numpy.mean(mfcc_feat[:l], axis=0)\n# quartileMean2 = numpy.mean(mfcc_feat[l:2*l], axis=0)\n# quartileMean3 = numpy.mean(mfcc_feat[2*l:3*l], axis=0)\n# quartileMean4 = numpy.mean(mfcc_feat[3*l:], axis=0)\n\n# return numpy.concatenate([quartileMean1, quartileMean2, quartileMean3, quartileMean4])\n\n# def getLogFBank(filename):\n# (rate,sig) = wav.read(filename)\n# logfbank_feat = logfbank(sig,rate)\n# l = len(logfbank_feat)/4\n# quartileMean1 = numpy.mean(logfbank_feat[:l], axis=0)\n# quartileMean2 = numpy.mean(logfbank_feat[l:2*l], axis=0)\n# quartileMean3 = numpy.mean(logfbank_feat[2*l:3*l], axis=0)\n# quartileMean4 = numpy.mean(logfbank_feat[3*l:], axis=0)\n\n# return numpy.concatenate([quartileMean1, quartileMean2, quartileMean3, quartileMean4])\n\ndef getQuartileMeans(values):\n l = len(values)/4\n quartileMean1 = numpy.mean(values[:l], axis=0)\n quartileMean2 = numpy.mean(values[l:2*l], axis=0)\n quartileMean3 = numpy.mean(values[2*l:3*l], axis=0)\n quartileMean4 = numpy.mean(values[3*l:], axis=0)\n return [quartileMean1, quartileMean2, quartileMean3, quartileMean4]\n\ndef getMFCC(rate,sig):\n mfcc_feat = mfcc(sig,rate)\n return numpy.concatenate(getQuartileMeans(mfcc_feat))\n\ndef getLogFBank(rate,sig):\n logfbank_feat = logfbank(sig,rate)\n return numpy.concatenate(getQuartileMeans(logfbank_feat))\n\ndef getData(filename, outdir=None):\n if outdir is None or not os.path.exists(outdir + '/' + os.path.splitext(os.path.basename(filename))[0] + \".csv\"):\n (rate,sig) = wav.read(filename)\n # 
mfccVals = getMFCC(rate, sig)\n # logfVals = getLogFBank(rate, sig)\n # return numpy.concatenate([mfccVals, logfVals])\n return getMFCC(rate,sig)\n\ndef writeData(filename, outdir, values):\n if not os.path.exists(outdir + '/' + os.path.splitext(os.path.basename(filename))[0] + \".csv\"):\n with open(outdir + '/' + os.path.splitext(os.path.basename(filename))[0] + \".csv\", 'w') as f:\n addComma = False\n for val in values:\n if addComma:\n f.write(',')\n f.write(str(val))\n addComma = True\n f.write('\\n')\n\ndef generateMFCCData(indir, outdir):\n for f in glob.glob(outdir + '/*.csv'):\n os.remove(f)\n # for f in glob.glob(outdir + '/*.logf'):\n # os.remove(f)\n\n for f in glob.glob(indir + '/*.wav'):\n try:\n writeData(f, outdir, getData(f, outdir))\n newfilename = os.path.splitext(os.path.basename(f))[0]\n print('YES: '+ newfilename)\n if 'classify-me' not in indir:\n os.rename(f, indir+\"/classify-me/\" + newfilename+\".wav\")\n os.rename(indir+'/' + newfilename + \".mp3\", indir+\"/classify-me/\" + newfilename+\".mp3\")\n except:\n print('NO: '+f)\n\nif __name__ == '__main__':\n generateMFCCData(DIR, OUTDIR)\n",
"step-ids": [
3,
7,
8,
9,
10
]
}
|
[
3,
7,
8,
9,
10
] |
import sys
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
stringe = test.strip()
list1 = stringe.split(" | ")
list2 = list1[0].split(" ")
kha = 0
for item in list2:
for c in list1[1]:
if c in item:
kha +=1
if kha == len(list1[1]):
print (item)
break
else:
print (False)
break
|
normal
|
{
"blob_id": "def2721cd89501b1004d5d3f4f58df300616c1be",
"index": 2747,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(sys.argv[1], 'r') as test_cases:\n for test in test_cases:\n stringe = test.strip()\n list1 = stringe.split(' | ')\n list2 = list1[0].split(' ')\n kha = 0\n for item in list2:\n for c in list1[1]:\n if c in item:\n kha += 1\n if kha == len(list1[1]):\n print(item)\n break\n else:\n print(False)\n break\n",
"step-3": "import sys\nwith open(sys.argv[1], 'r') as test_cases:\n for test in test_cases:\n stringe = test.strip()\n list1 = stringe.split(' | ')\n list2 = list1[0].split(' ')\n kha = 0\n for item in list2:\n for c in list1[1]:\n if c in item:\n kha += 1\n if kha == len(list1[1]):\n print(item)\n break\n else:\n print(False)\n break\n",
"step-4": "\r\nimport sys\r\n\r\nwith open(sys.argv[1], 'r') as test_cases:\r\n for test in test_cases:\r\n stringe = test.strip()\r\n list1 = stringe.split(\" | \")\r\n list2 = list1[0].split(\" \")\r\n kha = 0\r\n for item in list2:\r\n for c in list1[1]:\r\n if c in item:\r\n kha +=1\r\n if kha == len(list1[1]):\r\n print (item)\r\n break\r\n else:\r\n print (False)\r\n break",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Blockchain:
def __init__(self):
self.chain = []
self.farmer_details = []
self.create_block(proof=1, previous_hash='0')
self.nodes = set()
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.
datetime.now()), 'proof': proof, 'previous_hash': previous_hash,
'farmer_details': self.farmer_details}
self.farmer_details = []
self.chain.append(block)
return block
    def get_previous_block(self):
        """Return the block most recently appended to the chain."""
        return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof ** 2 -
previous_proof ** 2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **
2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
def add_farmerdetails(self, name, crop_name, quantity, rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())
.hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +
hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.
sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data = int(hash_of_transaction, 16)
signature = pow(data, privatekey.d, privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.
encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.
encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(
quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(
str(rate).encode()).hexdigest(), 'hash_of_transaction':
hash_of_transaction, 'signature': signature})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
<|reserved_special_token_0|>
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
block = blockchain.create_block(proof, previous_hash)
current_block = blockchain.get_previous_block()
current_hash = blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'], 'timestamp': block['timestamp'], 'proof':
block['proof'], 'previous_hash': block['previous_hash'], 'farmer':
block['farmer_details'], 'current_hash': current_hash}
return jsonify(response), 200
<|reserved_special_token_0|>
@app.route('/is_valid', methods=['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message':
'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
@app.route('/add_farmerdetails', methods=['POST'])
def add_farmer_details():
json = request.get_json()
farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'
]
if not all(key in json for key in farmer_keys):
return 'Some elements of the farmer_details are missing', 400
index = blockchain.add_farmerdetails(json['name_of_farmer'], json[
'crop_name'], json['quantity_inkg'], json['rate_perkg'])
response = {'message': f'These details will be added to Block {index}'}
return jsonify(response), 201
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Blockchain:
def __init__(self):
self.chain = []
self.farmer_details = []
self.create_block(proof=1, previous_hash='0')
self.nodes = set()
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.
datetime.now()), 'proof': proof, 'previous_hash': previous_hash,
'farmer_details': self.farmer_details}
self.farmer_details = []
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof ** 2 -
previous_proof ** 2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **
2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
def add_farmerdetails(self, name, crop_name, quantity, rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())
.hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +
hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.
sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data = int(hash_of_transaction, 16)
signature = pow(data, privatekey.d, privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.
encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.
encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(
quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(
str(rate).encode()).hexdigest(), 'hash_of_transaction':
hash_of_transaction, 'signature': signature})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
<|reserved_special_token_0|>
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
block = blockchain.create_block(proof, previous_hash)
current_block = blockchain.get_previous_block()
current_hash = blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'], 'timestamp': block['timestamp'], 'proof':
block['proof'], 'previous_hash': block['previous_hash'], 'farmer':
block['farmer_details'], 'current_hash': current_hash}
return jsonify(response), 200
@app.route('/print_chain', methods=['GET'])
def print_chain():
chain_till_now = []
for xblock in blockchain.chain:
xcurrent_hash = blockchain.hash(xblock)
if len(xblock['farmer_details']) == 0:
chain_till_now.append({'index': xblock['index'], 'timestamp':
xblock['timestamp'], 'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'], 'farmer': xblock[
'farmer_details'], 'current_hash': xcurrent_hash})
else:
l = len(xblock['farmer_details'])
sum = ''
l -= 1
while l >= 0:
sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum
l -= 1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode
()).hexdigest(), 'index': xblock['index'], 'timestamp':
xblock['timestamp'], 'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'], 'farmer': xblock[
'farmer_details'], 'current_hash': xcurrent_hash})
response = {'chain': chain_till_now, 'length': len(blockchain.chain)}
return jsonify(response), 200
<|reserved_special_token_0|>
@app.route('/is_valid', methods=['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message':
'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
@app.route('/add_farmerdetails', methods=['POST'])
def add_farmer_details():
json = request.get_json()
farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'
]
if not all(key in json for key in farmer_keys):
return 'Some elements of the farmer_details are missing', 400
index = blockchain.add_farmerdetails(json['name_of_farmer'], json[
'crop_name'], json['quantity_inkg'], json['rate_perkg'])
response = {'message': f'These details will be added to Block {index}'}
return jsonify(response), 201
<|reserved_special_token_0|>
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {'message':
'The nodes had different chains so the chain was replaced by the longest one.'
, 'new_chain': blockchain.chain}
else:
response = {'message': 'All good. The chain is the largest one.',
'actual_chain': blockchain.chain}
return jsonify(response), 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Blockchain:
def __init__(self):
self.chain = []
self.farmer_details = []
self.create_block(proof=1, previous_hash='0')
self.nodes = set()
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.
datetime.now()), 'proof': proof, 'previous_hash': previous_hash,
'farmer_details': self.farmer_details}
self.farmer_details = []
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof ** 2 -
previous_proof ** 2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **
2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
def add_farmerdetails(self, name, crop_name, quantity, rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())
.hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +
hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.
sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data = int(hash_of_transaction, 16)
signature = pow(data, privatekey.d, privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.
encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.
encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(
quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(
str(rate).encode()).hexdigest(), 'hash_of_transaction':
hash_of_transaction, 'signature': signature})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
<|reserved_special_token_0|>
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
block = blockchain.create_block(proof, previous_hash)
current_block = blockchain.get_previous_block()
current_hash = blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'], 'timestamp': block['timestamp'], 'proof':
block['proof'], 'previous_hash': block['previous_hash'], 'farmer':
block['farmer_details'], 'current_hash': current_hash}
return jsonify(response), 200
@app.route('/print_chain', methods=['GET'])
def print_chain():
chain_till_now = []
for xblock in blockchain.chain:
xcurrent_hash = blockchain.hash(xblock)
if len(xblock['farmer_details']) == 0:
chain_till_now.append({'index': xblock['index'], 'timestamp':
xblock['timestamp'], 'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'], 'farmer': xblock[
'farmer_details'], 'current_hash': xcurrent_hash})
else:
l = len(xblock['farmer_details'])
sum = ''
l -= 1
while l >= 0:
sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum
l -= 1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode
()).hexdigest(), 'index': xblock['index'], 'timestamp':
xblock['timestamp'], 'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'], 'farmer': xblock[
'farmer_details'], 'current_hash': xcurrent_hash})
response = {'chain': chain_till_now, 'length': len(blockchain.chain)}
return jsonify(response), 200
<|reserved_special_token_0|>
@app.route('/is_valid', methods=['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message':
'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
@app.route('/add_farmerdetails', methods=['POST'])
def add_farmer_details():
json = request.get_json()
farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'
]
if not all(key in json for key in farmer_keys):
return 'Some elements of the farmer_details are missing', 400
index = blockchain.add_farmerdetails(json['name_of_farmer'], json[
'crop_name'], json['quantity_inkg'], json['rate_perkg'])
response = {'message': f'These details will be added to Block {index}'}
return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
json = request.get_json()
nodes = json.get('nodes')
if nodes is None:
return 'No node', 400
for node in nodes:
blockchain.add_node(node)
response = {'message':
'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:'
, 'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {'message':
'The nodes had different chains so the chain was replaced by the longest one.'
, 'new_chain': blockchain.chain}
else:
response = {'message': 'All good. The chain is the largest one.',
'actual_chain': blockchain.chain}
return jsonify(response), 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Blockchain:
def __init__(self):
self.chain = []
self.farmer_details = []
self.create_block(proof=1, previous_hash='0')
self.nodes = set()
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.
datetime.now()), 'proof': proof, 'previous_hash': previous_hash,
'farmer_details': self.farmer_details}
self.farmer_details = []
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof ** 2 -
previous_proof ** 2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **
2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
def add_farmerdetails(self, name, crop_name, quantity, rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())
.hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +
hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.
sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data = int(hash_of_transaction, 16)
signature = pow(data, privatekey.d, privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.
encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.
encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(
quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(
str(rate).encode()).hexdigest(), 'hash_of_transaction':
hash_of_transaction, 'signature': signature})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
<|reserved_special_token_0|>
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
block = blockchain.create_block(proof, previous_hash)
current_block = blockchain.get_previous_block()
current_hash = blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'], 'timestamp': block['timestamp'], 'proof':
block['proof'], 'previous_hash': block['previous_hash'], 'farmer':
block['farmer_details'], 'current_hash': current_hash}
return jsonify(response), 200
@app.route('/print_chain', methods=['GET'])
def print_chain():
chain_till_now = []
for xblock in blockchain.chain:
xcurrent_hash = blockchain.hash(xblock)
if len(xblock['farmer_details']) == 0:
chain_till_now.append({'index': xblock['index'], 'timestamp':
xblock['timestamp'], 'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'], 'farmer': xblock[
'farmer_details'], 'current_hash': xcurrent_hash})
else:
l = len(xblock['farmer_details'])
sum = ''
l -= 1
while l >= 0:
sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum
l -= 1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode
()).hexdigest(), 'index': xblock['index'], 'timestamp':
xblock['timestamp'], 'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'], 'farmer': xblock[
'farmer_details'], 'current_hash': xcurrent_hash})
response = {'chain': chain_till_now, 'length': len(blockchain.chain)}
return jsonify(response), 200
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}
return jsonify(response), 200
@app.route('/is_valid', methods=['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message':
'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
@app.route('/add_farmerdetails', methods=['POST'])
def add_farmer_details():
json = request.get_json()
farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'
]
if not all(key in json for key in farmer_keys):
return 'Some elements of the farmer_details are missing', 400
index = blockchain.add_farmerdetails(json['name_of_farmer'], json[
'crop_name'], json['quantity_inkg'], json['rate_perkg'])
response = {'message': f'These details will be added to Block {index}'}
return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
json = request.get_json()
nodes = json.get('nodes')
if nodes is None:
return 'No node', 400
for node in nodes:
blockchain.add_node(node)
response = {'message':
'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:'
, 'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {'message':
'The nodes had different chains so the chain was replaced by the longest one.'
, 'new_chain': blockchain.chain}
else:
response = {'message': 'All good. The chain is the largest one.',
'actual_chain': blockchain.chain}
return jsonify(response), 200
app.run(host='0.0.0.0', port=5001)
<|reserved_special_token_1|>
import datetime
import hashlib
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
from Crypto.PublicKey import RSA
# Part 1 - Building a Blockchain
class Blockchain:
#chain(emptylist) , farmer_details(emptylist), nodes(set), create_block(function to create the genesis block)
def __init__(self):
self.chain = []
self.farmer_details = []
self.create_block(proof = 1, previous_hash = '0')
self.nodes = set()
#It creates a dictionary block which contains index(length of chain+1),timestamp( by using the module datetime),
#Proof( passes as parameter),previous_hash(passed as parameter),
#Farmer_details(from self) and append this to the chain.
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash,
'farmer_details': self.farmer_details}
self.farmer_details = []
self.chain.append(block)
return block
#It returns the last block of the chain.
def get_previous_block(self):
return self.chain[-1]
#It runs a lop and check if hash of new proof^2- previous proof^2 contains 4 leading zeroes.
#if yes,then it returns the new proof otherwise increment the new proof by 1 and iterates again.
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
#- It returns the hash of the block using sha256
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
#It iterates a loop from 0 to chain length and check if hash of the block is same as returned by the hash function,
#then it checks if hash of the proof of current block^2-proof of previous block^2 contains 4 leading zeroes or not.
# if no, then chain is not valid.
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
#- It creates the private key using the RSA.generate(1024),then creates the public key,
# hash of transaction(it is the hash of the sum of hashes of the name,crop_name,quantity,rate),
#data( it is the hash of the transaction in the int form),
#signature( it is created by raising the data to the power of privatekey.d%privatekey.n).
# Then it append a dictionary containing all these information in the hash format to the chain farmer_details
#and returns the index of the new block.
def add_farmerdetails(self, name, crop_name, quantity,rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data=int(hash_of_transaction,16)
signature=pow(data,privatekey.d,privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),
'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),
'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),
'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),
'hash_of_transaction': hash_of_transaction,
'signature': signature
})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
#It takes the url using urlparse of the address and then adds this to the set nodes in the self.
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
#It access all the nodes in the set nodes and then iterates a loop to get their chain length using get_chain (to be described)
# and replaces the current chain with the longest chain of all the nodes.
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
# Part 2 - Mining our Blockchain
# Creating a Web App
app = Flask(__name__)
# Creating an address for the node on Port 5001
node_address = str(uuid4()).replace('-', '')
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
#- It access the previous block by calling the function get_previous_block(),
#then access the previous proof by previous_block[‘proof’],
#then it creates a new proof by using the function proof_of_work(‘previous_proof’),
#then it finds the hash of the previous block by using the function blockchain.hash(previous_block),
# then calls the function create_block( proof,previous_hash),then finds the hash of this block.
# It creates a response containing all the details of the new block,jsonify it and returns it.
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
#blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)
block = blockchain.create_block(proof, previous_hash)
current_block=blockchain.get_previous_block()
current_hash=blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'farmer': block['farmer_details'],
'current_hash': current_hash}
return jsonify(response), 200
# Getting the full Blockchain
#- It creates an empty list chain_till_now, then iterates over all the blocks in the blockchain and find it’s hash
#then check if the list farmer_details is empty or not,
#if it is empty then it appends a dictionary containing the current block’s index,timestamp,proof,previous_hash, current_hash, farmer_details.
# If the farmer_details list is not empty then it first finds the length of the list farmer_details
#then it iterates over the length of the list farmer_details and appends the hash of transaction
# contained within the dictionary of the list farmer_details. Then it creates the hash of this appended hash. This is the merged hash.
# Then it creates a dictionary containing merged hash,index,timestamp,proof,previous_hash,farmer_details and current hash.
# Then, it appends this dictionary to the list chain till now.
# It then creates the response containing the chain till now and length of the blockchain,jasonifies it and returns it.
@app.route('/print_chain',methods=['GET'])
def print_chain():
chain_till_now =[]
for xblock in blockchain.chain:
xcurrent_hash=blockchain.hash(xblock)
if len(xblock['farmer_details'])==0:
chain_till_now.append({'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
else:
l=len(xblock['farmer_details'])
sum=""
l-=1
while(l>=0):
sum=xblock['farmer_details'][l]['hash_of_transaction']+sum
l-=1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),
'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
response = {'chain': chain_till_now,
'length': len(blockchain.chain)}
return jsonify(response), 200
#- It creats the response containing the blockchain.chain and its length,jasonifies it and returns it.
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
#- It calls the function is_chain_valid and returns a string as response based on whether the chain is valid or not.
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
# Adding a new transaction to the Blockchain
#It takes the input in Jason format and checks if all the keys in the farmer keys(name_of_farmer,crop_name,quantity_inkg, rate_perkg) are available in the json file.
#If no, It returns that some elements are missing
# otherwise it calls the function add_farmer_details by passing the farmer details in the json file as parameter and
#returns the index of the block in which these details will be added.
@app.route('/add_farmerdetails', methods = ['POST'])
def add_farmer_details():
json = request.get_json()
farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg','rate_perkg']
if not all(key in json for key in farmer_keys):
return 'Some elements of the farmer_details are missing', 400
index = blockchain.add_farmerdetails(json['name_of_farmer'], json['crop_name'], json['quantity_inkg'], json['rate_perkg'])
response = {'message': f'These details will be added to Block {index}'}
return jsonify(response), 201
# Part 3 - Decentralizing our Blockchain

# Connecting new nodes
@app.route('/connect_node', methods=['POST'])
def connect_node():
    """Register peer nodes posted as {'nodes': [address, ...]}.

    Returns 400 when no node list is supplied, otherwise 201 with the
    full set of known nodes after registration.
    """
    # Renamed local (was `json`) so it no longer shadows the imported json module.
    payload = request.get_json()
    # Guard against a non-JSON body (payload is None) as well as a missing key.
    nodes = payload.get('nodes') if payload else None
    if nodes is None:
        return "No node", 400
    for node in nodes:
        blockchain.add_node(node)
    response = {'message': 'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:',
                'total_nodes': list(blockchain.nodes)}
    return jsonify(response), 201
# Replacing the chain by the longest valid chain in the network if needed
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
    """Run the consensus step and report whether this node's chain changed."""
    if blockchain.replace_chain():
        response = {
            'message': 'The nodes had different chains so the chain was replaced by the longest one.',
            'new_chain': blockchain.chain,
        }
    else:
        response = {
            'message': 'All good. The chain is the largest one.',
            'actual_chain': blockchain.chain,
        }
    return jsonify(response), 200
# Running the app
# NOTE(review): 0.0.0.0 binds on all network interfaces (reachable by peers);
# this node listens on port 5001.
app.run(host = '0.0.0.0', port = 5001)
|
flexible
|
{
"blob_id": "f8c222b1a84a092a3388cb801a88495bc227b1d5",
"index": 9748,
"step-1": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.farmer_details = []\n self.create_block(proof=1, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'farmer_details': self.farmer_details}\n self.farmer_details = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_farmerdetails(self, name, crop_name, quantity, rate):\n privatekey = RSA.generate(1024)\n publickey = privatekey.publickey()\n hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())\n .hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +\n hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.\n sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\n data = int(hash_of_transaction, 16)\n signature = pow(data, privatekey.d, privatekey.n)\n 
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.\n encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.\n encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(\n quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(\n str(rate).encode()).hexdigest(), 'hash_of_transaction':\n hash_of_transaction, 'signature': signature})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n block = blockchain.create_block(proof, previous_hash)\n current_block = blockchain.get_previous_block()\n current_hash = blockchain.hash(current_block)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'], 'timestamp': block['timestamp'], 'proof':\n block['proof'], 'previous_hash': block['previous_hash'], 'farmer':\n block['farmer_details'], 'current_hash': current_hash}\n return jsonify(response), 200\n\n\n<mask token>\n\n\n@app.route('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. 
The Blockchain is valid.'}\n else:\n response = {'message':\n 'Houston, we have a problem. The Blockchain is not valid.'}\n return jsonify(response), 200\n\n\n@app.route('/add_farmerdetails', methods=['POST'])\ndef add_farmer_details():\n json = request.get_json()\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'\n ]\n if not all(key in json for key in farmer_keys):\n return 'Some elements of the farmer_details are missing', 400\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json[\n 'crop_name'], json['quantity_inkg'], json['rate_perkg'])\n response = {'message': f'These details will be added to Block {index}'}\n return jsonify(response), 201\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.farmer_details = []\n self.create_block(proof=1, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'farmer_details': self.farmer_details}\n self.farmer_details = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_farmerdetails(self, name, crop_name, quantity, rate):\n privatekey = RSA.generate(1024)\n publickey = privatekey.publickey()\n hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())\n .hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +\n hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.\n sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\n data = int(hash_of_transaction, 16)\n signature = pow(data, privatekey.d, privatekey.n)\n 
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.\n encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.\n encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(\n quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(\n str(rate).encode()).hexdigest(), 'hash_of_transaction':\n hash_of_transaction, 'signature': signature})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n block = blockchain.create_block(proof, previous_hash)\n current_block = blockchain.get_previous_block()\n current_hash = blockchain.hash(current_block)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'], 'timestamp': block['timestamp'], 'proof':\n block['proof'], 'previous_hash': block['previous_hash'], 'farmer':\n block['farmer_details'], 'current_hash': current_hash}\n return jsonify(response), 200\n\n\n@app.route('/print_chain', methods=['GET'])\ndef print_chain():\n chain_till_now = []\n for xblock in blockchain.chain:\n xcurrent_hash = blockchain.hash(xblock)\n if len(xblock['farmer_details']) == 0:\n chain_till_now.append({'index': 
xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n else:\n l = len(xblock['farmer_details'])\n sum = ''\n l -= 1\n while l >= 0:\n sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum\n l -= 1\n chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode\n ()).hexdigest(), 'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n response = {'chain': chain_till_now, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n<mask token>\n\n\n@app.route('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. The Blockchain is valid.'}\n else:\n response = {'message':\n 'Houston, we have a problem. The Blockchain is not valid.'}\n return jsonify(response), 200\n\n\n@app.route('/add_farmerdetails', methods=['POST'])\ndef add_farmer_details():\n json = request.get_json()\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'\n ]\n if not all(key in json for key in farmer_keys):\n return 'Some elements of the farmer_details are missing', 400\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json[\n 'crop_name'], json['quantity_inkg'], json['rate_perkg'])\n response = {'message': f'These details will be added to Block {index}'}\n return jsonify(response), 201\n\n\n<mask token>\n\n\n@app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. 
The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.farmer_details = []\n self.create_block(proof=1, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'farmer_details': self.farmer_details}\n self.farmer_details = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_farmerdetails(self, name, crop_name, quantity, rate):\n privatekey = RSA.generate(1024)\n publickey = privatekey.publickey()\n hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())\n .hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +\n hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.\n sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\n data = int(hash_of_transaction, 16)\n signature = pow(data, privatekey.d, privatekey.n)\n 
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.\n encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.\n encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(\n quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(\n str(rate).encode()).hexdigest(), 'hash_of_transaction':\n hash_of_transaction, 'signature': signature})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n block = blockchain.create_block(proof, previous_hash)\n current_block = blockchain.get_previous_block()\n current_hash = blockchain.hash(current_block)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'], 'timestamp': block['timestamp'], 'proof':\n block['proof'], 'previous_hash': block['previous_hash'], 'farmer':\n block['farmer_details'], 'current_hash': current_hash}\n return jsonify(response), 200\n\n\n@app.route('/print_chain', methods=['GET'])\ndef print_chain():\n chain_till_now = []\n for xblock in blockchain.chain:\n xcurrent_hash = blockchain.hash(xblock)\n if len(xblock['farmer_details']) == 0:\n chain_till_now.append({'index': 
xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n else:\n l = len(xblock['farmer_details'])\n sum = ''\n l -= 1\n while l >= 0:\n sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum\n l -= 1\n chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode\n ()).hexdigest(), 'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n response = {'chain': chain_till_now, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n<mask token>\n\n\n@app.route('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. The Blockchain is valid.'}\n else:\n response = {'message':\n 'Houston, we have a problem. The Blockchain is not valid.'}\n return jsonify(response), 200\n\n\n@app.route('/add_farmerdetails', methods=['POST'])\ndef add_farmer_details():\n json = request.get_json()\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'\n ]\n if not all(key in json for key in farmer_keys):\n return 'Some elements of the farmer_details are missing', 400\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json[\n 'crop_name'], json['quantity_inkg'], json['rate_perkg'])\n response = {'message': f'These details will be added to Block {index}'}\n return jsonify(response), 201\n\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. 
The puspesh Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\n@app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.farmer_details = []\n self.create_block(proof=1, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'farmer_details': self.farmer_details}\n self.farmer_details = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_farmerdetails(self, name, crop_name, quantity, rate):\n privatekey = RSA.generate(1024)\n publickey = privatekey.publickey()\n hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())\n .hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +\n hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.\n sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\n data = int(hash_of_transaction, 16)\n signature = pow(data, privatekey.d, privatekey.n)\n 
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.\n encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.\n encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(\n quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(\n str(rate).encode()).hexdigest(), 'hash_of_transaction':\n hash_of_transaction, 'signature': signature})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n block = blockchain.create_block(proof, previous_hash)\n current_block = blockchain.get_previous_block()\n current_hash = blockchain.hash(current_block)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'], 'timestamp': block['timestamp'], 'proof':\n block['proof'], 'previous_hash': block['previous_hash'], 'farmer':\n block['farmer_details'], 'current_hash': current_hash}\n return jsonify(response), 200\n\n\n@app.route('/print_chain', methods=['GET'])\ndef print_chain():\n chain_till_now = []\n for xblock in blockchain.chain:\n xcurrent_hash = blockchain.hash(xblock)\n if len(xblock['farmer_details']) == 0:\n chain_till_now.append({'index': 
xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n else:\n l = len(xblock['farmer_details'])\n sum = ''\n l -= 1\n while l >= 0:\n sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum\n l -= 1\n chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode\n ()).hexdigest(), 'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n response = {'chain': chain_till_now, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n@app.route('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n@app.route('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. The Blockchain is valid.'}\n else:\n response = {'message':\n 'Houston, we have a problem. 
The Blockchain is not valid.'}\n return jsonify(response), 200\n\n\n@app.route('/add_farmerdetails', methods=['POST'])\ndef add_farmer_details():\n json = request.get_json()\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'\n ]\n if not all(key in json for key in farmer_keys):\n return 'Some elements of the farmer_details are missing', 400\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json[\n 'crop_name'], json['quantity_inkg'], json['rate_perkg'])\n response = {'message': f'These details will be added to Block {index}'}\n return jsonify(response), 201\n\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\n@app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\napp.run(host='0.0.0.0', port=5001)\n",
"step-5": "\r\nimport datetime\r\nimport hashlib\r\nimport json\r\nfrom flask import Flask, jsonify, request\r\nimport requests\r\nfrom uuid import uuid4\r\nfrom urllib.parse import urlparse\r\nfrom Crypto.PublicKey import RSA\r\n\r\n# Part 1 - Building a Blockchain\r\n\r\nclass Blockchain:\r\n#chain(emptylist) , farmer_details(emptylist), nodes(set), create_block(function to create the genesis block)\r\n def __init__(self):\r\n self.chain = []\r\n self.farmer_details = []\r\n self.create_block(proof = 1, previous_hash = '0')\r\n self.nodes = set()\r\n#It creates a dictionary block which contains index(length of chain+1),timestamp( by using the module datetime),\r\n#Proof( passes as parameter),previous_hash(passed as parameter),\r\n#Farmer_details(from self) and append this to the chain.\r\n \r\n def create_block(self, proof, previous_hash):\r\n block = {'index': len(self.chain) + 1,\r\n 'timestamp': str(datetime.datetime.now()),\r\n 'proof': proof,\r\n 'previous_hash': previous_hash,\r\n 'farmer_details': self.farmer_details}\r\n self.farmer_details = []\r\n self.chain.append(block)\r\n return block\r\n#It returns the last block of the chain.\r\n def get_previous_block(self):\r\n return self.chain[-1]\r\n#It runs a lop and check if hash of new proof^2- previous proof^2 contains 4 leading zeroes. 
\r\n#if yes,then it returns the new proof otherwise increment the new proof by 1 and iterates again.\r\n def proof_of_work(self, previous_proof):\r\n new_proof = 1\r\n check_proof = False\r\n while check_proof is False:\r\n hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()\r\n if hash_operation[:4] == '0000':\r\n check_proof = True\r\n else:\r\n new_proof += 1\r\n return new_proof\r\n#- It returns the hash of the block using sha256 \r\n def hash(self, block):\r\n encoded_block = json.dumps(block, sort_keys = True).encode()\r\n return hashlib.sha256(encoded_block).hexdigest()\r\n#It iterates a loop from 0 to chain length and check if hash of the block is same as returned by the hash function, \r\n#then it checks if hash of the proof of current block^2-proof of previous block^2 contains 4 leading zeroes or not.\r\n# if no, then chain is not valid. \r\n def is_chain_valid(self, chain):\r\n previous_block = chain[0]\r\n block_index = 1\r\n while block_index < len(chain):\r\n block = chain[block_index]\r\n if block['previous_hash'] != self.hash(previous_block):\r\n return False\r\n previous_proof = previous_block['proof']\r\n proof = block['proof']\r\n hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()\r\n if hash_operation[:4] != '0000':\r\n return False\r\n previous_block = block\r\n block_index += 1\r\n return True\r\n#- It creates the private key using the RSA.generate(1024),then creates the public key,\r\n# hash of transaction(it is the hash of the sum of hashes of the name,crop_name,quantity,rate),\r\n#data( it is the hash of the transaction in the int form),\r\n#signature( it is created by raising the data to the power of privatekey.d%privatekey.n).\r\n# Then it append a dictionary containing all these information in the hash format to the chain farmer_details \r\n#and returns the index of the new block. 
\r\n def add_farmerdetails(self, name, crop_name, quantity,rate):\r\n privatekey = RSA.generate(1024) \r\n publickey = privatekey.publickey() \r\n hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\r\n data=int(hash_of_transaction,16)\r\n signature=pow(data,privatekey.d,privatekey.n)\r\n self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),\r\n 'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),\r\n 'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),\r\n 'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),\r\n 'hash_of_transaction': hash_of_transaction,\r\n 'signature': signature\r\n })\r\n previous_block = self.get_previous_block()\r\n return previous_block['index'] + 1\r\n#It takes the url using urlparse of the address and then adds this to the set nodes in the self.\r\n def add_node(self, address):\r\n parsed_url = urlparse(address)\r\n self.nodes.add(parsed_url.netloc)\r\n#It access all the nodes in the set nodes and then iterates a loop to get their chain length using get_chain (to be described)\r\n# and replaces the current chain with the longest chain of all the nodes. 
\r\n def replace_chain(self):\r\n network = self.nodes\r\n longest_chain = None\r\n max_length = len(self.chain)\r\n for node in network:\r\n response = requests.get(f'http://{node}/get_chain')\r\n if response.status_code == 200:\r\n length = response.json()['length']\r\n chain = response.json()['chain']\r\n if length > max_length and self.is_chain_valid(chain):\r\n max_length = length\r\n longest_chain = chain\r\n if longest_chain:\r\n self.chain = longest_chain\r\n return True\r\n return False\r\n\r\n# Part 2 - Mining our Blockchain\r\n\r\n# Creating a Web App\r\napp = Flask(__name__)\r\n\r\n# Creating an address for the node on Port 5001\r\nnode_address = str(uuid4()).replace('-', '')\r\n\r\n# Creating a Blockchain\r\nblockchain = Blockchain()\r\n\r\n# Mining a new block\r\n#- It access the previous block by calling the function get_previous_block(), \r\n#then access the previous proof by previous_block[‘proof’],\r\n#then it creates a new proof by using the function proof_of_work(‘previous_proof’), \r\n#then it finds the hash of the previous block by using the function blockchain.hash(previous_block),\r\n# then calls the function create_block( proof,previous_hash),then finds the hash of this block.\r\n# It creates a response containing all the details of the new block,jsonify it and returns it.\r\n@app.route('/mine_block', methods = ['GET'])\r\ndef mine_block():\r\n previous_block = blockchain.get_previous_block()\r\n previous_proof = previous_block['proof']\r\n proof = blockchain.proof_of_work(previous_proof)\r\n previous_hash = blockchain.hash(previous_block)\r\n #blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)\r\n block = blockchain.create_block(proof, previous_hash)\r\n current_block=blockchain.get_previous_block()\r\n current_hash=blockchain.hash(current_block)\r\n response = {'message': 'Congratulations, you just mined a block!',\r\n 'index': block['index'],\r\n 'timestamp': block['timestamp'],\r\n 'proof': 
block['proof'],\r\n 'previous_hash': block['previous_hash'],\r\n 'farmer': block['farmer_details'],\r\n 'current_hash': current_hash}\r\n return jsonify(response), 200\r\n\r\n# Getting the full Blockchain\r\n#- It creates an empty list chain_till_now, then iterates over all the blocks in the blockchain and find it’s hash \r\n#then check if the list farmer_details is empty or not, \r\n#if it is empty then it appends a dictionary containing the current block’s index,timestamp,proof,previous_hash, current_hash, farmer_details.\r\n# If the farmer_details list is not empty then it first finds the length of the list farmer_details \r\n#then it iterates over the length of the list farmer_details and appends the hash of transaction \r\n# contained within the dictionary of the list farmer_details. Then it creates the hash of this appended hash. This is the merged hash.\r\n# Then it creates a dictionary containing merged hash,index,timestamp,proof,previous_hash,farmer_details and current hash.\r\n# Then, it appends this dictionary to the list chain till now.\r\n# It then creates the response containing the chain till now and length of the blockchain,jasonifies it and returns it. 
\r\n\r\n@app.route('/print_chain',methods=['GET'])\r\ndef print_chain():\r\n chain_till_now =[]\r\n for xblock in blockchain.chain:\r\n xcurrent_hash=blockchain.hash(xblock) \r\n if len(xblock['farmer_details'])==0:\r\n chain_till_now.append({'index': xblock['index'],\r\n 'timestamp': xblock['timestamp'],\r\n 'proof': xblock['proof'],\r\n 'previous_hash': xblock['previous_hash'],\r\n 'farmer': xblock['farmer_details'],\r\n 'current_hash': xcurrent_hash})\r\n else:\r\n l=len(xblock['farmer_details'])\r\n sum=\"\"\r\n l-=1\r\n while(l>=0):\r\n sum=xblock['farmer_details'][l]['hash_of_transaction']+sum\r\n l-=1\r\n chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),\r\n 'index': xblock['index'],\r\n 'timestamp': xblock['timestamp'],\r\n 'proof': xblock['proof'],\r\n 'previous_hash': xblock['previous_hash'],\r\n 'farmer': xblock['farmer_details'],\r\n 'current_hash': xcurrent_hash}) \r\n response = {'chain': chain_till_now,\r\n 'length': len(blockchain.chain)}\r\n return jsonify(response), 200\r\n\r\n#- It creats the response containing the blockchain.chain and its length,jasonifies it and returns it. \r\n@app.route('/get_chain', methods = ['GET'])\r\ndef get_chain():\r\n response = {'chain': blockchain.chain,\r\n 'length': len(blockchain.chain)}\r\n return jsonify(response), 200\r\n\r\n# Checking if the Blockchain is valid\r\n#- It calls the function is_chain_valid and returns a string as response based on whether the chain is valid or not.\r\n@app.route('/is_valid', methods = ['GET'])\r\ndef is_valid():\r\n is_valid = blockchain.is_chain_valid(blockchain.chain)\r\n if is_valid:\r\n response = {'message': 'All good. The Blockchain is valid.'}\r\n else:\r\n response = {'message': 'Houston, we have a problem. 
The Blockchain is not valid.'}\r\n return jsonify(response), 200\r\n\r\n# Adding a new transaction to the Blockchain\r\n#It takes the input in Jason format and checks if all the keys in the farmer keys(name_of_farmer,crop_name,quantity_inkg, rate_perkg) are available in the json file. \r\n#If no, It returns that some elements are missing\r\n# otherwise it calls the function add_farmer_details by passing the farmer details in the json file as parameter and \r\n#returns the index of the block in which these details will be added.\r\n@app.route('/add_farmerdetails', methods = ['POST'])\r\ndef add_farmer_details():\r\n json = request.get_json()\r\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg','rate_perkg']\r\n if not all(key in json for key in farmer_keys):\r\n return 'Some elements of the farmer_details are missing', 400\r\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json['crop_name'], json['quantity_inkg'], json['rate_perkg'])\r\n response = {'message': f'These details will be added to Block {index}'}\r\n return jsonify(response), 201\r\n\r\n# Part 3 - Decentralizing our Blockchain\r\n\r\n# Connecting new nodes\r\n#It takes a Jason file as request and first check if it contains any node or not.\r\n# If it contains the nodes then it calls the function blockchain.add_node .\r\n#Then it returns the list of blockchain.nodes as response.\r\n@app.route('/connect_node', methods = ['POST'])\r\ndef connect_node():\r\n json = request.get_json()\r\n nodes = json.get('nodes')\r\n if nodes is None:\r\n return \"No node\", 400\r\n for node in nodes:\r\n blockchain.add_node(node)\r\n response = {'message': 'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:',\r\n 'total_nodes': list(blockchain.nodes)}\r\n return jsonify(response), 201\r\n\r\n# Replacing the chain by the longest chain if needed\r\n#- It calls the function blockcain.replace_chain. 
If the chain is replaced \r\n#it returns the response with a message that the nodes has the different chains so the chain has been replaced by the longest chain alongwith the blockchain.chain.\r\n# Otherwise it returns the response with a message all good the chain is the longest one with the blockchain.chain .\r\n#then it jsonify the response and returns it.\r\n@app.route('/replace_chain', methods = ['GET'])\r\ndef replace_chain():\r\n is_chain_replaced = blockchain.replace_chain()\r\n if is_chain_replaced:\r\n response = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',\r\n 'new_chain': blockchain.chain}\r\n else:\r\n response = {'message': 'All good. The chain is the largest one.',\r\n 'actual_chain': blockchain.chain}\r\n return jsonify(response), 200\r\n\r\n# Running the app\r\napp.run(host = '0.0.0.0', port = 5001)\r\n",
"step-ids": [
13,
15,
16,
18,
21
]
}
|
[
13,
15,
16,
18,
21
] |
<|reserved_special_token_0|>
class HTTPError(CCEError):
""" HTTPError raised when HTTP request returned a error."""
def __init__(self, reason=None):
"""
Initialize HTTPError with `response` object and `status`.
"""
self.reason = reason
super(HTTPError, self).__init__(reason)
class StopCCEIteration(CCEError):
"""Exception to exit from the engine iteration."""
pass
class CCESplitError(CCEError):
"""Exception to exit the job in Split Task"""
pass
class QuitJobError(CCEError):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FuncException(CCEError):
<|reserved_special_token_0|>
pass
class HTTPError(CCEError):
""" HTTPError raised when HTTP request returned a error."""
def __init__(self, reason=None):
"""
Initialize HTTPError with `response` object and `status`.
"""
self.reason = reason
super(HTTPError, self).__init__(reason)
class StopCCEIteration(CCEError):
"""Exception to exit from the engine iteration."""
pass
class CCESplitError(CCEError):
"""Exception to exit the job in Split Task"""
pass
class QuitJobError(CCEError):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FuncException(CCEError):
"""Ext function call exception"""
pass
class HTTPError(CCEError):
""" HTTPError raised when HTTP request returned a error."""
def __init__(self, reason=None):
"""
Initialize HTTPError with `response` object and `status`.
"""
self.reason = reason
super(HTTPError, self).__init__(reason)
class StopCCEIteration(CCEError):
"""Exception to exit from the engine iteration."""
pass
class CCESplitError(CCEError):
"""Exception to exit the job in Split Task"""
pass
class QuitJobError(CCEError):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConfigException(CCEError):
"""Config exception"""
pass
class FuncException(CCEError):
"""Ext function call exception"""
pass
class HTTPError(CCEError):
""" HTTPError raised when HTTP request returned a error."""
def __init__(self, reason=None):
"""
Initialize HTTPError with `response` object and `status`.
"""
self.reason = reason
super(HTTPError, self).__init__(reason)
class StopCCEIteration(CCEError):
"""Exception to exit from the engine iteration."""
pass
class CCESplitError(CCEError):
"""Exception to exit the job in Split Task"""
pass
class QuitJobError(CCEError):
pass
<|reserved_special_token_1|>
"""APP Cloud Connect errors"""
class CCEError(Exception):
pass
class ConfigException(CCEError):
"""Config exception"""
pass
class FuncException(CCEError):
"""Ext function call exception"""
pass
class HTTPError(CCEError):
""" HTTPError raised when HTTP request returned a error."""
def __init__(self, reason=None):
"""
Initialize HTTPError with `response` object and `status`.
"""
self.reason = reason
super(HTTPError, self).__init__(reason)
class StopCCEIteration(CCEError):
"""Exception to exit from the engine iteration."""
pass
class CCESplitError(CCEError):
"""Exception to exit the job in Split Task"""
pass
class QuitJobError(CCEError):
pass
|
flexible
|
{
"blob_id": "e2840eb1b0d731d6b0356835ba371d05ba351ff6",
"index": 5323,
"step-1": "<mask token>\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-2": "<mask token>\n\n\nclass FuncException(CCEError):\n <mask token>\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-3": "<mask token>\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-4": "<mask token>\n\n\nclass ConfigException(CCEError):\n \"\"\"Config exception\"\"\"\n pass\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-5": "\"\"\"APP Cloud Connect errors\"\"\"\n\n\nclass CCEError(Exception):\n pass\n\n\nclass ConfigException(CCEError):\n \"\"\"Config exception\"\"\"\n pass\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-ids": [
8,
9,
10,
12,
14
]
}
|
[
8,
9,
10,
12,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def all_match_data(year):
"""
Searches through the parse_matches data for all games in a specific season prints them out with a game ID and
returns the data in a list to the main program
:param year: Specific format YYYY between 2008 - 2017
:return: year_match_data
"""
year_match_data = []
match_year_data = pm()
for count in range(len(match_year_data)):
if year == match_year_data[count][1]:
year_match_data.append(match_year_data[count])
for count in range(len(year_match_data)):
print(
f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs {year_match_data[count][5]}'
)
return year_match_data
<|reserved_special_token_1|>
from get_info import parse_matches as pm
def all_match_data(year):
"""
Searches through the parse_matches data for all games in a specific season prints them out with a game ID and
returns the data in a list to the main program
:param year: Specific format YYYY between 2008 - 2017
:return: year_match_data
"""
year_match_data = []
match_year_data = pm()
for count in range(len(match_year_data)):
if year == match_year_data[count][1]:
year_match_data.append(match_year_data[count])
for count in range(len(year_match_data)):
print(
f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs {year_match_data[count][5]}'
)
return year_match_data
<|reserved_special_token_1|>
from get_info import parse_matches as pm
def all_match_data(year):
"""
Searches through the parse_matches data for all games in a specific season prints them out with a game ID and
returns the data in a list to the main program
:param year: Specific format YYYY between 2008 - 2017
:return: year_match_data
"""
year_match_data = []
match_year_data = pm()
for count in range(len(match_year_data)):
if year == match_year_data[count][1]:
year_match_data.append(match_year_data[count])
for count in range(len(year_match_data)):
print(
f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs '
f'{year_match_data[count][5]}')
return year_match_data
|
flexible
|
{
"blob_id": "bc53af24bb46d2be3122e290c4732b312f4ebdf5",
"index": 5313,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs {year_match_data[count][5]}'\n )\n return year_match_data\n",
"step-3": "from get_info import parse_matches as pm\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs {year_match_data[count][5]}'\n )\n return year_match_data\n",
"step-4": "from get_info import parse_matches as pm\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs '\n f'{year_match_data[count][5]}')\n\n return year_match_data\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Defaults(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Defaults(object):
INBUS_VERSION = 2
LOCALHOST = '127.0.0.1'
PORT = 7222
INBUS_ADDRESS = LOCALHOST, PORT
BUFFER_SIZE = 65536
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Maarten Los
# See LICENSE.rst for details.
class Defaults(object):
INBUS_VERSION = 2
LOCALHOST = "127.0.0.1"
PORT = 7222
INBUS_ADDRESS = (LOCALHOST, PORT)
BUFFER_SIZE = 65536
|
flexible
|
{
"blob_id": "bc087482e901ce1831cef56aa9c7aef0c8f2d15a",
"index": 1793,
"step-1": "<mask token>\n",
"step-2": "class Defaults(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "class Defaults(object):\n INBUS_VERSION = 2\n LOCALHOST = '127.0.0.1'\n PORT = 7222\n INBUS_ADDRESS = LOCALHOST, PORT\n BUFFER_SIZE = 65536\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2017 Maarten Los\n# See LICENSE.rst for details.\n\n\nclass Defaults(object):\n INBUS_VERSION = 2\n LOCALHOST = \"127.0.0.1\"\n PORT = 7222\n INBUS_ADDRESS = (LOCALHOST, PORT)\n BUFFER_SIZE = 65536\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def calculo_suma():
print('---Funcion con Python---')
print('la sumatoria de los valores: ', dato['Bronce'].sum())
print('---Funcion con Numpy---')
print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))
print('---Otras Formas---')
print(dato.Bronce.sum())
print(numpy.sum(dato.Bronce))
def calculo_conteo():
print('---Funcion de Python---')
print('Los número de elementos son :', len(dato['Bronce']))
print(len(dato.Bronce))
print('---Funcion de Pandas---')
print('Los número de elementos son :', dato['Bronce'].count())
print(dato.Bronce.count())
print('---Funcion de Numpy---')
print('Los número de elementos son :', numpy.size(dato['Bronce']))
print(numpy.size(dato.Bronce))
def calculo_media():
print('---Funcion de Python---')
print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())
print('---Funcion de Pandas---')
print('La media es: ', dato.Bronce.mean())
print('---Funcion de Numpy---')
print('La media es: ', numpy.mean(dato.Bronce))
def calculo_media2(redondeo=2):
print('---Mediana con 2 decimales---')
media = dato.Bronce.mean()
media = round(media, redondeo)
return media
def calculo_moda():
moda = dato.Bronce.mode()
return moda
def calculo_mediana():
nro_item = numpy.size(dato.Bronce)
pos_mediana = round(nro_item / 2)
print('Posicion mediana: ', pos_mediana)
mediana = dato.Bronce[pos_mediana - 1]
return mediana
def calculo_percentiles():
tramos = [20, 50, 75]
percentiles = numpy.percentile(dato['Bronce'], tramos)
print('Percentiles', percentiles)
def grafico_percentil():
import matplotlib.pylab as plt
import seaborn as sb
sb.boxplot(y='Bronce', data=dato)
plt.show()
def calculo_varianza():
vari = numpy.var(dato)
print('La varianza es:', vari)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(dato)
def calculo_suma():
print('---Funcion con Python---')
print('la sumatoria de los valores: ', dato['Bronce'].sum())
print('---Funcion con Numpy---')
print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))
print('---Otras Formas---')
print(dato.Bronce.sum())
print(numpy.sum(dato.Bronce))
def calculo_conteo():
print('---Funcion de Python---')
print('Los número de elementos son :', len(dato['Bronce']))
print(len(dato.Bronce))
print('---Funcion de Pandas---')
print('Los número de elementos son :', dato['Bronce'].count())
print(dato.Bronce.count())
print('---Funcion de Numpy---')
print('Los número de elementos son :', numpy.size(dato['Bronce']))
print(numpy.size(dato.Bronce))
def calculo_media():
print('---Funcion de Python---')
print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())
print('---Funcion de Pandas---')
print('La media es: ', dato.Bronce.mean())
print('---Funcion de Numpy---')
print('La media es: ', numpy.mean(dato.Bronce))
def calculo_media2(redondeo=2):
print('---Mediana con 2 decimales---')
media = dato.Bronce.mean()
media = round(media, redondeo)
return media
def calculo_moda():
moda = dato.Bronce.mode()
return moda
def calculo_mediana():
nro_item = numpy.size(dato.Bronce)
pos_mediana = round(nro_item / 2)
print('Posicion mediana: ', pos_mediana)
mediana = dato.Bronce[pos_mediana - 1]
return mediana
def calculo_percentiles():
tramos = [20, 50, 75]
percentiles = numpy.percentile(dato['Bronce'], tramos)
print('Percentiles', percentiles)
def grafico_percentil():
import matplotlib.pylab as plt
import seaborn as sb
sb.boxplot(y='Bronce', data=dato)
plt.show()
def calculo_varianza():
vari = numpy.var(dato)
print('La varianza es:', vari)
calculo_varianza()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dato = pd.read_csv('medallero_Panamericanos_Lima2019.csv')
print(dato)
def calculo_suma():
print('---Funcion con Python---')
print('la sumatoria de los valores: ', dato['Bronce'].sum())
print('---Funcion con Numpy---')
print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))
print('---Otras Formas---')
print(dato.Bronce.sum())
print(numpy.sum(dato.Bronce))
def calculo_conteo():
print('---Funcion de Python---')
print('Los número de elementos son :', len(dato['Bronce']))
print(len(dato.Bronce))
print('---Funcion de Pandas---')
print('Los número de elementos son :', dato['Bronce'].count())
print(dato.Bronce.count())
print('---Funcion de Numpy---')
print('Los número de elementos son :', numpy.size(dato['Bronce']))
print(numpy.size(dato.Bronce))
def calculo_media():
print('---Funcion de Python---')
print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())
print('---Funcion de Pandas---')
print('La media es: ', dato.Bronce.mean())
print('---Funcion de Numpy---')
print('La media es: ', numpy.mean(dato.Bronce))
def calculo_media2(redondeo=2):
print('---Mediana con 2 decimales---')
media = dato.Bronce.mean()
media = round(media, redondeo)
return media
def calculo_moda():
moda = dato.Bronce.mode()
return moda
def calculo_mediana():
nro_item = numpy.size(dato.Bronce)
pos_mediana = round(nro_item / 2)
print('Posicion mediana: ', pos_mediana)
mediana = dato.Bronce[pos_mediana - 1]
return mediana
def calculo_percentiles():
tramos = [20, 50, 75]
percentiles = numpy.percentile(dato['Bronce'], tramos)
print('Percentiles', percentiles)
def grafico_percentil():
import matplotlib.pylab as plt
import seaborn as sb
sb.boxplot(y='Bronce', data=dato)
plt.show()
def calculo_varianza():
vari = numpy.var(dato)
print('La varianza es:', vari)
calculo_varianza()
<|reserved_special_token_1|>
import pandas as pd
import numpy
dato = pd.read_csv('medallero_Panamericanos_Lima2019.csv')
print(dato)
def calculo_suma():
print('---Funcion con Python---')
print('la sumatoria de los valores: ', dato['Bronce'].sum())
print('---Funcion con Numpy---')
print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))
print('---Otras Formas---')
print(dato.Bronce.sum())
print(numpy.sum(dato.Bronce))
def calculo_conteo():
print('---Funcion de Python---')
print('Los número de elementos son :', len(dato['Bronce']))
print(len(dato.Bronce))
print('---Funcion de Pandas---')
print('Los número de elementos son :', dato['Bronce'].count())
print(dato.Bronce.count())
print('---Funcion de Numpy---')
print('Los número de elementos son :', numpy.size(dato['Bronce']))
print(numpy.size(dato.Bronce))
def calculo_media():
print('---Funcion de Python---')
print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())
print('---Funcion de Pandas---')
print('La media es: ', dato.Bronce.mean())
print('---Funcion de Numpy---')
print('La media es: ', numpy.mean(dato.Bronce))
def calculo_media2(redondeo=2):
print('---Mediana con 2 decimales---')
media = dato.Bronce.mean()
media = round(media, redondeo)
return media
def calculo_moda():
moda = dato.Bronce.mode()
return moda
def calculo_mediana():
nro_item = numpy.size(dato.Bronce)
pos_mediana = round(nro_item / 2)
print('Posicion mediana: ', pos_mediana)
mediana = dato.Bronce[pos_mediana - 1]
return mediana
def calculo_percentiles():
tramos = [20, 50, 75]
percentiles = numpy.percentile(dato['Bronce'], tramos)
print('Percentiles', percentiles)
def grafico_percentil():
import matplotlib.pylab as plt
import seaborn as sb
sb.boxplot(y='Bronce', data=dato)
plt.show()
def calculo_varianza():
vari = numpy.var(dato)
print('La varianza es:', vari)
calculo_varianza()
<|reserved_special_token_1|>
import pandas as pd
import numpy
dato=pd.read_csv('medallero_Panamericanos_Lima2019.csv')
print(dato)
def calculo_suma():
print("---Funcion con Python---")
print("la sumatoria de los valores: ", dato['Bronce'].sum())
print("---Funcion con Numpy---")
print("la sumatoria de los valores: ", numpy.sum(dato['Bronce']))
print("---Otras Formas---")
print(dato.Bronce.sum())
print(numpy.sum(dato.Bronce))
def calculo_conteo():
print("---Funcion de Python---")
print("Los número de elementos son :",len(dato['Bronce']))
print(len(dato.Bronce))
print("---Funcion de Pandas---")
print("Los número de elementos son :",dato['Bronce'].count())
print(dato.Bronce.count())
print("---Funcion de Numpy---")
print("Los número de elementos son :",numpy.size(dato['Bronce']))
print(numpy.size(dato.Bronce))
def calculo_media():
print("---Funcion de Python---")
print("La media es: ",dato.Bronce.sum()/dato.Bronce.count())
print("---Funcion de Pandas---")
print("La media es: ",dato.Bronce.mean())
print("---Funcion de Numpy---")
print("La media es: ",numpy.mean(dato.Bronce))
def calculo_media2(redondeo=2):
print("---Mediana con 2 decimales---")
media=dato.Bronce.mean()
media=round(media, redondeo)
return media
def calculo_moda():
moda=dato.Bronce.mode()
return moda
def calculo_mediana():
nro_item=numpy.size(dato.Bronce)
pos_mediana=round(nro_item/2)
print('Posicion mediana: ', pos_mediana)
mediana=dato.Bronce[pos_mediana-1]
return mediana
def calculo_percentiles():
tramos =[20, 50, 75]
percentiles=numpy.percentile(dato['Bronce'], tramos)
print('Percentiles', percentiles)
def grafico_percentil():
import matplotlib.pylab as plt
import seaborn as sb
sb.boxplot(y="Bronce", data=dato)
plt.show()
def calculo_varianza():
    """Print the variance of every numeric column of the global dataset."""
    print("La varianza es:", numpy.var(dato))


calculo_varianza()
|
flexible
|
{
"blob_id": "f5542cfe6827c352cc6e6da1147e727f2b2d8247",
"index": 9586,
"step-1": "<mask token>\n\n\ndef calculo_suma():\n print('---Funcion con Python---')\n print('la sumatoria de los valores: ', dato['Bronce'].sum())\n print('---Funcion con Numpy---')\n print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))\n print('---Otras Formas---')\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\n\ndef calculo_conteo():\n print('---Funcion de Python---')\n print('Los número de elementos son :', len(dato['Bronce']))\n print(len(dato.Bronce))\n print('---Funcion de Pandas---')\n print('Los número de elementos son :', dato['Bronce'].count())\n print(dato.Bronce.count())\n print('---Funcion de Numpy---')\n print('Los número de elementos son :', numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\n\ndef calculo_media():\n print('---Funcion de Python---')\n print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())\n print('---Funcion de Pandas---')\n print('La media es: ', dato.Bronce.mean())\n print('---Funcion de Numpy---')\n print('La media es: ', numpy.mean(dato.Bronce))\n\n\ndef calculo_media2(redondeo=2):\n print('---Mediana con 2 decimales---')\n media = dato.Bronce.mean()\n media = round(media, redondeo)\n return media\n\n\ndef calculo_moda():\n moda = dato.Bronce.mode()\n return moda\n\n\ndef calculo_mediana():\n nro_item = numpy.size(dato.Bronce)\n pos_mediana = round(nro_item / 2)\n print('Posicion mediana: ', pos_mediana)\n mediana = dato.Bronce[pos_mediana - 1]\n return mediana\n\n\ndef calculo_percentiles():\n tramos = [20, 50, 75]\n percentiles = numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y='Bronce', data=dato)\n plt.show()\n\n\ndef calculo_varianza():\n vari = numpy.var(dato)\n print('La varianza es:', vari)\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(dato)\n\n\ndef calculo_suma():\n print('---Funcion con Python---')\n print('la sumatoria de los valores: ', dato['Bronce'].sum())\n print('---Funcion con Numpy---')\n print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))\n print('---Otras Formas---')\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\n\ndef calculo_conteo():\n print('---Funcion de Python---')\n print('Los número de elementos son :', len(dato['Bronce']))\n print(len(dato.Bronce))\n print('---Funcion de Pandas---')\n print('Los número de elementos son :', dato['Bronce'].count())\n print(dato.Bronce.count())\n print('---Funcion de Numpy---')\n print('Los número de elementos son :', numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\n\ndef calculo_media():\n print('---Funcion de Python---')\n print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())\n print('---Funcion de Pandas---')\n print('La media es: ', dato.Bronce.mean())\n print('---Funcion de Numpy---')\n print('La media es: ', numpy.mean(dato.Bronce))\n\n\ndef calculo_media2(redondeo=2):\n print('---Mediana con 2 decimales---')\n media = dato.Bronce.mean()\n media = round(media, redondeo)\n return media\n\n\ndef calculo_moda():\n moda = dato.Bronce.mode()\n return moda\n\n\ndef calculo_mediana():\n nro_item = numpy.size(dato.Bronce)\n pos_mediana = round(nro_item / 2)\n print('Posicion mediana: ', pos_mediana)\n mediana = dato.Bronce[pos_mediana - 1]\n return mediana\n\n\ndef calculo_percentiles():\n tramos = [20, 50, 75]\n percentiles = numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y='Bronce', data=dato)\n plt.show()\n\n\ndef calculo_varianza():\n vari = numpy.var(dato)\n print('La varianza es:', vari)\n\n\ncalculo_varianza()\n",
"step-3": "<mask token>\ndato = pd.read_csv('medallero_Panamericanos_Lima2019.csv')\nprint(dato)\n\n\ndef calculo_suma():\n print('---Funcion con Python---')\n print('la sumatoria de los valores: ', dato['Bronce'].sum())\n print('---Funcion con Numpy---')\n print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))\n print('---Otras Formas---')\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\n\ndef calculo_conteo():\n print('---Funcion de Python---')\n print('Los número de elementos son :', len(dato['Bronce']))\n print(len(dato.Bronce))\n print('---Funcion de Pandas---')\n print('Los número de elementos son :', dato['Bronce'].count())\n print(dato.Bronce.count())\n print('---Funcion de Numpy---')\n print('Los número de elementos son :', numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\n\ndef calculo_media():\n print('---Funcion de Python---')\n print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())\n print('---Funcion de Pandas---')\n print('La media es: ', dato.Bronce.mean())\n print('---Funcion de Numpy---')\n print('La media es: ', numpy.mean(dato.Bronce))\n\n\ndef calculo_media2(redondeo=2):\n print('---Mediana con 2 decimales---')\n media = dato.Bronce.mean()\n media = round(media, redondeo)\n return media\n\n\ndef calculo_moda():\n moda = dato.Bronce.mode()\n return moda\n\n\ndef calculo_mediana():\n nro_item = numpy.size(dato.Bronce)\n pos_mediana = round(nro_item / 2)\n print('Posicion mediana: ', pos_mediana)\n mediana = dato.Bronce[pos_mediana - 1]\n return mediana\n\n\ndef calculo_percentiles():\n tramos = [20, 50, 75]\n percentiles = numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y='Bronce', data=dato)\n plt.show()\n\n\ndef calculo_varianza():\n vari = numpy.var(dato)\n print('La varianza es:', vari)\n\n\ncalculo_varianza()\n",
"step-4": "import pandas as pd\nimport numpy\ndato = pd.read_csv('medallero_Panamericanos_Lima2019.csv')\nprint(dato)\n\n\ndef calculo_suma():\n print('---Funcion con Python---')\n print('la sumatoria de los valores: ', dato['Bronce'].sum())\n print('---Funcion con Numpy---')\n print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))\n print('---Otras Formas---')\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\n\ndef calculo_conteo():\n print('---Funcion de Python---')\n print('Los número de elementos son :', len(dato['Bronce']))\n print(len(dato.Bronce))\n print('---Funcion de Pandas---')\n print('Los número de elementos son :', dato['Bronce'].count())\n print(dato.Bronce.count())\n print('---Funcion de Numpy---')\n print('Los número de elementos son :', numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\n\ndef calculo_media():\n print('---Funcion de Python---')\n print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())\n print('---Funcion de Pandas---')\n print('La media es: ', dato.Bronce.mean())\n print('---Funcion de Numpy---')\n print('La media es: ', numpy.mean(dato.Bronce))\n\n\ndef calculo_media2(redondeo=2):\n print('---Mediana con 2 decimales---')\n media = dato.Bronce.mean()\n media = round(media, redondeo)\n return media\n\n\ndef calculo_moda():\n moda = dato.Bronce.mode()\n return moda\n\n\ndef calculo_mediana():\n nro_item = numpy.size(dato.Bronce)\n pos_mediana = round(nro_item / 2)\n print('Posicion mediana: ', pos_mediana)\n mediana = dato.Bronce[pos_mediana - 1]\n return mediana\n\n\ndef calculo_percentiles():\n tramos = [20, 50, 75]\n percentiles = numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y='Bronce', data=dato)\n plt.show()\n\n\ndef calculo_varianza():\n vari = numpy.var(dato)\n print('La varianza es:', vari)\n\n\ncalculo_varianza()\n",
"step-5": "import pandas as pd\nimport numpy\n\ndato=pd.read_csv('medallero_Panamericanos_Lima2019.csv')\nprint(dato)\n\ndef calculo_suma():\n print(\"---Funcion con Python---\")\n print(\"la sumatoria de los valores: \", dato['Bronce'].sum())\n print(\"---Funcion con Numpy---\")\n print(\"la sumatoria de los valores: \", numpy.sum(dato['Bronce']))\n print(\"---Otras Formas---\")\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\ndef calculo_conteo():\n print(\"---Funcion de Python---\")\n print(\"Los número de elementos son :\",len(dato['Bronce']))\n print(len(dato.Bronce))\n print(\"---Funcion de Pandas---\")\n print(\"Los número de elementos son :\",dato['Bronce'].count())\n print(dato.Bronce.count())\n print(\"---Funcion de Numpy---\")\n print(\"Los número de elementos son :\",numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\ndef calculo_media():\n print(\"---Funcion de Python---\")\n print(\"La media es: \",dato.Bronce.sum()/dato.Bronce.count())\n print(\"---Funcion de Pandas---\")\n print(\"La media es: \",dato.Bronce.mean())\n print(\"---Funcion de Numpy---\")\n print(\"La media es: \",numpy.mean(dato.Bronce))\n\ndef calculo_media2(redondeo=2):\n print(\"---Mediana con 2 decimales---\")\n media=dato.Bronce.mean()\n media=round(media, redondeo)\n return media\n\ndef calculo_moda():\n moda=dato.Bronce.mode()\n return moda\ndef calculo_mediana():\n nro_item=numpy.size(dato.Bronce)\n pos_mediana=round(nro_item/2)\n print('Posicion mediana: ', pos_mediana)\n mediana=dato.Bronce[pos_mediana-1]\n return mediana\n\ndef calculo_percentiles():\n tramos =[20, 50, 75]\n percentiles=numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y=\"Bronce\", data=dato)\n plt.show()\n\ndef calculo_varianza():\n vari=numpy.var(dato)\n print(\"La varianza es:\" ,vari)\n\ncalculo_varianza()\n",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
import os
import zipfile
import cv2
import numpy as np
from sklearn import svm
from sklearn import cross_validation
from sklearn.externals import joblib
import matplotlib.pyplot as plt
""" Global constants """
data_zip = "data.zip" # The zip archive
clean_files = [".csv", ".jpg"] # File extensions to clean
data_file = "data.csv"
img_ext = ".jpg"
perf_file = "performance.txt"
def unzip_data():
    """Extract every member of the data archive into the working directory."""
    with zipfile.ZipFile(data_zip, 'r') as archive:
        archive.extractall('')
def clean_data():
    """Delete every file in the working directory matching the cleanup extensions."""
    for ext in clean_files:
        for name in os.listdir("."):
            if name.endswith(ext):
                os.remove(name)
def downscale_image(img, bottom, x, y):
    """Crop the bottom `bottom` fraction of *img* (trimming one-pixel borders),
    resize it to (x, y) and binarise it around its mean intensity.

    Returns the thresholded image.
    """
    height, width = img.shape[0], img.shape[1]
    top_row = int(round((1 - bottom) * (height - 1)))
    cropped = img[top_row:height - 1, 1:width - 1]
    small = cv2.resize(cropped, (x, y))
    _, binary = cv2.threshold(small, small.mean(), 255, cv2.THRESH_BINARY)
    return binary
def main():
    """Full pipeline: unpack data, build features/labels, cross-validate an
    SVM, persist it, and report per-sample decision distances. (Python 2.)"""
    unzip_data()
    labels = []
    """ The labels """
    # Columns: filename plus six binary class columns; only the 'one' column
    # is used below, mapped to +1/-1 for the SVM.
    data = np.genfromtxt(
        data_file, # file name
        skip_header=0, # lines to skip at the top
        skip_footer=0, # lines to skip at the bottom
        delimiter=',', # column delimiter
        dtype='int', # data type
        filling_values=0, # fill missing values with 0
        usecols=(0, 1, 2, 3, 4, 5, 6), # columns to read
        names=[
            'filename',
            'one',
            'two',
            'three',
            'four',
            'five',
            'six'
        ] # column names
    )
    for ones in data['one']:
        if ones:
            labels.append(1)
        else:
            labels.append(-1)
    """ The features """
    # NOTE(review): this loop re-implements downscale_image() inline
    # (crop bottom 40%, resize to 5x12, threshold at the mean) -- the helper
    # could be reused instead of duplicating the logic.
    x = 5
    y = 12
    bottom = 0.4
    features = []
    for name in data['filename']:
        """ Load the image """
        name_ext = str(name) + img_ext
        img = cv2.imread(name_ext, 0)
        """ Take bottom section"""
        width, height = tuple(img.shape[1::-1])
        img = img[int(round((1 - bottom) * (height - 1))):(height - 1), 1:(width - 1)]
        bottom_ext = str(name) + "_bottom_"+ img_ext
        cv2.imwrite(bottom_ext,img)
        """ Scale down """
        img = cv2.resize(img, (x, y))
        ret, img = cv2.threshold(img, img.mean(), 255, cv2.THRESH_BINARY)
        scale_ext = str(name) + "_scale_"+ img_ext
        """ Scale back up only to save """
        cv2.imwrite(scale_ext,cv2.resize(img, (100*x, 100*y)))
        """ Add to list of training features """
        features.append(img.flatten())
    """ Train and validate the classifier """
    # NOTE(review): loops = 2 makes range(1, 2) run a single split, so the
    # running-mean list holds exactly one entry -- confirm this is intended.
    loops = 2
    acc = 0
    mean = []
    for i in range(1, loops):
        """ Split data for cross validation """
        features_train, features_test, labels_train, labels_test = \
            cross_validation.train_test_split(features, labels, test_size=0.2, random_state=10)
        """ Train """
        clf = svm.SVC(gamma=0.001)
        clf.fit(features_train, labels_train)
        """ Score """
        acc += clf.score(features_test, labels_test)
        mean.append(acc/i)
    """ Write performance to file to keep track """
    f = open(perf_file, 'w')
    f.write("Performance: " + str(mean[-1]))
    f.close()
    """ Train on all the data """
    clf = svm.SVC(gamma=0.001)
    clf.fit(features, labels)
    """ Save the classifier """
    joblib.dump(clf, "bottom.clf")
    """ Decision function """
    distances = clf.decision_function(features)
    """ False positives and negatives, look out for uncertainity """
    # Positive label with negative margin => false negative (and vice versa);
    # a margin magnitude below 0.9 is additionally flagged as uncertain.
    for i in range(0,len(distances)):
        print i+1,distances[i],
        if labels[i] > 0:
            if distances[i] < 0:
                print "\t\tFALSE NEGATIVE",
            else:
                print "\t\tPOSITIVE",
        else:
            if distances[i] > 0:
                print "\t\tFALSE POSITIVE",
            else:
                print "\t\tNEGATIVE",
        if(abs(distances[i]) < 0.9):
            print "\t\tUNCERTAIN"
        else:
            print ""
    """ remove temp data """
    #clean_data()
    """ Ensure the mean has converged """
    #plt.plot(mean)
    #plt.show() # WILL STALL HERE
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "d2da95f44e814accd3a91c5e8497ceff85c98711",
"index": 2848,
"step-1": "import os\nimport zipfile\nimport cv2\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn import cross_validation\nfrom sklearn.externals import joblib\nimport matplotlib.pyplot as plt\n\n\n\"\"\" Global constants \"\"\"\ndata_zip = \"data.zip\" # The zip archive\nclean_files = [\".csv\", \".jpg\"] # File extensions to clean\ndata_file = \"data.csv\"\nimg_ext = \".jpg\"\nperf_file = \"performance.txt\"\n\n\ndef unzip_data():\n \"\"\" Unzip the data held in zip file \"\"\"\n zip_ref = zipfile.ZipFile(data_zip, 'r')\n zip_ref.extractall('')\n zip_ref.close()\n\n\ndef clean_data():\n \"\"\" Clean up all the unzipped data \"\"\"\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)\n\n\ndef downscale_image(img, bottom, x, y):\n \"\"\"\n Take bottom section of image\n Rescale\n Canny edge detection\n \"\"\"\n width, height = tuple(img.shape[1::-1])\n img = img[int(round((1 - bottom) * (height - 1))):(height - 1), 1:(width - 1)]\n img = cv2.resize(img, (x, y))\n #img = cv2.Canny(img, 100, 200)\n ret, img = cv2.threshold(img, img.mean(), 255, cv2.THRESH_BINARY)\n return img\n\n\ndef main():\n unzip_data()\n\n labels = []\n\n \"\"\" The labels \"\"\"\n data = np.genfromtxt(\n data_file, # file name\n skip_header=0, # lines to skip at the top\n skip_footer=0, # lines to skip at the bottom\n delimiter=',', # column delimiter\n dtype='int', # data type\n filling_values=0, # fill missing values with 0\n usecols=(0, 1, 2, 3, 4, 5, 6), # columns to read\n names=[\n 'filename',\n 'one',\n 'two',\n 'three',\n 'four',\n 'five',\n 'six'\n ] # column names\n )\n for ones in data['one']:\n if ones:\n labels.append(1)\n else:\n labels.append(-1)\n\n \"\"\" The features \"\"\"\n x = 5\n y = 12\n bottom = 0.4\n features = []\n for name in data['filename']:\n \"\"\" Load the image \"\"\"\n name_ext = str(name) + img_ext\n img = cv2.imread(name_ext, 0)\n \"\"\" Take bottom 
section\"\"\"\n width, height = tuple(img.shape[1::-1])\n img = img[int(round((1 - bottom) * (height - 1))):(height - 1), 1:(width - 1)]\n bottom_ext = str(name) + \"_bottom_\"+ img_ext\n cv2.imwrite(bottom_ext,img)\n \"\"\" Scale down \"\"\"\n img = cv2.resize(img, (x, y))\n ret, img = cv2.threshold(img, img.mean(), 255, cv2.THRESH_BINARY)\n scale_ext = str(name) + \"_scale_\"+ img_ext\n \"\"\" Scale back up only to save \"\"\"\n cv2.imwrite(scale_ext,cv2.resize(img, (100*x, 100*y)))\n \"\"\" Add to list of training features \"\"\"\n features.append(img.flatten())\n\n \"\"\" Train and validate the classifier \"\"\"\n loops = 2\n acc = 0\n mean = []\n for i in range(1, loops):\n \"\"\" Split data for cross validation \"\"\"\n features_train, features_test, labels_train, labels_test = \\\n cross_validation.train_test_split(features, labels, test_size=0.2, random_state=10)\n\n \"\"\" Train \"\"\"\n clf = svm.SVC(gamma=0.001)\n clf.fit(features_train, labels_train)\n\n \"\"\" Score \"\"\"\n acc += clf.score(features_test, labels_test)\n mean.append(acc/i)\n\n \"\"\" Write performance to file to keep track \"\"\"\n f = open(perf_file, 'w')\n f.write(\"Performance: \" + str(mean[-1]))\n f.close()\n\n \"\"\" Train on all the data \"\"\"\n clf = svm.SVC(gamma=0.001)\n clf.fit(features, labels)\n\n \"\"\" Save the classifier \"\"\"\n joblib.dump(clf, \"bottom.clf\")\n\n \"\"\" Decision function \"\"\"\n distances = clf.decision_function(features)\n\n \"\"\" False positives and negatives, look out for uncertainity \"\"\"\n for i in range(0,len(distances)):\n print i+1,distances[i],\n if labels[i] > 0:\n if distances[i] < 0:\n print \"\\t\\tFALSE NEGATIVE\",\n else:\n print \"\\t\\tPOSITIVE\",\n else:\n if distances[i] > 0:\n print \"\\t\\tFALSE POSITIVE\",\n else:\n print \"\\t\\tNEGATIVE\",\n if(abs(distances[i]) < 0.9):\n print \"\\t\\tUNCERTAIN\"\n else:\n print \"\"\n\n \"\"\" remove temp data \"\"\"\n #clean_data()\n\n \"\"\" Ensure the mean has converged \"\"\"\n 
#plt.plot(mean)\n #plt.show() # WILL STALL HERE\n\nif __name__ == \"__main__\":\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def test_create_all():
eng = create_engine('cql://user:password@localhost:49154/system')
metadata.create_all(eng)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_create_engine():
eng = create_engine('cql://user:password@localhost:49154/system')
assert eng.execute('select * from system.schema_keyspaces')
def test_table_names():
eng = create_engine('cql://user:password@localhost:49154/system')
eng.table_names()
def test_create_all():
eng = create_engine('cql://user:password@localhost:49154/system')
metadata.create_all(eng)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
metadata = MetaData()
users = Table('users', metadata, Column('id', Integer, primary_key=True),
Column('name', String), Column('fullname', String))
def test_create_engine():
eng = create_engine('cql://user:password@localhost:49154/system')
assert eng.execute('select * from system.schema_keyspaces')
def test_table_names():
eng = create_engine('cql://user:password@localhost:49154/system')
eng.table_names()
def test_create_all():
eng = create_engine('cql://user:password@localhost:49154/system')
metadata.create_all(eng)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pytest
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
metadata = MetaData()
users = Table('users', metadata, Column('id', Integer, primary_key=True),
Column('name', String), Column('fullname', String))
def test_create_engine():
eng = create_engine('cql://user:password@localhost:49154/system')
assert eng.execute('select * from system.schema_keyspaces')
def test_table_names():
eng = create_engine('cql://user:password@localhost:49154/system')
eng.table_names()
def test_create_all():
eng = create_engine('cql://user:password@localhost:49154/system')
metadata.create_all(eng)
<|reserved_special_token_1|>
"""
Tests for `sqlalchemy-cql` module.
"""
import pytest
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
metadata = MetaData()
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('fullname', String),
)
def test_create_engine():
eng = create_engine("cql://user:password@localhost:49154/system")
assert eng.execute("select * from system.schema_keyspaces")
def test_table_names():
eng = create_engine("cql://user:password@localhost:49154/system")
eng.table_names()
def test_create_all():
eng = create_engine("cql://user:password@localhost:49154/system")
metadata.create_all(eng)
|
flexible
|
{
"blob_id": "f5b18673dd5a3ba3070c07e88ae83a531669311a",
"index": 2139,
"step-1": "<mask token>\n\n\ndef test_create_all():\n eng = create_engine('cql://user:password@localhost:49154/system')\n metadata.create_all(eng)\n",
"step-2": "<mask token>\n\n\ndef test_create_engine():\n eng = create_engine('cql://user:password@localhost:49154/system')\n assert eng.execute('select * from system.schema_keyspaces')\n\n\ndef test_table_names():\n eng = create_engine('cql://user:password@localhost:49154/system')\n eng.table_names()\n\n\ndef test_create_all():\n eng = create_engine('cql://user:password@localhost:49154/system')\n metadata.create_all(eng)\n",
"step-3": "<mask token>\nmetadata = MetaData()\nusers = Table('users', metadata, Column('id', Integer, primary_key=True),\n Column('name', String), Column('fullname', String))\n\n\ndef test_create_engine():\n eng = create_engine('cql://user:password@localhost:49154/system')\n assert eng.execute('select * from system.schema_keyspaces')\n\n\ndef test_table_names():\n eng = create_engine('cql://user:password@localhost:49154/system')\n eng.table_names()\n\n\ndef test_create_all():\n eng = create_engine('cql://user:password@localhost:49154/system')\n metadata.create_all(eng)\n",
"step-4": "<mask token>\nimport pytest\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey\nmetadata = MetaData()\nusers = Table('users', metadata, Column('id', Integer, primary_key=True),\n Column('name', String), Column('fullname', String))\n\n\ndef test_create_engine():\n eng = create_engine('cql://user:password@localhost:49154/system')\n assert eng.execute('select * from system.schema_keyspaces')\n\n\ndef test_table_names():\n eng = create_engine('cql://user:password@localhost:49154/system')\n eng.table_names()\n\n\ndef test_create_all():\n eng = create_engine('cql://user:password@localhost:49154/system')\n metadata.create_all(eng)\n",
"step-5": "\"\"\"\nTests for `sqlalchemy-cql` module.\n\"\"\"\nimport pytest\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey\n\nmetadata = MetaData()\nusers = Table('users', metadata,\n Column('id', Integer, primary_key=True),\n Column('name', String),\n Column('fullname', String),\n)\n\ndef test_create_engine():\n eng = create_engine(\"cql://user:password@localhost:49154/system\")\n assert eng.execute(\"select * from system.schema_keyspaces\")\n\n\ndef test_table_names():\n eng = create_engine(\"cql://user:password@localhost:49154/system\")\n eng.table_names()\n\n\ndef test_create_all():\n eng = create_engine(\"cql://user:password@localhost:49154/system\")\n metadata.create_all(eng)",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# Reads T test cases; each case gives N and K. Simulates K rounds in which the
# largest remaining segment is removed and replaced by its two halves, then
# reports the two sizes (larger, smaller) produced by the K-th split.
# Appears to be a brute-force solution to Code Jam's "Bathroom Stalls" -- TODO
# confirm. NOTE: Python 2 code (raw_input/xrange/print statement, floor "/").
import sys
import bisect

t = int(raw_input())

for i in xrange(1, t+1):
    n, k = map(int, raw_input().strip().split())
    s = [n]  # kept sorted ascending; s.pop() removes the current maximum
    for j in xrange(k):
        num = s.pop()
        if num % 2 != 0:
            # Odd segment: both halves equal (num - 1) / 2 via floor division.
            ls = num/2
            lr = num/2
            if ls != 0:
                bisect.insort_left(s,ls)
            bisect.insort_left(s,lr)
        else:
            # Even segment: the two halves differ by one.
            ls = num/2 -1
            lr = num/2
            if ls != 0:
                bisect.insort_left(s,ls)
                bisect.insort_left(s,lr)
            else:
                bisect.insort_left(s,lr)

    print "Case #{}: {} {}".format(i, lr, ls)
|
normal
|
{
"blob_id": "488c111c051796b481794678cb04108fcf11ac39",
"index": 5778,
"step-1": "import sys\nimport bisect\n\nt = int(raw_input())\n\nfor i in xrange(1, t+1):\n n, k = map(int, raw_input().strip().split())\n s = [n]\n for j in xrange(k):\n num = s.pop()\n if num % 2 != 0:\n ls = num/2\n lr = num/2\n if ls != 0:\n bisect.insort_left(s,ls)\n bisect.insort_left(s,lr)\n else:\n ls = num/2 -1\n lr = num/2\n if ls != 0:\n bisect.insort_left(s,ls)\n bisect.insort_left(s,lr)\n else:\n bisect.insort_left(s,lr) \n \n print \"Case #{}: {} {}\".format(i, lr, ls)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import ga.ga as ga
import os
import datetime
def ga_optimise(synth, param_count, target, output_dir, iterations = 10, pop_size = 500):
	"""Run one GA search for synth parameter settings matching `target` MFCCs.

	Thin wrapper over ga.ga_optimise: crossover count is fixed to a fifth of
	the parameter count and mutation rate to 0.5; run logs go to output_dir.
	Returns whatever ga.ga_optimise returns (presumably the fitness history
	-- confirm against the ga module).
	"""
	fs = ga.ga_optimise(compute_population_fitnesses = ga.compute_population_fitnesses,
				target = target,
				synth = synth,
				param_count = param_count,
				iterations = iterations,
				pop_size = pop_size,
				crossovers = param_count / 5,
				mutation_rate = 0.5,
				log = True,
				data_folder = output_dir)
	return fs
# Driver: render each of the 32 DX10 presets to a target .wav, then run the
# GA against every target, logging each run in its own folder. (Python 2.)
if __name__ == '__main__':
	vst_synth = "../mda DX10.vst"
	vst_param_count = 15
	# NOTE(review): lowercase "%s" in strftime is seconds-since-epoch on
	# glibc (platform-dependent); "%S" may have been intended -- confirm.
	target_dir = "../runs/" + datetime.datetime.now().strftime("%Y%m%d%H%M%s") + "/"
	os.mkdir(target_dir)
	print "Generating set of target sounds from 32 presets on "+vst_synth
	# first generate the target sounds
	# which are the 32 presets from the synth
	for i in range(0, 32):
		filename = target_dir + "preset_"+str(i)+".wav"
		print "Target "+str(i)+": "+filename
		ga.render_preset(vst_synth, i, filename)
	for i in range(0, 32):
		filename = target_dir + "preset_"+str(i)+".wav"
		print "Looking for target: "+filename
		target_mfccs = ga.wav_to_mfcc(filename)
		data_folder = target_dir + "_preset_"+str(i) + "/"
		try:
			os.mkdir(data_folder)
		except:
			print "data folder already there."
		ga.string_to_file("synth: "+vst_synth + "\npreset: "+str(i), data_folder + "details.txt")
		ga_optimise(vst_synth, vst_param_count, target_mfccs, data_folder)
	# targets = ga.get_files_in_dir(test_dir, filter = "wav")
	# for target in targets:
	# 	print "Looking for "+target
	# 	target_mfccs = ga.wav_to_mfcc("test.wav")
	# 	data_folder = "data/data_"+target+"/"
	# 	try:
	# 		os.mkdir(data_folder)
	# 	except:
	# 		print "data folder already there."
	# 	ga_optimise(vst_synth, vst_param_count, target_mfccs, data_folder)
|
normal
|
{
"blob_id": "4bc9896847e4ab92a01dfcf674362140cc31ef4f",
"index": 5587,
"step-1": "import ga.ga as ga\nimport os\nimport datetime\n\n\ndef ga_optimise(synth, param_count, target, output_dir, iterations = 10, pop_size = 500):\n\tfs = ga.ga_optimise(compute_population_fitnesses = ga.compute_population_fitnesses, \n\t\t\t\ttarget = target, \n\t\t\t\tsynth = synth, \n\t\t\t\tparam_count = param_count, \n\t\t\t\titerations = iterations, \n\t\t\t\tpop_size = pop_size, \n\t\t\t\tcrossovers = param_count / 5, \n\t\t\t\tmutation_rate = 0.5, \n\t\t\t\tlog = True, \n\t\t\t\tdata_folder = output_dir)\n\treturn fs\n\n\nif __name__ == '__main__':\n\tvst_synth = \"../mda DX10.vst\"\n\tvst_param_count = 15\n\ttarget_dir = \"../runs/\" + datetime.datetime.now().strftime(\"%Y%m%d%H%M%s\") + \"/\"\n\tos.mkdir(target_dir)\n\tprint \"Generating set of target sounds from 32 presets on \"+vst_synth\n\t# first generate the target sounds\n\t# which are the 32 presets from the synth\n\tfor i in range(0, 32):\n\t\tfilename = target_dir + \"preset_\"+str(i)+\".wav\"\n\t\tprint \"Target \"+str(i)+\": \"+filename\n\t\tga.render_preset(vst_synth, i, filename)\n\n\tfor i in range(0, 32):\n\t\tfilename = target_dir + \"preset_\"+str(i)+\".wav\"\n\t\tprint \"Looking for target: \"+filename\n\t\ttarget_mfccs = ga.wav_to_mfcc(filename)\n\t\tdata_folder = target_dir + \"_preset_\"+str(i) + \"/\"\t\t\n\t\ttry:\n\t\t\tos.mkdir(data_folder)\n\t\texcept:\n\t\t\tprint \"data folder already there.\"\n\t\tga.string_to_file(\"synth: \"+vst_synth + \"\\npreset: \"+str(i), data_folder + \"details.txt\")\n\t\tga_optimise(vst_synth, vst_param_count, target_mfccs, data_folder)\n\t\t\n\n\t# targets = ga.get_files_in_dir(test_dir, filter = \"wav\")\n\t# for target in targets:\n\t# \tprint \"Looking for \"+target\n\t# \ttarget_mfccs = ga.wav_to_mfcc(\"test.wav\")\n\t# \tdata_folder = \"data/data_\"+target+\"/\"\n\t# \ttry:\n\t# \t\tos.mkdir(data_folder)\n\t# \texcept:\n\t# \t\tprint \"data folder already there.\"\n\t# \tga_optimise(vst_synth, vst_param_count, target_mfccs, 
data_folder)\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# import sys
# class PriorityQueue:
# """Array-based priority queue implementation."""
#
# def __init__(self):
# """Initially empty priority queue."""
# self.queue = []
# self.min_index = None
# self.heap_size = 0
#
# def __len__(self):
# # Number of elements in the queue.
# return len(self.queue)
#
# def left(self, i):
# return 2 * i
#
# def right(self, i):
# return 2 * i + 1
#
# def parent(self, i):
# return i // 2
#
# def min_heapify(self, i):
# l = self.left(i)
# r = self.right(i)
# if l <= self.heap_size and self.queue[l-1] < self.queue[i-1]:
# least = l
# else:
# least = i
# if r <= self.heap_size and self.queue[r-1] < self.queue[i-1]:
# least = r
# if least != i:
# temp = self.queue[i-1]
# self.queue[i-1] = self.queue[least-1]
# self.queue[least-1] = temp
# self.min_heapify(least)
#
# # def build_min_heap(self):
# # self.heap_size = len(self.queue)
# # for i in range(len(self.queue) // 2, -1, -1):
# # self.min_heapify(i)
#
# def heap_increase_key(self, i, key):
# if key > self.queue[i-1]:
# raise ValueError("new key is larger than current key")
# self.queue[i-1] = key
# while i > 1 and self.queue[self.parent(i)-1] > self.queue[i-1]:
# tmp = self.queue[self.parent(i)-1]
# self.queue[self.parent(i)-1] = self.queue[i-1]
# self.queue[i-1] = tmp
# i = self.parent(i)
#
# def append(self, key):
# """Inserts an element in the priority queue."""
# if key is None:
# raise ValueError('Cannot insert None in the queue')
# self.heap_size += 1
# self.queue.insert(self.heap_size-1, sys.maxsize)
# self.heap_increase_key(self.heap_size, key)
# self.min_index = None
#
# def min(self):
# """The smallest element in the queue."""
# if self.heap_size == 0:
# return None
# return self.queue[0]
#
# def pop(self):
# """Removes the minimum element in the queue.
#
# Returns:
# The value of the removed element.
# """
# if self.heap_size == 0:
# return None
# self._find_min()
# popped_key = self.queue.pop(self.min_index)
# self.heap_size -= 1
# print(self.queue, self.heap_size)
# if self.heap_size != 0:
# self.queue[0] = self.queue[self.heap_size-1]
# self.min_heapify(0)
# self.min_index = None
# return popped_key
#
# def _find_min(self):
# # Computes the index of the minimum element in the queue.
# #
# # This method may crash if called when the queue is empty.
# if self.min_index is not None:
# return
# self.min_index = 0
class PriorityQueue:
    """Heap-based priority queue implementation.

    Internally a 1-indexed binary min-heap: ``self.heap[0]`` is an unused
    sentinel, and for node ``i`` the children live at ``2*i`` and ``2*i + 1``.
    """

    def __init__(self):
        """Initially empty priority queue."""
        self.heap = [None]

    def __len__(self):
        # Number of elements in the queue (slot 0 is the sentinel).
        return len(self.heap) - 1

    def append(self, key):
        """Inserts an element in the priority queue."""
        if key is None:
            raise ValueError('Cannot insert None in the queue')
        i = len(self.heap)
        self.heap.append(key)
        # Sift the new key up until its parent is no larger.
        while i > 1:
            parent = i // 2
            if key < self.heap[parent]:
                self.heap[i], self.heap[parent] = self.heap[parent], key
                i = parent
            else:
                break

    def min(self):
        """Returns the smallest element in the queue."""
        return self.heap[1]

    def pop(self):
        """Removes the minimum element in the queue.

        Returns:
            The value of the removed element.
        """
        heap = self.heap
        popped_key = heap[1]
        if len(heap) == 2:
            return heap.pop()
        # Move the last leaf to the root, then sift it down.
        heap[1] = key = heap.pop()
        i = 1
        while True:
            left = i * 2
            if len(heap) <= left:
                break
            right = left + 1
            # Bug fix: the original used `right < len(heap) and heap[right]`,
            # which treats a falsy key (e.g. 0) as "no right child" and could
            # violate the heap invariant. Test the index, not key truthiness.
            if right < len(heap) and heap[right] < heap[left]:
                child = right
            else:
                child = left
            child_key = heap[child]
            if key <= child_key:
                break
            heap[i], heap[child] = child_key, key
            i = child
        return popped_key
# Ad-hoc smoke test: grow a queue and print the backing heap array after a
# few insertions to eyeball the min-heap invariant (index 0 is a sentinel).
A = PriorityQueue()
A.append(1)
A.append(4)
A.append(3)
print(A.heap)
A.append(2)
print(A.heap)
A.append(0)
print(A.heap)
A.append(7)
A.append(6)
A.append(5)
# Uncomment to drain the queue; pops should come out in ascending order.
# print(A.pop())
# print(A.pop())
# print(A.pop())
# print(A.pop())
# print(A.pop())
# print(A.pop())
# print(A.pop())
# print(A.pop())
|
normal
|
{
"blob_id": "f0630d248cfa575ee859e5c441deeb01b68c8150",
"index": 3741,
"step-1": "class PriorityQueue:\n <mask token>\n\n def __init__(self):\n \"\"\"Initially empty priority queue.\"\"\"\n self.heap = [None]\n\n def __len__(self):\n return len(self.heap) - 1\n\n def append(self, key):\n \"\"\"Inserts an element in the priority queue.\"\"\"\n if key is None:\n raise ValueError('Cannot insert None in the queue')\n i = len(self.heap)\n self.heap.append(key)\n while i > 1:\n parent = i // 2\n if key < self.heap[parent]:\n self.heap[i], self.heap[parent] = self.heap[parent], key\n i = parent\n else:\n break\n\n def min(self):\n \"\"\"Returns the smallest element in the queue.\"\"\"\n return self.heap[1]\n\n def pop(self):\n \"\"\"Removes the minimum element in the queue.\n\n Returns:\n The value of the removed element.\n \"\"\"\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n i = 1\n while True:\n left = i * 2\n if len(heap) <= left:\n break\n left_key = heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key\n\n\n<mask token>\n",
"step-2": "class PriorityQueue:\n \"\"\"Heap-based priority queue implementation.\"\"\"\n\n def __init__(self):\n \"\"\"Initially empty priority queue.\"\"\"\n self.heap = [None]\n\n def __len__(self):\n return len(self.heap) - 1\n\n def append(self, key):\n \"\"\"Inserts an element in the priority queue.\"\"\"\n if key is None:\n raise ValueError('Cannot insert None in the queue')\n i = len(self.heap)\n self.heap.append(key)\n while i > 1:\n parent = i // 2\n if key < self.heap[parent]:\n self.heap[i], self.heap[parent] = self.heap[parent], key\n i = parent\n else:\n break\n\n def min(self):\n \"\"\"Returns the smallest element in the queue.\"\"\"\n return self.heap[1]\n\n def pop(self):\n \"\"\"Removes the minimum element in the queue.\n\n Returns:\n The value of the removed element.\n \"\"\"\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n i = 1\n while True:\n left = i * 2\n if len(heap) <= left:\n break\n left_key = heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key\n\n\n<mask token>\n",
"step-3": "class PriorityQueue:\n \"\"\"Heap-based priority queue implementation.\"\"\"\n\n def __init__(self):\n \"\"\"Initially empty priority queue.\"\"\"\n self.heap = [None]\n\n def __len__(self):\n return len(self.heap) - 1\n\n def append(self, key):\n \"\"\"Inserts an element in the priority queue.\"\"\"\n if key is None:\n raise ValueError('Cannot insert None in the queue')\n i = len(self.heap)\n self.heap.append(key)\n while i > 1:\n parent = i // 2\n if key < self.heap[parent]:\n self.heap[i], self.heap[parent] = self.heap[parent], key\n i = parent\n else:\n break\n\n def min(self):\n \"\"\"Returns the smallest element in the queue.\"\"\"\n return self.heap[1]\n\n def pop(self):\n \"\"\"Removes the minimum element in the queue.\n\n Returns:\n The value of the removed element.\n \"\"\"\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n i = 1\n while True:\n left = i * 2\n if len(heap) <= left:\n break\n left_key = heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key\n\n\n<mask token>\nA.append(1)\nA.append(4)\nA.append(3)\nprint(A.heap)\nA.append(2)\nprint(A.heap)\nA.append(0)\nprint(A.heap)\nA.append(7)\nA.append(6)\nA.append(5)\n",
"step-4": "class PriorityQueue:\n \"\"\"Heap-based priority queue implementation.\"\"\"\n\n def __init__(self):\n \"\"\"Initially empty priority queue.\"\"\"\n self.heap = [None]\n\n def __len__(self):\n return len(self.heap) - 1\n\n def append(self, key):\n \"\"\"Inserts an element in the priority queue.\"\"\"\n if key is None:\n raise ValueError('Cannot insert None in the queue')\n i = len(self.heap)\n self.heap.append(key)\n while i > 1:\n parent = i // 2\n if key < self.heap[parent]:\n self.heap[i], self.heap[parent] = self.heap[parent], key\n i = parent\n else:\n break\n\n def min(self):\n \"\"\"Returns the smallest element in the queue.\"\"\"\n return self.heap[1]\n\n def pop(self):\n \"\"\"Removes the minimum element in the queue.\n\n Returns:\n The value of the removed element.\n \"\"\"\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n i = 1\n while True:\n left = i * 2\n if len(heap) <= left:\n break\n left_key = heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key\n\n\nA = PriorityQueue()\nA.append(1)\nA.append(4)\nA.append(3)\nprint(A.heap)\nA.append(2)\nprint(A.heap)\nA.append(0)\nprint(A.heap)\nA.append(7)\nA.append(6)\nA.append(5)\n",
"step-5": "# import sys\n# class PriorityQueue:\n# \"\"\"Array-based priority queue implementation.\"\"\"\n#\n# def __init__(self):\n# \"\"\"Initially empty priority queue.\"\"\"\n# self.queue = []\n# self.min_index = None\n# self.heap_size = 0\n#\n# def __len__(self):\n# # Number of elements in the queue.\n# return len(self.queue)\n#\n# def left(self, i):\n# return 2 * i\n#\n# def right(self, i):\n# return 2 * i + 1\n#\n# def parent(self, i):\n# return i // 2\n#\n# def min_heapify(self, i):\n# l = self.left(i)\n# r = self.right(i)\n# if l <= self.heap_size and self.queue[l-1] < self.queue[i-1]:\n# least = l\n# else:\n# least = i\n# if r <= self.heap_size and self.queue[r-1] < self.queue[i-1]:\n# least = r\n# if least != i:\n# temp = self.queue[i-1]\n# self.queue[i-1] = self.queue[least-1]\n# self.queue[least-1] = temp\n# self.min_heapify(least)\n#\n# # def build_min_heap(self):\n# # self.heap_size = len(self.queue)\n# # for i in range(len(self.queue) // 2, -1, -1):\n# # self.min_heapify(i)\n#\n# def heap_increase_key(self, i, key):\n# if key > self.queue[i-1]:\n# raise ValueError(\"new key is larger than current key\")\n# self.queue[i-1] = key\n# while i > 1 and self.queue[self.parent(i)-1] > self.queue[i-1]:\n# tmp = self.queue[self.parent(i)-1]\n# self.queue[self.parent(i)-1] = self.queue[i-1]\n# self.queue[i-1] = tmp\n# i = self.parent(i)\n#\n# def append(self, key):\n# \"\"\"Inserts an element in the priority queue.\"\"\"\n# if key is None:\n# raise ValueError('Cannot insert None in the queue')\n# self.heap_size += 1\n# self.queue.insert(self.heap_size-1, sys.maxsize)\n# self.heap_increase_key(self.heap_size, key)\n# self.min_index = None\n#\n# def min(self):\n# \"\"\"The smallest element in the queue.\"\"\"\n# if self.heap_size == 0:\n# return None\n# return self.queue[0]\n#\n# def pop(self):\n# \"\"\"Removes the minimum element in the queue.\n#\n# Returns:\n# The value of the removed element.\n# \"\"\"\n# if self.heap_size == 0:\n# return None\n# 
self._find_min()\n# popped_key = self.queue.pop(self.min_index)\n# self.heap_size -= 1\n# print(self.queue, self.heap_size)\n# if self.heap_size != 0:\n# self.queue[0] = self.queue[self.heap_size-1]\n# self.min_heapify(0)\n# self.min_index = None\n# return popped_key\n#\n# def _find_min(self):\n# # Computes the index of the minimum element in the queue.\n# #\n# # This method may crash if called when the queue is empty.\n# if self.min_index is not None:\n# return\n# self.min_index = 0\n\nclass PriorityQueue:\n \"\"\"Heap-based priority queue implementation.\"\"\"\n\n def __init__(self):\n \"\"\"Initially empty priority queue.\"\"\"\n self.heap = [None]\n\n def __len__(self):\n # Number of elements in the queue.\n return len(self.heap) - 1\n\n def append(self, key):\n \"\"\"Inserts an element in the priority queue.\"\"\"\n if key is None:\n raise ValueError('Cannot insert None in the queue')\n\n i = len(self.heap)\n self.heap.append(key)\n while i > 1:\n parent = i // 2\n if key < self.heap[parent]:\n self.heap[i], self.heap[parent] = self.heap[parent], key\n i = parent\n else:\n break\n\n def min(self):\n \"\"\"Returns the smallest element in the queue.\"\"\"\n return self.heap[1]\n\n def pop(self):\n \"\"\"Removes the minimum element in the queue.\n\n Returns:\n The value of the removed element.\n \"\"\"\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n\n i = 1\n while True:\n left = i * 2\n if len(heap) <= left:\n break\n left_key = heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key\n\nA = 
PriorityQueue()\nA.append(1)\nA.append(4)\nA.append(3)\nprint(A.heap)\nA.append(2)\nprint(A.heap)\nA.append(0)\nprint(A.heap)\nA.append(7)\nA.append(6)\nA.append(5)\n# print(A.pop())\n# print(A.pop())\n# print(A.pop())\n# print(A.pop())\n# print(A.pop())\n# print(A.pop())\n# print(A.pop())\n# print(A.pop())\n\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
def main():
"""Remove a category from a coco json file
"""
parser = ArgumentParser(description=
'Category Filter: Filter a List of Categories from a JSON')
parser.add_argument('json_file_path', help='JSON file path')
parser.add_argument('out_file', help='Output filename')
args = parser.parse_args()
ann_file = open(args.json_file_path)
category_names = ['sports ball', 'cell phone', 'couch', 'elephant',
'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',
'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',
'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',
'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',
'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',
'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',
'scissors', 'hair drier', 'toaster']
json_coco = json.load(ann_file)
new_json = deepcopy(json_coco)
for ann in json_coco['annotations']:
if return_cat_name(json_coco, ann['category_id']) in category_names:
new_json['annotations'].remove(ann)
for cat in json_coco['categories']:
if cat['name'] in category_names:
new_json['categories'].remove(cat)
output = open(args.out_file, 'w')
json.dump(new_json, output)
output.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def return_cat_name(json_coco, category):
"""Return the category name of a category ID
Arguments:
json_coco {dict} -- json dict file from coco file
category {int} -- category ID
Returns:
string -- category name
Raises:
KeyError: Category ID not found
"""
for cat in json_coco['categories']:
if cat['id'] == category:
return cat['name']
print('Categoria não encontrada: ', category)
sys.exit()
def main():
"""Remove a category from a coco json file
"""
parser = ArgumentParser(description=
'Category Filter: Filter a List of Categories from a JSON')
parser.add_argument('json_file_path', help='JSON file path')
parser.add_argument('out_file', help='Output filename')
args = parser.parse_args()
ann_file = open(args.json_file_path)
category_names = ['sports ball', 'cell phone', 'couch', 'elephant',
'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',
'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',
'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',
'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',
'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',
'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',
'scissors', 'hair drier', 'toaster']
json_coco = json.load(ann_file)
new_json = deepcopy(json_coco)
for ann in json_coco['annotations']:
if return_cat_name(json_coco, ann['category_id']) in category_names:
new_json['annotations'].remove(ann)
for cat in json_coco['categories']:
if cat['name'] in category_names:
new_json['categories'].remove(cat)
output = open(args.out_file, 'w')
json.dump(new_json, output)
output.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def return_cat_name(json_coco, category):
"""Return the category name of a category ID
Arguments:
json_coco {dict} -- json dict file from coco file
category {int} -- category ID
Returns:
string -- category name
Raises:
KeyError: Category ID not found
"""
for cat in json_coco['categories']:
if cat['id'] == category:
return cat['name']
print('Categoria não encontrada: ', category)
sys.exit()
def main():
"""Remove a category from a coco json file
"""
parser = ArgumentParser(description=
'Category Filter: Filter a List of Categories from a JSON')
parser.add_argument('json_file_path', help='JSON file path')
parser.add_argument('out_file', help='Output filename')
args = parser.parse_args()
ann_file = open(args.json_file_path)
category_names = ['sports ball', 'cell phone', 'couch', 'elephant',
'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',
'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',
'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',
'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',
'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',
'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',
'scissors', 'hair drier', 'toaster']
json_coco = json.load(ann_file)
new_json = deepcopy(json_coco)
for ann in json_coco['annotations']:
if return_cat_name(json_coco, ann['category_id']) in category_names:
new_json['annotations'].remove(ann)
for cat in json_coco['categories']:
if cat['name'] in category_names:
new_json['categories'].remove(cat)
output = open(args.out_file, 'w')
json.dump(new_json, output)
output.close()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import json
import sys
from copy import deepcopy
from argparse import ArgumentParser
def return_cat_name(json_coco, category):
"""Return the category name of a category ID
Arguments:
json_coco {dict} -- json dict file from coco file
category {int} -- category ID
Returns:
string -- category name
Raises:
KeyError: Category ID not found
"""
for cat in json_coco['categories']:
if cat['id'] == category:
return cat['name']
print('Categoria não encontrada: ', category)
sys.exit()
def main():
"""Remove a category from a coco json file
"""
parser = ArgumentParser(description=
'Category Filter: Filter a List of Categories from a JSON')
parser.add_argument('json_file_path', help='JSON file path')
parser.add_argument('out_file', help='Output filename')
args = parser.parse_args()
ann_file = open(args.json_file_path)
category_names = ['sports ball', 'cell phone', 'couch', 'elephant',
'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',
'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',
'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',
'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',
'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',
'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',
'scissors', 'hair drier', 'toaster']
json_coco = json.load(ann_file)
new_json = deepcopy(json_coco)
for ann in json_coco['annotations']:
if return_cat_name(json_coco, ann['category_id']) in category_names:
new_json['annotations'].remove(ann)
for cat in json_coco['categories']:
if cat['name'] in category_names:
new_json['categories'].remove(cat)
output = open(args.out_file, 'w')
json.dump(new_json, output)
output.close()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import json
import sys
from copy import deepcopy
from argparse import ArgumentParser
# TODO: Ord category's IDs after deletion
def return_cat_name(json_coco, category):
"""Return the category name of a category ID
Arguments:
json_coco {dict} -- json dict file from coco file
category {int} -- category ID
Returns:
string -- category name
Raises:
KeyError: Category ID not found
"""
for cat in json_coco['categories']:
if cat['id'] == category:
return cat['name']
print("Categoria não encontrada: ", category)
sys.exit()
def main():
"""Remove a category from a coco json file
"""
parser = ArgumentParser(
description='Category Filter: Filter a List of Categories from a JSON')
parser.add_argument('json_file_path', help='JSON file path')
parser.add_argument('out_file', help='Output filename')
args = parser.parse_args()
ann_file = open(args.json_file_path)
category_names = ["sports ball", "cell phone", "couch", "elephant", "tie", "spoon", "skis", "apple", "giraffe", "laptop", "tennis racket", "sink", "dog", "fork", "cat", "teddy bear", "train", "skateboard", "toilet", "sandwich", "bed", "keyboard", "baseball glove", "baseball bat", "airplane", "oven", "hot dog", "refrigerator", "frisbee", "mouse", "fire hydrant", "stop sign", "bear", "snowboard", "parking meter", "toothbrush", "microwave", "scissors", "hair drier", "toaster"]
json_coco = json.load(ann_file)
new_json = deepcopy(json_coco)
for ann in json_coco['annotations']:
if return_cat_name(json_coco, ann['category_id']) in category_names:
new_json['annotations'].remove(ann)
for cat in json_coco['categories']:
if cat['name'] in category_names:
new_json['categories'].remove(cat)
output = open(args.out_file, "w")
json.dump(new_json, output)
output.close()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "467327b98ab99bdad429943c701c751be4f67940",
"index": 9378,
"step-1": "<mask token>\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(description=\n 'Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n ann_file = open(args.json_file_path)\n category_names = ['sports ball', 'cell phone', 'couch', 'elephant',\n 'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',\n 'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',\n 'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',\n 'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',\n 'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',\n 'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',\n 'scissors', 'hair drier', 'toaster']\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n output = open(args.out_file, 'w')\n json.dump(new_json, output)\n output.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef return_cat_name(json_coco, category):\n \"\"\"Return the category name of a category ID\n\n Arguments:\n json_coco {dict} -- json dict file from coco file\n category {int} -- category ID\n\n Returns:\n string -- category name\n Raises:\n KeyError: Category ID not found\n \"\"\"\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print('Categoria não encontrada: ', category)\n sys.exit()\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(description=\n 'Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n ann_file = open(args.json_file_path)\n category_names = ['sports ball', 'cell phone', 'couch', 'elephant',\n 'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',\n 'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',\n 'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',\n 'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',\n 'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',\n 'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',\n 'scissors', 'hair drier', 'toaster']\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n output = open(args.out_file, 'w')\n json.dump(new_json, output)\n output.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef return_cat_name(json_coco, category):\n \"\"\"Return the category name of a category ID\n\n Arguments:\n json_coco {dict} -- json dict file from coco file\n category {int} -- category ID\n\n Returns:\n string -- category name\n Raises:\n KeyError: Category ID not found\n \"\"\"\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print('Categoria não encontrada: ', category)\n sys.exit()\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(description=\n 'Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n ann_file = open(args.json_file_path)\n category_names = ['sports ball', 'cell phone', 'couch', 'elephant',\n 'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',\n 'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',\n 'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',\n 'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',\n 'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',\n 'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',\n 'scissors', 'hair drier', 'toaster']\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n output = open(args.out_file, 'w')\n json.dump(new_json, output)\n output.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import json\nimport sys\nfrom copy import deepcopy\nfrom argparse import ArgumentParser\n\n\ndef return_cat_name(json_coco, category):\n \"\"\"Return the category name of a category ID\n\n Arguments:\n json_coco {dict} -- json dict file from coco file\n category {int} -- category ID\n\n Returns:\n string -- category name\n Raises:\n KeyError: Category ID not found\n \"\"\"\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print('Categoria não encontrada: ', category)\n sys.exit()\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(description=\n 'Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n ann_file = open(args.json_file_path)\n category_names = ['sports ball', 'cell phone', 'couch', 'elephant',\n 'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',\n 'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',\n 'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',\n 'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',\n 'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',\n 'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',\n 'scissors', 'hair drier', 'toaster']\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n output = open(args.out_file, 'w')\n json.dump(new_json, output)\n output.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import json\nimport sys\nfrom copy import deepcopy\nfrom argparse import ArgumentParser\n\n# TODO: Ord category's IDs after deletion\n\n\ndef return_cat_name(json_coco, category):\n \"\"\"Return the category name of a category ID\n\n Arguments:\n json_coco {dict} -- json dict file from coco file\n category {int} -- category ID\n\n Returns:\n string -- category name\n Raises:\n KeyError: Category ID not found\n \"\"\"\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print(\"Categoria não encontrada: \", category)\n sys.exit()\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(\n description='Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n\n ann_file = open(args.json_file_path)\n category_names = [\"sports ball\", \"cell phone\", \"couch\", \"elephant\", \"tie\", \"spoon\", \"skis\", \"apple\", \"giraffe\", \"laptop\", \"tennis racket\", \"sink\", \"dog\", \"fork\", \"cat\", \"teddy bear\", \"train\", \"skateboard\", \"toilet\", \"sandwich\", \"bed\", \"keyboard\", \"baseball glove\", \"baseball bat\", \"airplane\", \"oven\", \"hot dog\", \"refrigerator\", \"frisbee\", \"mouse\", \"fire hydrant\", \"stop sign\", \"bear\", \"snowboard\", \"parking meter\", \"toothbrush\", \"microwave\", \"scissors\", \"hair drier\", \"toaster\"]\n\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n\n output = open(args.out_file, \"w\")\n json.dump(new_json, output)\n output.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def move_directory(input_directory_path, output_directory_path):
print('moving %s to %s' % (input_directory_path, output_directory_path))
if not dry_run:
shutil.move(input_directory_path, output_directory_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def move_directory(input_directory_path, output_directory_path):
print('moving %s to %s' % (input_directory_path, output_directory_path))
if not dry_run:
shutil.move(input_directory_path, output_directory_path)
print('Root dir is %s' % root_path)
for level1 in os.listdir(root_path):
level1_path = os.path.join(root_path, level1)
if os.path.isdir(level1_path):
print('> %s' % level1)
for level2 in os.listdir(level1_path):
level2_path = os.path.join(level1_path, level2)
if os.path.isdir(level2_path):
print('>> %s' % level2)
move_directory(level2_path, root_path)
print('Deleting %s' % level1_path)
if not dry_run:
shutil.rmtree(level1_path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root_path = 'C:/Users/koyou/Desktop/test'
dry_run = False
def move_directory(input_directory_path, output_directory_path):
print('moving %s to %s' % (input_directory_path, output_directory_path))
if not dry_run:
shutil.move(input_directory_path, output_directory_path)
print('Root dir is %s' % root_path)
for level1 in os.listdir(root_path):
level1_path = os.path.join(root_path, level1)
if os.path.isdir(level1_path):
print('> %s' % level1)
for level2 in os.listdir(level1_path):
level2_path = os.path.join(level1_path, level2)
if os.path.isdir(level2_path):
print('>> %s' % level2)
move_directory(level2_path, root_path)
print('Deleting %s' % level1_path)
if not dry_run:
shutil.rmtree(level1_path)
<|reserved_special_token_1|>
import os
import shutil
# root_path = '../from_1691'
root_path = 'C:/Users/koyou/Desktop/test'
# 실수할 수도 있으므로 dry_run 을 설정해서 로그만 찍을 것인지
# 실제 작동도 진행할 것인지 결정한다.
# dry_run = True
dry_run = False
def move_directory(input_directory_path, output_directory_path):
print("moving %s to %s" % (input_directory_path, output_directory_path))
if not dry_run:
shutil.move(input_directory_path, output_directory_path)
#
# main
#
print("Root dir is %s" % root_path)
for level1 in os.listdir(root_path): # level1 == test1
level1_path = os.path.join(root_path, level1)
if os.path.isdir(level1_path):
# 디렉토리 이름을 출력해줘야 진행상황 알 수 있음
print("> %s" % level1)
for level2 in os.listdir(level1_path): # level2 == test1-1
level2_path = os.path.join(level1_path, level2)
if os.path.isdir(level2_path):
# level2 이름 출력
print(">> %s" % level2)
move_directory(level2_path, root_path)
# 2. deleting dir
print("Deleting %s" % level1_path)
if not dry_run:
shutil.rmtree(level1_path)
|
flexible
|
{
"blob_id": "7de19a85a6a05bd2972b11571d5f05219c6beb1a",
"index": 916,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"step-4": "<mask token>\nroot_path = 'C:/Users/koyou/Desktop/test'\ndry_run = False\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"step-5": "import os\nimport shutil\n\n# root_path = '../from_1691'\nroot_path = 'C:/Users/koyou/Desktop/test'\n\n# 실수할 수도 있으므로 dry_run 을 설정해서 로그만 찍을 것인지\n# 실제 작동도 진행할 것인지 결정한다.\n# dry_run = True\ndry_run = False\n\ndef move_directory(input_directory_path, output_directory_path):\n print(\"moving %s to %s\" % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\n#\n# main\n#\nprint(\"Root dir is %s\" % root_path)\n\nfor level1 in os.listdir(root_path): # level1 == test1\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n # 디렉토리 이름을 출력해줘야 진행상황 알 수 있음\n print(\"> %s\" % level1)\n\n for level2 in os.listdir(level1_path): # level2 == test1-1\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n # level2 이름 출력\n print(\">> %s\" % level2)\n\n move_directory(level2_path, root_path)\n\n # 2. deleting dir\n print(\"Deleting %s\" % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"step-ids": [
0,
1,
2,
3,
5
]
}
|
[
0,
1,
2,
3,
5
] |
""" Classes and functions for generalized q-sampling """
import numpy as np
from dipy.reconst.odf import OdfModel, OdfFit, gfa
from dipy.reconst.cache import Cache
import warnings
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.recspeed import local_maxima, remove_similar_vertices
class GeneralizedQSamplingModel(OdfModel, Cache):
    def __init__(self,
                 gtab,
                 method='gqi2',
                 sampling_length=1.2,
                 normalize_peaks=False):
        r""" Generalized Q-Sampling Imaging [1]_

        Like DSI, this model assumes signals sampled on a Cartesian grid
        in q-space with fast gradient switching.

        The 'standard' method implements equation 2.14 of [2]_ and
        'gqi2' implements equation 2.16 of [2]_; GQI2 can be regarded as
        an analytical solution of the DSI ODF.

        Parameters
        ----------
        gtab : object,
            GradientTable
        method : str,
            'standard' or 'gqi2'
        sampling_length : float,
            diffusion sampling length (lambda in eq. 2.14 and 2.16)

        References
        ----------
        .. [1] Yeh F-C et al., "Generalized Q-Sampling Imaging", IEEE TMI, 2010

        .. [2] Garyfallidis E, "Towards an accurate brain tractography", PhD
               thesis, University of Cambridge, 2012.

        Notes
        -----
        Since version 0.9 the sampling length used by GQI2 is expressed
        on the same scale as the 'standard' method [1]_, so the value of
        `sampling_length` should be approximately 1 - 1.3
        (see [1]_, pg. 1628).

        Examples
        --------
        Here we create an example where we provide the data, a gradient table
        and a reconstruction sphere and calculate the ODF for the first
        voxel in the data.

        >>> from dipy.data import dsi_voxels
        >>> data, gtab = dsi_voxels()
        >>> from dipy.core.subdivide_octahedron import create_unit_sphere
        >>> sphere = create_unit_sphere(5)
        >>> from dipy.reconst.gqi import GeneralizedQSamplingModel
        >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)
        >>> voxel_signal = data[0, 0, 0]
        >>> odf = gq.fit(voxel_signal).odf(sphere)

        See Also
        --------
        dipy.reconst.dsi.DiffusionSpectrumModel

        """
        OdfModel.__init__(self, gtab)
        self.method = method
        self.Lambda = sampling_length
        self.normalize_peaks = normalize_peaks
        # 0.01506 = 6 * D where D is the free water diffusion
        # coefficient; the diffusion time tau is already folded into the
        # b-value, so sqrt(6 * D * b) is the q-space vector length.
        q_norm = np.sqrt(self.gtab.bvals * 0.01506)
        # scale each unit gradient direction by its q-space length
        # (broadcasting replaces the tile/transpose construction)
        self.b_vector = self.gtab.bvecs * q_norm[:, None]

    @multi_voxel_fit
    def fit(self, data):
        return GeneralizedQSamplingFit(self, data)
class GeneralizedQSamplingFit(OdfFit):

    def __init__(self, model, data):
        """ Holds the PDF/ODF reconstruction state for a single voxel.

        Parameters
        ----------
        model : object,
            DiffusionSpectrumModel
        data : 1d ndarray,
            signal values

        """
        OdfFit.__init__(self, model, data)
        self.npeaks = 5
        self._gfa = None
        self._qa = None
        self._peak_values = None
        self._peak_indices = None

    def odf(self, sphere):
        """ Calculates the discrete ODF for a given discrete sphere.
        """
        self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)
        if self.gqi_vector is None:
            # project each q-space vector onto every sphere direction
            # and scale by the diffusion sampling length
            proj = np.dot(self.model.b_vector,
                          sphere.vertices.T) * self.model.Lambda
            if self.model.method == 'gqi2':
                self.gqi_vector = np.real(squared_radial_component(proj))
            if self.model.method == 'standard':
                self.gqi_vector = np.real(np.sinc(proj / np.pi))
            self.model.cache_set('gqi_vector', sphere, self.gqi_vector)
        return np.dot(self.data, self.gqi_vector)
def normalize_qa(qa, max_qa=None):
    """ Normalize quantitative anisotropy.

    Used mostly with GQI rather than GQI2.

    Parameters
    ----------
    qa : array, shape (X, Y, Z, N)
        where N is the maximum number of peaks stored
    max_qa : float,
        maximum qa value. Usually found in the CSF (corticospinal fluid).

    Returns
    -------
    nqa : array, shape (x, Y, Z, N)
        normalized quantitative anisotropy

    Notes
    -----
    Normalized quantitative anisotropy has the very useful property
    to be very small near gray matter and background areas. Therefore,
    it can be used to mask out white matter areas.

    """
    # fall back to the global maximum when no reference value is given
    denominator = qa.max() if max_qa is None else max_qa
    return qa / denominator
def squared_radial_component(x, tol=0.01):
    """ Part of the GQI2 integral

    Implements Eq. 8 of the referenced paper by Yeh et al. 2010; entries
    within ``tol`` of zero are replaced by the limit value 1/3 to avoid
    the 0/0 singularity at the origin.
    """
    with warnings.catch_warnings():
        # x == 0 produces a harmless divide-by-zero warning here; those
        # positions are overwritten by np.where below
        warnings.simplefilter("ignore")
        numerator = 2 * x * np.cos(x) + (x * x - 2) * np.sin(x)
        result = numerator / x ** 3
    near_zero = (x > -tol) & (x < tol)
    return np.where(near_zero, 1. / 3, result)
def npa(self, odf, width=5):
    """ non-parametric anisotropy

    Nimmo-Smith et al. ISMRM 2011
    """
    t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)
    # squared ODF values at the three maxima
    p0 = t0[1] ** 2
    p1 = t1[1] ** 2
    p2 = t2[1] ** 2
    numerator = np.sqrt((p0 - p1) ** 2 + (p1 - p2) ** 2 + (p2 - p0) ** 2)
    denominator = np.sqrt(2 * (p0 ** 2 + p1 ** 2 + p2 ** 2))
    return t0, t1, t2, numerator / denominator
def equatorial_zone_vertices(vertices, pole, width=5):
    """
    finds the 'vertices' in the equatorial zone conjugate
    to 'pole' with width half 'width' degrees
    """
    # a vertex lies in the band when its direction is nearly orthogonal
    # to the pole, i.e. |dot| below sin(width)
    cutoff = np.abs(np.sin(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) < cutoff]
def polar_zone_vertices(vertices, pole, width=5):
    """
    finds the 'vertices' within 'width' degrees of the 'pole'
    (|dot| above cos(width), i.e. the polar cap)
    """
    cutoff = np.abs(np.cos(np.pi * width / 180))
    selected = []
    for idx, vert in enumerate(vertices):
        if np.abs(np.dot(vert, pole)) > cutoff:
            selected.append(idx)
    return selected
def upper_hemi_map(v):
    """
    maps a 3-vector into the z-upper hemisphere
    """
    # flip the whole vector when its z component is negative; note that
    # np.sign(0) == 0, so a vector in the z == 0 plane maps to zero
    return v * np.sign(v[2])
def equatorial_maximum(vertices, odf, pole, width):
    """ Return (index, value) of the ODF maximum inside the equatorial
    band conjugate to `pole`, or (None, None) if the band is empty.
    """
    band = equatorial_zone_vertices(vertices, pole, width)
    if not band:
        print('empty equatorial band at %s pole with width %f' %
              (np.array_str(pole), width))
        return None, None
    band_odf = [odf[i] for i in band]
    peak = np.argmax(band_odf)
    return band[peak], band_odf[peak]
def patch_vertices(vertices, pole, width):
    """
    find 'vertices' within the cone of 'width' degrees around 'pole'
    """
    # inside the cone when the angle to the pole is below width,
    # i.e. |dot| above cos(width)
    cos_width = np.abs(np.cos(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) > cos_width]
def patch_maximum(vertices, odf, pole, width):
    """ Find the ODF maximum within the cone of `width` degrees
    around `pole`.

    Returns
    -------
    eqvertmax : int or None
        index (into `vertices`) of the maximum, or None when the cone
        contains no vertices.
    eqvalmax : float or None
        ODF value at that vertex, or None when the cone is empty.
    """
    eqvert = patch_vertices(vertices, pole, width)
    # need to test for whether eqvert is empty or not
    if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        # BUG FIX: np.Null does not exist (it raised AttributeError);
        # return (None, None) sentinels like equatorial_maximum does.
        return None, None
    eqvals = [odf[i] for i in eqvert]
    eqargmax = np.argmax(eqvals)
    eqvertmax = eqvert[eqargmax]
    eqvalmax = eqvals[eqargmax]
    return eqvertmax, eqvalmax
def odf_sum(odf):
    """ Return the sum of all ODF values. """
    return np.asarray(odf).sum()
def patch_sum(vertices, odf, pole, width):
    """ Sum the ODF values over all vertices within the cone of `width`
    degrees around `pole`.

    Returns None (after printing a warning) when the cone contains no
    vertices.
    """
    eqvert = patch_vertices(vertices, pole, width)
    # need to test for whether eqvert is empty or not
    if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        # BUG FIX: np.Null does not exist (it raised AttributeError);
        # return the None sentinel instead.
        return None
    return np.sum([odf[i] for i in eqvert])
def triple_odf_maxima(vertices, odf, width):
    """ Locate three ODF maxima: the global maximum, the maximum of the
    equatorial band conjugate to it, and the band vertex most
    orthogonal to the second maximum.

    Returns a list of three (vertex_index, odf_value) pairs.
    """
    # first peak: global maximum of the ODF over the sphere vertices
    indmax1 = np.argmax([odf[k] for k in range(len(vertices))])
    odfmax1 = odf[indmax1]
    pole = vertices[indmax1]
    # second peak: maximum within the equatorial band conjugate to pole
    eqvert = equatorial_zone_vertices(vertices, pole, width)
    indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)
    # third peak: the band vertex most orthogonal to the second maximum
    angles = [np.abs(np.dot(vertices[indmax2], vertices[p])) for p in eqvert]
    indmax3 = eqvert[np.argmin(angles)]
    odfmax3 = odf[indmax3]
    return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]
|
normal
|
{
"blob_id": "2f193cb1eaf7b5e99d20025716a248144af90b92",
"index": 1925,
"step-1": "<mask token>\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n\n def __init__(self, gtab, method='gqi2', sampling_length=1.2,\n normalize_peaks=False):\n \"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n self.gqi_vector = np.real(H(np.dot(self.model.b_vector,\n sphere.vertices.T) * self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(self.model.\n b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, 
self.gqi_vector)\n return np.dot(self.data, self.gqi_vector)\n\n\n<mask token>\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. ISMRM 2011\n \"\"\"\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2\n ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))\n return t0, t1, t2, npa\n\n\n<mask token>\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2]) * v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' % (np.\n array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\n<mask 
token>\n\n\ndef triple_odf_maxima(vertices, odf, width):\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p\n ])) for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-2": "<mask token>\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n\n def __init__(self, gtab, method='gqi2', sampling_length=1.2,\n normalize_peaks=False):\n \"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n self.gqi_vector = np.real(H(np.dot(self.model.b_vector,\n sphere.vertices.T) * self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(self.model.\n b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, 
self.gqi_vector)\n return np.dot(self.data, self.gqi_vector)\n\n\ndef normalize_qa(qa, max_qa=None):\n \"\"\" Normalize quantitative anisotropy.\n\n Used mostly with GQI rather than GQI2.\n\n Parameters\n ----------\n qa : array, shape (X, Y, Z, N)\n where N is the maximum number of peaks stored\n max_qa : float,\n maximum qa value. Usually found in the CSF (corticospinal fluid).\n\n Returns\n -------\n nqa : array, shape (x, Y, Z, N)\n normalized quantitative anisotropy\n\n Notes\n -----\n Normalized quantitative anisotropy has the very useful property\n to be very small near gray matter and background areas. Therefore,\n it can be used to mask out white matter areas.\n\n \"\"\"\n if max_qa is None:\n return qa / qa.max()\n return qa / max_qa\n\n\n<mask token>\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. ISMRM 2011\n \"\"\"\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2\n ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))\n return t0, t1, t2, npa\n\n\n<mask token>\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2]) * v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' % (np.\n array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef 
patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\ndef patch_sum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null\n return np.sum([odf[i] for i in eqvert])\n\n\ndef triple_odf_maxima(vertices, odf, width):\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p\n ])) for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-3": "<mask token>\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n\n def __init__(self, gtab, method='gqi2', sampling_length=1.2,\n normalize_peaks=False):\n \"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n self.gqi_vector = np.real(H(np.dot(self.model.b_vector,\n sphere.vertices.T) * self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(self.model.\n b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, 
self.gqi_vector)\n return np.dot(self.data, self.gqi_vector)\n\n\ndef normalize_qa(qa, max_qa=None):\n \"\"\" Normalize quantitative anisotropy.\n\n Used mostly with GQI rather than GQI2.\n\n Parameters\n ----------\n qa : array, shape (X, Y, Z, N)\n where N is the maximum number of peaks stored\n max_qa : float,\n maximum qa value. Usually found in the CSF (corticospinal fluid).\n\n Returns\n -------\n nqa : array, shape (x, Y, Z, N)\n normalized quantitative anisotropy\n\n Notes\n -----\n Normalized quantitative anisotropy has the very useful property\n to be very small near gray matter and background areas. Therefore,\n it can be used to mask out white matter areas.\n\n \"\"\"\n if max_qa is None:\n return qa / qa.max()\n return qa / max_qa\n\n\ndef squared_radial_component(x, tol=0.01):\n \"\"\" Part of the GQI2 integral\n\n Eq.8 in the referenced paper by Yeh et al. 2010\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = (2 * x * np.cos(x) + (x * x - 2) * np.sin(x)) / x ** 3\n x_near_zero = (x < tol) & (x > -tol)\n return np.where(x_near_zero, 1.0 / 3, result)\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. 
ISMRM 2011\n \"\"\"\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2\n ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))\n return t0, t1, t2, npa\n\n\n<mask token>\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2]) * v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' % (np.\n array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\ndef patch_sum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n 
array_str(pole), width))\n return np.Null\n return np.sum([odf[i] for i in eqvert])\n\n\ndef triple_odf_maxima(vertices, odf, width):\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p\n ])) for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-4": "<mask token>\nimport numpy as np\nfrom dipy.reconst.odf import OdfModel, OdfFit, gfa\nfrom dipy.reconst.cache import Cache\nimport warnings\nfrom dipy.reconst.multi_voxel import multi_voxel_fit\nfrom dipy.reconst.recspeed import local_maxima, remove_similar_vertices\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n\n def __init__(self, gtab, method='gqi2', sampling_length=1.2,\n normalize_peaks=False):\n \"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n self.gqi_vector = np.real(H(np.dot(self.model.b_vector,\n sphere.vertices.T) * self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(self.model.\n b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, 
self.gqi_vector)\n return np.dot(self.data, self.gqi_vector)\n\n\ndef normalize_qa(qa, max_qa=None):\n \"\"\" Normalize quantitative anisotropy.\n\n Used mostly with GQI rather than GQI2.\n\n Parameters\n ----------\n qa : array, shape (X, Y, Z, N)\n where N is the maximum number of peaks stored\n max_qa : float,\n maximum qa value. Usually found in the CSF (corticospinal fluid).\n\n Returns\n -------\n nqa : array, shape (x, Y, Z, N)\n normalized quantitative anisotropy\n\n Notes\n -----\n Normalized quantitative anisotropy has the very useful property\n to be very small near gray matter and background areas. Therefore,\n it can be used to mask out white matter areas.\n\n \"\"\"\n if max_qa is None:\n return qa / qa.max()\n return qa / max_qa\n\n\ndef squared_radial_component(x, tol=0.01):\n \"\"\" Part of the GQI2 integral\n\n Eq.8 in the referenced paper by Yeh et al. 2010\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = (2 * x * np.cos(x) + (x * x - 2) * np.sin(x)) / x ** 3\n x_near_zero = (x < tol) & (x > -tol)\n return np.where(x_near_zero, 1.0 / 3, result)\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. 
ISMRM 2011\n \"\"\"\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2\n ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))\n return t0, t1, t2, npa\n\n\ndef equatorial_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial zone conjugate\n to 'pole' with width half 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) <\n np.abs(np.sin(np.pi * width / 180))]\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2]) * v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' % (np.\n array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = 
eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\ndef patch_sum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null\n return np.sum([odf[i] for i in eqvert])\n\n\ndef triple_odf_maxima(vertices, odf, width):\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p\n ])) for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-5": "\"\"\" Classes and functions for generalized q-sampling \"\"\"\nimport numpy as np\nfrom dipy.reconst.odf import OdfModel, OdfFit, gfa\nfrom dipy.reconst.cache import Cache\nimport warnings\nfrom dipy.reconst.multi_voxel import multi_voxel_fit\nfrom dipy.reconst.recspeed import local_maxima, remove_similar_vertices\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n def __init__(self,\n gtab,\n method='gqi2',\n sampling_length=1.2,\n normalize_peaks=False):\n r\"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n # 0.01506 = 6*D where D is the free water diffusion coefficient\n # l_values sqrt(6 D tau) D free water diffusion coefficient and\n # tau included in the b-value\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp # element-wise product\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n # print self.gqi_vector.shape\n self.gqi_vector = np.real(H(np.dot(\n self.model.b_vector, sphere.vertices.T) *\n 
self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(\n self.model.b_vector, sphere.vertices.T) *\n self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, self.gqi_vector)\n\n return np.dot(self.data, self.gqi_vector)\n\n\ndef normalize_qa(qa, max_qa=None):\n \"\"\" Normalize quantitative anisotropy.\n\n Used mostly with GQI rather than GQI2.\n\n Parameters\n ----------\n qa : array, shape (X, Y, Z, N)\n where N is the maximum number of peaks stored\n max_qa : float,\n maximum qa value. Usually found in the CSF (corticospinal fluid).\n\n Returns\n -------\n nqa : array, shape (x, Y, Z, N)\n normalized quantitative anisotropy\n\n Notes\n -----\n Normalized quantitative anisotropy has the very useful property\n to be very small near gray matter and background areas. Therefore,\n it can be used to mask out white matter areas.\n\n \"\"\"\n if max_qa is None:\n return qa / qa.max()\n return qa / max_qa\n\n\ndef squared_radial_component(x, tol=0.01):\n \"\"\" Part of the GQI2 integral\n\n Eq.8 in the referenced paper by Yeh et al. 2010\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n result = (2 * x * np.cos(x) + (x * x - 2) * np.sin(x)) / (x ** 3)\n x_near_zero = (x < tol) & (x > -tol)\n return np.where(x_near_zero, 1./3, result)\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. 
ISMRM 2011\n \"\"\"\n # odf = self.odf(s)\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = (np.sqrt(\n (psi0 - psi1) ** 2 +\n (psi1 - psi2) ** 2 +\n (psi2 - psi0) ** 2) /\n np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2)))\n # print 'tom >>>> ',t0,t1,t2,npa\n\n return t0, t1, t2, npa\n\n\ndef equatorial_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial zone conjugate\n to 'pole' with width half 'width' degrees\n \"\"\"\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) < np.abs(np.sin(np.pi * width / 180))]\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) > np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2])*v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n # need to test for whether eqvert is empty or not\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' %\n (np.array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n\n return eqvertmax, eqvalmax\n\n\ndef patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) > np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n # need to test for whether eqvert is empty or not\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' %\n 
(np.array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\ndef patch_sum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n # need to test for whether eqvert is empty or not\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' %\n (np.array_str(pole), width))\n return np.Null\n return np.sum([odf[i] for i in eqvert])\n\n\ndef triple_odf_maxima(vertices, odf, width):\n\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p]))\n for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-ids": [
14,
16,
17,
19,
20
]
}
|
[
14,
16,
17,
19,
20
] |
<|reserved_special_token_0|>
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False] * len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if occurrency[x_index][y_index] == False:
update = False
if update:
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if occurrency[x_index][y_index] == False and visitedY[y_index
] == False:
Y[y_index] = dz * Y[y_index] / sumY
visitedY[y_index] = True
modifyX = True
if occurrency[x_index][y_index] == False and visitedY[y_index
] == True and not modifyX:
modifyX = True
if modifyX:
X[x_index] = dx * X[x_index] / sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX, windowY, occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(
yQuotes)]))
result = STRUCT(model)
result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(['iron.jpg'])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(
result)[0] * 0.95])
glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)
glass = TEXTURE(['glass2.jpg'])(glass)
window = STRUCT([windowFrame, glass])
window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(
window)[0], dz / SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameter dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2, S3, S1])(res)
res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],
dz / SIZE([3])(res)[0]])(res)
door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *
0.94])
glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
glass = TEXTURE(['glass.jpg'])(glass)
refiner = CUBOID([0.03, 0.01, dz])
refiner = T([1, 2])([dx / 2, dy])(refiner)
refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))
handler2 = CUBOID([0.05, 0.02, 0.05])
handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))
handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))
handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy,
dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)
finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(
res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,
handler]))
return finalDoor
return door0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False] * len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if occurrency[x_index][y_index] == False:
update = False
if update:
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if occurrency[x_index][y_index] == False and visitedY[y_index
] == False:
Y[y_index] = dz * Y[y_index] / sumY
visitedY[y_index] = True
modifyX = True
if occurrency[x_index][y_index] == False and visitedY[y_index
] == True and not modifyX:
modifyX = True
if modifyX:
X[x_index] = dx * X[x_index] / sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX, windowY, occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(
yQuotes)]))
result = STRUCT(model)
result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(['iron.jpg'])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(
result)[0] * 0.95])
glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)
glass = TEXTURE(['glass2.jpg'])(glass)
window = STRUCT([windowFrame, glass])
window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(
window)[0], dz / SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameter dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2, S3, S1])(res)
res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],
dz / SIZE([3])(res)[0]])(res)
door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *
0.94])
glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
glass = TEXTURE(['glass.jpg'])(glass)
refiner = CUBOID([0.03, 0.01, dz])
refiner = T([1, 2])([dx / 2, dy])(refiner)
refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))
handler2 = CUBOID([0.05, 0.02, 0.05])
handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))
handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))
handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy,
dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)
finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(
res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,
handler]))
return finalDoor
return door0
VIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))
VIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
doorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08,
0.18, 0.2]
doorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]
doorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,
False, True, False, True, False, True], [True] * 13, [True, False, True,
False, True, False, True, False, True, False, True, False, True], [True,
False, True, False, True, True, True, True, True, False, True, False,
True], [True, False, True, False, False, False, True, False, False,
False, True, False, True], [True, False, True, True, True, True, True,
True, True, True, True, False, True], [True, False, False, False, False,
False, True, False, False, False, False, False, True], [True] * 13]
windowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]
windowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]
windowOccurrency = [[True] * 9, [True, False, False, False, False, False,
False, False, True], [True] * 9, [True] * 9, [True, True, False, True,
False, True, False, True, True], [True] * 9, [True, True, False, True,
False, True, False, True, True], [True] * 9, [True] * 9]
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False] * len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if occurrency[x_index][y_index] == False:
update = False
if update:
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if occurrency[x_index][y_index] == False and visitedY[y_index
] == False:
Y[y_index] = dz * Y[y_index] / sumY
visitedY[y_index] = True
modifyX = True
if occurrency[x_index][y_index] == False and visitedY[y_index
] == True and not modifyX:
modifyX = True
if modifyX:
X[x_index] = dx * X[x_index] / sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX, windowY, occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(
yQuotes)]))
result = STRUCT(model)
result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(['iron.jpg'])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(
result)[0] * 0.95])
glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)
glass = TEXTURE(['glass2.jpg'])(glass)
window = STRUCT([windowFrame, glass])
window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(
window)[0], dz / SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameter dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2, S3, S1])(res)
res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],
dz / SIZE([3])(res)[0]])(res)
door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *
0.94])
glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
glass = TEXTURE(['glass.jpg'])(glass)
refiner = CUBOID([0.03, 0.01, dz])
refiner = T([1, 2])([dx / 2, dy])(refiner)
refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))
handler2 = CUBOID([0.05, 0.02, 0.05])
handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))
handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))
handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy,
dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)
finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(
res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,
handler]))
return finalDoor
return door0
VIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))
VIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))
<|reserved_special_token_1|>
from pyplasm import *
doorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08,
0.18, 0.2]
doorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]
doorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,
False, True, False, True, False, True], [True] * 13, [True, False, True,
False, True, False, True, False, True, False, True, False, True], [True,
False, True, False, True, True, True, True, True, False, True, False,
True], [True, False, True, False, False, False, True, False, False,
False, True, False, True], [True, False, True, True, True, True, True,
True, True, True, True, False, True], [True, False, False, False, False,
False, True, False, False, False, False, False, True], [True] * 13]
windowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]
windowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]
windowOccurrency = [[True] * 9, [True, False, False, False, False, False,
False, False, True], [True] * 9, [True] * 9, [True, True, False, True,
False, True, False, True, True], [True] * 9, [True, True, False, True,
False, True, False, True, True], [True] * 9, [True] * 9]
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False] * len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if occurrency[x_index][y_index] == False:
update = False
if update:
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if occurrency[x_index][y_index] == False and visitedY[y_index
] == False:
Y[y_index] = dz * Y[y_index] / sumY
visitedY[y_index] = True
modifyX = True
if occurrency[x_index][y_index] == False and visitedY[y_index
] == True and not modifyX:
modifyX = True
if modifyX:
X[x_index] = dx * X[x_index] / sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX, windowY, occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(
yQuotes)]))
result = STRUCT(model)
result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(['iron.jpg'])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(
result)[0] * 0.95])
glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)
glass = TEXTURE(['glass2.jpg'])(glass)
window = STRUCT([windowFrame, glass])
window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(
window)[0], dz / SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameter dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2, S3, S1])(res)
res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],
dz / SIZE([3])(res)[0]])(res)
door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *
0.94])
glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
glass = TEXTURE(['glass.jpg'])(glass)
refiner = CUBOID([0.03, 0.01, dz])
refiner = T([1, 2])([dx / 2, dy])(refiner)
refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))
handler2 = CUBOID([0.05, 0.02, 0.05])
handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))
handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))
handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy,
dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)
finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(
res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,
handler]))
return finalDoor
return door0
VIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))
VIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))
<|reserved_special_token_1|>
from pyplasm import *
doorY = [.2,.18,.08,.18,.08,.18,.4,.18,.08,.18,.08,.18,.2]
doorX = [.2,.5,.2,1.8,.08,.18,.08,.18,.2]
doorOccurrency = [[True]*13,
[True, False, True, False, True, False, True, False, True, False, True, False, True],
[True]*13,
[True, False, True, False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, True, True, True, True, False, True, False, True],
[True, False, True, False, False, False, True, False, False, False, True, False, True],
[True, False, True, True, True, True, True, True, True, True, True, False, True],
[True, False, False, False, False, False, True, False, False, False, False, False, True],
[True]*13]
windowY = [0.04,0.04,0.2,0.02,0.16,0.02,0.2,0.04,0.04]
windowX = [0.02,0.8,0.05,0.02,0.4,0.02,0.4,0.05,0.04]
windowOccurrency = [[True]*9,
[True, False, False, False, False, False, False, False, True],
[True]*9,
[True]*9,
[True, True, False, True, False, True, False, True, True],
[True]*9,
[True, True, False, True, False, True, False, True, True],
[True]*9,
[True]*9]
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False]*len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if(occurrency[x_index][y_index] == False):
update = False
if(update):
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if(occurrency[x_index][y_index] == False and visitedY[y_index] == False):
Y[y_index] = (dz * Y[y_index])/sumY
visitedY[y_index] = True
modifyX = True
if(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX):
modifyX = True
if(modifyX):
X[x_index] = (dx * X[x_index])/sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX,windowY,occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if(occurrency[xIndex][yIndex] == False):
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))
result = STRUCT(model)
result = MAP([S2,S3,S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(["iron.jpg"])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])
glass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)
glass = TEXTURE(["glass2.jpg"])(glass)
window = STRUCT([windowFrame, glass])
window = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
	"""Return a closure that builds the HPC model of a door.

	doorX and doorY hold the cell sizes measured along the two axes of the
	previous segment; occurrency[i][j] is True where the panel is solid and
	False where the cell is an opening.  The inner closure scales the whole
	assembly to the requested dx, dy, dz bounding box.
	"""
	def door0(dx, dy, dz):
		cells = []
		for col, width in enumerate(doorX):
			offset = sum(doorX[:col])
			# Negative quotes carve openings, positive quotes keep the panel solid.
			quotes = [h if occurrency[col][row] else -h for row, h in enumerate(doorY)]
			cells.append(PROD([QUOTE([-offset, width]), QUOTE(quotes)]))

		# Extrude the 2D grid by dy, remap axes, then scale to the target box.
		panel = MAP([S2, S3, S1])(PROD([STRUCT(cells), Q(dy)]))
		panel = S([1, 2, 3])([dx / SIZE([1])(panel)[0],
			dy / SIZE([2])(panel)[0],
			dz / SIZE([3])(panel)[0]])(panel)

		doorBody = TEXTURE(["wood.jpg", True, False, 1, 1, 0, 1, 1])(STRUCT([panel]))

		# Glass inset slightly smaller than the panel, centered in its depth.
		glass = CUBOID([SIZE([1])(panel)[0] * 0.94, 0.01, SIZE([3])(panel)[0] * 0.94])
		glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
		glass = TEXTURE(["glass.jpg"])(glass)

		# Thin vertical strip splitting the door face in two.
		refiner = T([1, 2])([dx / 2, dy])(CUBOID([0.03, 0.01, dz]))
		refiner = TEXTURE(["wood.jpg", True, False, 1, 1, 0, 1, 1])(refiner)

		# Handle assembled from three small boxes, placed near mid-height.
		handler1 = T(3)(.15)(CUBOID([.05, .02, .2]))
		handler2 = CUBOID([.05, .02, .05])
		handler3 = T([1, 2])([.01, .02])(CUBOID([.03, .02, .2]))
		handler = TEXTURE("bronze.jpg")(STRUCT([handler3, handler2, handler1]))
		handler = T([1, 2, 3])([dx / 2. - 2 * SIZE([1])(handler)[0],
			dy,
			dz / 2. - 1.5 * SIZE([3])(handler)[0]])(handler)

		# Final rescale of the complete assembly against the panel's extents.
		return S([1, 2, 3])([dx / SIZE([1])(panel)[0],
			dy / SIZE([2])(panel)[0],
			dz / SIZE([3])(panel)[0]])(STRUCT([doorBody, glass, refiner, handler]))

	return door0
# Render a sample door (2.2 x 0.4 x 2.8) and a sample window (0.6 x 0.1 x 1.2)
# built from the distance arrays and occupancy matrices defined earlier in the file.
VIEW(door(doorX, doorY, doorOccurrency)(2.2, .4, 2.8))
VIEW(window(windowX,windowY,windowOccurrency)(.6,.1,1.2))
|
flexible
|
{
"blob_id": "9bc955def6250908050a1f3046dd78480f25e0a1",
"index": 1898,
"step-1": "<mask token>\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n",
"step-3": "<mask token>\ndoorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08, \n 0.18, 0.2]\ndoorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]\ndoorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,\n False, True, False, True, False, True], [True] * 13, [True, False, True,\n False, True, False, True, False, True, False, True, False, True], [True,\n False, True, False, True, True, True, True, True, False, True, False, \n True], [True, False, True, False, False, False, True, False, False, \n False, True, False, True], [True, False, True, True, True, True, True, \n True, True, True, True, False, True], [True, False, False, False, False,\n False, True, False, False, False, False, False, True], [True] * 13]\nwindowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]\nwindowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]\nwindowOccurrency = [[True] * 9, [True, False, False, False, False, False, \n False, False, True], [True] * 9, [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True] * 9]\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if 
occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n",
"step-4": "from pyplasm import *\ndoorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08, \n 0.18, 0.2]\ndoorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]\ndoorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,\n False, True, False, True, False, True], [True] * 13, [True, False, True,\n False, True, False, True, False, True, False, True, False, True], [True,\n False, True, False, True, True, True, True, True, False, True, False, \n True], [True, False, True, False, False, False, True, False, False, \n False, True, False, True], [True, False, True, True, True, True, True, \n True, True, True, True, False, True], [True, False, False, False, False,\n False, True, False, False, False, False, False, True], [True] * 13]\nwindowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]\nwindowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]\nwindowOccurrency = [[True] * 9, [True, False, False, False, False, False, \n False, False, True], [True] * 9, [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True] * 9]\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n 
if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n",
"step-5": "from pyplasm import *\n\ndoorY = [.2,.18,.08,.18,.08,.18,.4,.18,.08,.18,.08,.18,.2]\ndoorX = [.2,.5,.2,1.8,.08,.18,.08,.18,.2]\n\ndoorOccurrency = [[True]*13,\n\t\t\t\t\t[True, False, True, False, True, False, True, False, True, False, True, False, True],\n\t\t\t\t\t[True]*13,\n\t\t\t\t\t[True, False, True, False, True, False, True, False, True, False, True, False, True],\n\t\t\t\t\t[True, False, True, False, True, True, True, True, True, False, True, False, True],\n\t\t\t\t\t[True, False, True, False, False, False, True, False, False, False, True, False, True],\n\t\t\t\t\t[True, False, True, True, True, True, True, True, True, True, True, False, True],\n\t\t\t\t\t[True, False, False, False, False, False, True, False, False, False, False, False, True],\n\t\t\t\t\t[True]*13]\n\nwindowY = [0.04,0.04,0.2,0.02,0.16,0.02,0.2,0.04,0.04]\nwindowX = [0.02,0.8,0.05,0.02,0.4,0.02,0.4,0.05,0.04]\n\nwindowOccurrency = [[True]*9,\n\t\t\t\t\t[True, False, False, False, False, False, False, False, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True, True, False, True, False, True, False, True, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True, True, False, True, False, True, False, True, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True]*9]\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n\t\"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n\tsumY = sum(Y) \n\tsumX = sum(X)\n\tvisitedY = [False]*len(Y)\n\tfor y_index in range(len(Y)):\n\t\tupdate = True\n\t\tfor x_index in range(len(X)):\n\t\t\tif(occurrency[x_index][y_index] == False):\n\t\t\t\tupdate = False \n\t\tif(update):\n\t\t\tsumY = sumY - Y[y_index]\n\t\t\tsumX = sumX - X[y_index]\n\t\t\tdx = dx - X[y_index]\n\t\t\tdz = dz - Y[y_index]\n\n\tfor x_index in range(len(X)):\n\t\tmodifyX = False\n\t\tfor y_index in 
range(len(Y)):\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == False):\n\t\t\t\tY[y_index] = (dz * Y[y_index])/sumY\n\t\t\t\tvisitedY[y_index] = True\n\t\t\t\tmodifyX = True\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX):\n\t\t\t\tmodifyX = True\n\t\tif(modifyX):\n\t\t\tX[x_index] = (dx * X[x_index])/sumX\n\n\ndef window(windowX, windowY, occurrency):\n\t\"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0\n\n\ndef door(doorX, doorY, occurrency):\n\t\"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the 
door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\tdef door0(dx, dy, dz):\n\n\t\tmodel = []\n\n\t\tfor xIndex in range(len(doorX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(doorX[:xIndex])\n\t\t\tfor yIndex in range(len(doorY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-doorY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(doorY[yIndex])\n\t\t\tmodel.append(PROD([ QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tres = PROD([STRUCT(model), Q(dy)])\n\t\tres = MAP([S2,S3,S1])(res)\n\t\tres = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (res)\n\n\t\tdoor = TEXTURE([\"wood.jpg\", True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n\n\t\tglass = CUBOID([SIZE([1])(res)[0]*0.94, 0.01, SIZE([3])(res)[0]*0.94])\n\t\tglass = T([1,2,3])([dx*0.003, dy/2, dz*0.005])(glass)\n\t\tglass = TEXTURE([\"glass.jpg\"])(glass)\n\n\t\trefiner = CUBOID([0.03, 0.01,dz])\n\t\trefiner = T([1,2])([dx/2,dy])(refiner)\n\t\trefiner = TEXTURE([\"wood.jpg\", True, False, 1, 1, 0, 1, 1])(refiner)\n\n\t\thandler1 = T(3)(.15)(CUBOID([.05,.02,.2]))\n\t\thandler2 = CUBOID([.05,.02,.05])\n\t\thandler3 = T([1,2])([.01,.02])(CUBOID([.03,.02,.2]))\n\t\thandler = TEXTURE(\"bronze.jpg\")(STRUCT([handler3, handler2, handler1]))\n\t\thandler = T([1,2,3])([dx/2.-2*SIZE([1])(handler)[0],dy, dz/2.-1.5*SIZE([3])(handler)[0]])(handler)\n\n\t\tfinalDoor = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (STRUCT([door, glass, refiner, handler]))\n\n\t\treturn finalDoor\n\n\treturn door0\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, .4, 2.8))\nVIEW(window(windowX,windowY,windowOccurrency)(.6,.1,1.2))",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def app(page):
if not login_status():
title_container = st.empty()
remail_input_container = st.empty()
rpw_input_container = st.empty()
rregister_button_container = st.empty()
email = remail_input_container.text_input('Email ')
password = rpw_input_container.text_input('Password ', type='password')
rregister_button = rregister_button_container.button('Register')
if rregister_button:
title_container.empty()
remail_input_container.empty()
rpw_input_container.empty()
rregister_button_container.empty()
login()
page.app()
st.experimental_rerun()
<|reserved_special_token_1|>
import streamlit as st
from streamlit.components.v1 import components
from streamlit.report_thread import get_report_ctx
from util.session import *
from multipage import MultiPage
from pages import register
def app(page):
if not login_status():
title_container = st.empty()
remail_input_container = st.empty()
rpw_input_container = st.empty()
rregister_button_container = st.empty()
email = remail_input_container.text_input('Email ')
password = rpw_input_container.text_input('Password ', type='password')
rregister_button = rregister_button_container.button('Register')
if rregister_button:
title_container.empty()
remail_input_container.empty()
rpw_input_container.empty()
rregister_button_container.empty()
login()
page.app()
st.experimental_rerun()
<|reserved_special_token_1|>
import streamlit as st
from streamlit.components.v1 import components
from streamlit.report_thread import get_report_ctx
from util.session import *
from multipage import MultiPage
from pages import register
def app(page):
if not login_status():
title_container = st.empty()
remail_input_container = st.empty()
rpw_input_container = st.empty()
rregister_button_container = st.empty()
# title_container.write("Register")
email = remail_input_container.text_input("Email ")
password = rpw_input_container.text_input("Password ", type="password")
rregister_button = rregister_button_container.button('Register')
if rregister_button:
title_container.empty()
remail_input_container.empty()
rpw_input_container.empty()
rregister_button_container.empty()
login()
page.app()
st.experimental_rerun()
|
flexible
|
{
"blob_id": "41cfd558824b6561114a48a694b1e6e6a7cb8c05",
"index": 7,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef app(page):\n if not login_status():\n title_container = st.empty()\n remail_input_container = st.empty()\n rpw_input_container = st.empty()\n rregister_button_container = st.empty()\n email = remail_input_container.text_input('Email ')\n password = rpw_input_container.text_input('Password ', type='password')\n rregister_button = rregister_button_container.button('Register')\n if rregister_button:\n title_container.empty()\n remail_input_container.empty()\n rpw_input_container.empty()\n rregister_button_container.empty()\n login()\n page.app()\n st.experimental_rerun()\n",
"step-3": "import streamlit as st\nfrom streamlit.components.v1 import components\nfrom streamlit.report_thread import get_report_ctx\nfrom util.session import *\nfrom multipage import MultiPage\nfrom pages import register\n\n\ndef app(page):\n if not login_status():\n title_container = st.empty()\n remail_input_container = st.empty()\n rpw_input_container = st.empty()\n rregister_button_container = st.empty()\n email = remail_input_container.text_input('Email ')\n password = rpw_input_container.text_input('Password ', type='password')\n rregister_button = rregister_button_container.button('Register')\n if rregister_button:\n title_container.empty()\n remail_input_container.empty()\n rpw_input_container.empty()\n rregister_button_container.empty()\n login()\n page.app()\n st.experimental_rerun()\n",
"step-4": "import streamlit as st\nfrom streamlit.components.v1 import components\nfrom streamlit.report_thread import get_report_ctx\nfrom util.session import *\nfrom multipage import MultiPage\nfrom pages import register\n\ndef app(page):\n if not login_status():\n title_container = st.empty()\n remail_input_container = st.empty()\n rpw_input_container = st.empty()\n rregister_button_container = st.empty()\n\n # title_container.write(\"Register\")\n email = remail_input_container.text_input(\"Email \")\n password = rpw_input_container.text_input(\"Password \", type=\"password\")\n rregister_button = rregister_button_container.button('Register')\n\n if rregister_button:\n title_container.empty()\n remail_input_container.empty()\n rpw_input_container.empty()\n rregister_button_container.empty()\n login()\n page.app()\n st.experimental_rerun()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TestLempelZivWelchDecoder(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
run_length_decoder = LempelZivWelchDecoder()
self.assertRaises(ValueError, lambda : run_length_decoder.decode())
self.assertTrue(run_length_decoder.input is None)
run_length_decoder.input = test_value
self.assertEqual(run_length_decoder.input, test_value)
self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
run_length_decoder = LempelZivWelchDecoder()
self.assertRaises(ValueError, lambda : run_length_decoder.decode())
self.assertTrue(run_length_decoder.input is None)
run_length_decoder.input = test_value
self.assertEqual(run_length_decoder.input, test_value)
self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from LempelZivWelchDecoder import LempelZivWelchDecoder
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
run_length_decoder = LempelZivWelchDecoder()
self.assertRaises(ValueError, lambda : run_length_decoder.decode())
self.assertTrue(run_length_decoder.input is None)
run_length_decoder.input = test_value
self.assertEqual(run_length_decoder.input, test_value)
self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from LempelZivWelchDecoder import LempelZivWelchDecoder
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
run_length_decoder = LempelZivWelchDecoder()
self.assertRaises(ValueError,
lambda: run_length_decoder.decode()) # assert if method raises error when there is no input
self.assertTrue(run_length_decoder.input is None) # assert if input is none when it's not set
run_length_decoder.input = test_value
self.assertEqual(run_length_decoder.input, test_value) # assert that input is initialized with proper value
self.assertEqual(run_length_decoder.decode(),
"ttttttessst1") # assert that result is correct
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "8126af930ec75e2818455d959f00285bdc08c044",
"index": 1899,
"step-1": "<mask token>\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n\n def test_decode(self):\n test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']\n run_length_decoder = LempelZivWelchDecoder()\n self.assertRaises(ValueError, lambda : run_length_decoder.decode())\n self.assertTrue(run_length_decoder.input is None)\n run_length_decoder.input = test_value\n self.assertEqual(run_length_decoder.input, test_value)\n self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n\n def test_decode(self):\n test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']\n run_length_decoder = LempelZivWelchDecoder()\n self.assertRaises(ValueError, lambda : run_length_decoder.decode())\n self.assertTrue(run_length_decoder.input is None)\n run_length_decoder.input = test_value\n self.assertEqual(run_length_decoder.input, test_value)\n self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom LempelZivWelchDecoder import LempelZivWelchDecoder\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n\n def test_decode(self):\n test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']\n run_length_decoder = LempelZivWelchDecoder()\n self.assertRaises(ValueError, lambda : run_length_decoder.decode())\n self.assertTrue(run_length_decoder.input is None)\n run_length_decoder.input = test_value\n self.assertEqual(run_length_decoder.input, test_value)\n self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\n\nfrom LempelZivWelchDecoder import LempelZivWelchDecoder\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n def test_decode(self):\n test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']\n run_length_decoder = LempelZivWelchDecoder()\n\n self.assertRaises(ValueError,\n lambda: run_length_decoder.decode()) # assert if method raises error when there is no input\n self.assertTrue(run_length_decoder.input is None) # assert if input is none when it's not set\n\n run_length_decoder.input = test_value\n self.assertEqual(run_length_decoder.input, test_value) # assert that input is initialized with proper value\n self.assertEqual(run_length_decoder.decode(),\n \"ttttttessst1\") # assert that result is correct\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Vertex:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_connections(self):
return self.connections.keys()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Graph:
def __init__(self):
self.vertices = {}
self.num_vertices = 0
def add_vertex(self, key):
new_vertex = Vertex(key)
self.num_vertices += 1
self.vertices[key] = new_vertex
return new_vertex
def get_vertex(self, key):
if key in self.vertices:
return self.vertices[key]
else:
return None
def add_edge(self, origin, dest, weight=0):
if origin not in self.vertices:
self.add_vertex(origin)
if dest not in self.vertices:
self.add_vertex(dest)
self.vertices[origin].add_neighbor(self.vertices[dest], weight)
def get_vertices(self):
return self.vertices.keys()
def __iter__(self):
return iter(self.vertices.values())
def __contains__(self, n):
return n in self.vertices
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Vertex:
def __init__(self, key):
self.id = key
self.connections = {}
def add_neighbor(self, nbr, weight=0):
self.connections[nbr] = weight
def get_connections(self):
return self.connections.keys()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Graph:
def __init__(self):
self.vertices = {}
self.num_vertices = 0
def add_vertex(self, key):
new_vertex = Vertex(key)
self.num_vertices += 1
self.vertices[key] = new_vertex
return new_vertex
def get_vertex(self, key):
if key in self.vertices:
return self.vertices[key]
else:
return None
def add_edge(self, origin, dest, weight=0):
if origin not in self.vertices:
self.add_vertex(origin)
if dest not in self.vertices:
self.add_vertex(dest)
self.vertices[origin].add_neighbor(self.vertices[dest], weight)
def get_vertices(self):
return self.vertices.keys()
def __iter__(self):
return iter(self.vertices.values())
def __contains__(self, n):
return n in self.vertices
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Vertex:
def __init__(self, key):
self.id = key
self.connections = {}
def add_neighbor(self, nbr, weight=0):
self.connections[nbr] = weight
def get_connections(self):
return self.connections.keys()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
connections = str([x.id for x in self.connections])
return f'{str(self.id)} connected to: {connections}'
class Graph:
def __init__(self):
self.vertices = {}
self.num_vertices = 0
def add_vertex(self, key):
new_vertex = Vertex(key)
self.num_vertices += 1
self.vertices[key] = new_vertex
return new_vertex
def get_vertex(self, key):
if key in self.vertices:
return self.vertices[key]
else:
return None
def add_edge(self, origin, dest, weight=0):
if origin not in self.vertices:
self.add_vertex(origin)
if dest not in self.vertices:
self.add_vertex(dest)
self.vertices[origin].add_neighbor(self.vertices[dest], weight)
def get_vertices(self):
return self.vertices.keys()
def __iter__(self):
return iter(self.vertices.values())
def __contains__(self, n):
return n in self.vertices
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Vertex:
def __init__(self, key):
self.id = key
self.connections = {}
def add_neighbor(self, nbr, weight=0):
self.connections[nbr] = weight
def get_connections(self):
return self.connections.keys()
def get_id(self):
return self.id
def get_weight(self, nbr):
return self.connections[nbr]
def __str__(self):
connections = str([x.id for x in self.connections])
return f'{str(self.id)} connected to: {connections}'
class Graph:
def __init__(self):
self.vertices = {}
self.num_vertices = 0
def add_vertex(self, key):
new_vertex = Vertex(key)
self.num_vertices += 1
self.vertices[key] = new_vertex
return new_vertex
def get_vertex(self, key):
if key in self.vertices:
return self.vertices[key]
else:
return None
def add_edge(self, origin, dest, weight=0):
if origin not in self.vertices:
self.add_vertex(origin)
if dest not in self.vertices:
self.add_vertex(dest)
self.vertices[origin].add_neighbor(self.vertices[dest], weight)
def get_vertices(self):
return self.vertices.keys()
def __iter__(self):
return iter(self.vertices.values())
def __contains__(self, n):
return n in self.vertices
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Vertex():
def __init__(self, key):
self.id = key
self.connections = {}
def add_neighbor(self, nbr, weight=0):
self.connections[nbr] = weight
def get_connections(self):
return self.connections.keys()
def get_id(self):
return self.id
def get_weight(self, nbr):
return self.connections[nbr]
def __str__(self):
connections = str([x.id for x in self.connections])
return f'{str(self.id)} connected to: {connections}'
class Graph():
def __init__(self):
self.vertices = {}
self.num_vertices = 0
def add_vertex(self, key):
new_vertex = Vertex(key)
self.num_vertices += 1
self.vertices[key] = new_vertex
return new_vertex
def get_vertex(self, key):
if key in self.vertices:
return self.vertices[key]
else:
return None
def add_edge(self, origin, dest, weight=0):
if origin not in self.vertices:
self.add_vertex(origin)
if dest not in self.vertices:
self.add_vertex(dest)
self.vertices[origin].add_neighbor(self.vertices[dest], weight)
def get_vertices(self):
return self.vertices.keys()
def __iter__(self):
return iter(self.vertices.values())
def __contains__(self, n):
return n in self.vertices
if __name__ == '__main__':
g = Graph()
for i in range(6):
g.add_vertex(i)
print(g.vertices)
g.add_edge(0, 1, 2)
for vertex in g:
print(vertex)
print(vertex.get_connections)
print('---------------------')
|
flexible
|
{
"blob_id": "3af78dcc0bb0b6f253af01d2945ad6ada02ca7a0",
"index": 7270,
"step-1": "class Vertex:\n <mask token>\n <mask token>\n\n def get_connections(self):\n return self.connections.keys()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Graph:\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\n<mask token>\n",
"step-2": "class Vertex:\n\n def __init__(self, key):\n self.id = key\n self.connections = {}\n\n def add_neighbor(self, nbr, weight=0):\n self.connections[nbr] = weight\n\n def get_connections(self):\n return self.connections.keys()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Graph:\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\n<mask token>\n",
"step-3": "class Vertex:\n\n def __init__(self, key):\n self.id = key\n self.connections = {}\n\n def add_neighbor(self, nbr, weight=0):\n self.connections[nbr] = weight\n\n def get_connections(self):\n return self.connections.keys()\n <mask token>\n <mask token>\n\n def __str__(self):\n connections = str([x.id for x in self.connections])\n return f'{str(self.id)} connected to: {connections}'\n\n\nclass Graph:\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\n<mask token>\n",
"step-4": "class Vertex:\n\n def __init__(self, key):\n self.id = key\n self.connections = {}\n\n def add_neighbor(self, nbr, weight=0):\n self.connections[nbr] = weight\n\n def get_connections(self):\n return self.connections.keys()\n\n def get_id(self):\n return self.id\n\n def get_weight(self, nbr):\n return self.connections[nbr]\n\n def __str__(self):\n connections = str([x.id for x in self.connections])\n return f'{str(self.id)} connected to: {connections}'\n\n\nclass Graph:\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nclass Vertex():\n\n def __init__(self, key):\n self.id = key\n self.connections = {}\n\n def add_neighbor(self, nbr, weight=0):\n self.connections[nbr] = weight\n\n def get_connections(self):\n return self.connections.keys()\n\n def get_id(self):\n return self.id\n\n def get_weight(self, nbr):\n return self.connections[nbr]\n\n def __str__(self):\n connections = str([x.id for x in self.connections])\n return f'{str(self.id)} connected to: {connections}'\n\n\nclass Graph():\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\nif __name__ == '__main__':\n g = Graph()\n for i in range(6):\n g.add_vertex(i)\n print(g.vertices)\n g.add_edge(0, 1, 2)\n for vertex in g:\n print(vertex)\n print(vertex.get_connections)\n print('---------------------')\n",
"step-ids": [
10,
12,
13,
15,
17
]
}
|
[
10,
12,
13,
15,
17
] |
import sys
from pypregel import Pypregel
from pypregel.vertex import Vertex, Edge
from pypregel.reader import Reader
from pypregel.writer import Writer
from pypregel.combiner import Combiner
class PageRankVertex(Vertex):
def compute(self):
if self.superstep() >= 1:
s = 0
while self.has_message():
msg = self.get_message()
s += msg
self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)
if self.superstep() < 30:
n = len(self.get_out_edges())
if n > 0:
self.send_message_to_all_neighbors(self.get_value() / n)
else:
self.vote_to_halt()
class PageRankReader(Reader):
def read_num_of_vertices(self):
line = self.config_fp.readline()
return int(line)
def read_vertex(self):
line = self.graph_fp.readline()
if not line:
return None
line = line.strip().split(':')
vertex_id = int(line[0])
edges = []
if line[1]:
for e in line[1].split(' '):
edges.append(Edge(int(e), None))
return PageRankVertex(vertex_id, None, edges)
class PageRankWriter(Writer):
def write_vertex(self, vertex):
return vertex.get_vertex_id(), str(vertex.get_value())
class PageRankCombiner(Combiner):
def combine(self, msg_x, msg_y):
msg_x_value = msg_x[1]
msg_y_value = msg_y[1]
return None, msg_x_value + msg_y_value
def main():
if len(sys.argv) < 4:
print("usage: python %s [config] [graph] [out_file]" % sys.argv[0])
return
pagerank_reader = PageRankReader(sys.argv[1], sys.argv[2])
pagerank_writer = PageRankWriter(sys.argv[3])
pagerank_combiner = PageRankCombiner()
pagerank = Pypregel(
reader=pagerank_reader,
writer=pagerank_writer,
combiner=pagerank_combiner
)
pagerank.run()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "6db7189d26c63ca9f9667045b780ec11994bac28",
"index": 788,
"step-1": "<mask token>\n\n\nclass PageRankReader(Reader):\n\n def read_num_of_vertices(self):\n line = self.config_fp.readline()\n return int(line)\n\n def read_vertex(self):\n line = self.graph_fp.readline()\n if not line:\n return None\n line = line.strip().split(':')\n vertex_id = int(line[0])\n edges = []\n if line[1]:\n for e in line[1].split(' '):\n edges.append(Edge(int(e), None))\n return PageRankVertex(vertex_id, None, edges)\n\n\nclass PageRankWriter(Writer):\n\n def write_vertex(self, vertex):\n return vertex.get_vertex_id(), str(vertex.get_value())\n\n\nclass PageRankCombiner(Combiner):\n\n def combine(self, msg_x, msg_y):\n msg_x_value = msg_x[1]\n msg_y_value = msg_y[1]\n return None, msg_x_value + msg_y_value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PageRankVertex(Vertex):\n\n def compute(self):\n if self.superstep() >= 1:\n s = 0\n while self.has_message():\n msg = self.get_message()\n s += msg\n self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)\n if self.superstep() < 30:\n n = len(self.get_out_edges())\n if n > 0:\n self.send_message_to_all_neighbors(self.get_value() / n)\n else:\n self.vote_to_halt()\n\n\nclass PageRankReader(Reader):\n\n def read_num_of_vertices(self):\n line = self.config_fp.readline()\n return int(line)\n\n def read_vertex(self):\n line = self.graph_fp.readline()\n if not line:\n return None\n line = line.strip().split(':')\n vertex_id = int(line[0])\n edges = []\n if line[1]:\n for e in line[1].split(' '):\n edges.append(Edge(int(e), None))\n return PageRankVertex(vertex_id, None, edges)\n\n\nclass PageRankWriter(Writer):\n\n def write_vertex(self, vertex):\n return vertex.get_vertex_id(), str(vertex.get_value())\n\n\nclass PageRankCombiner(Combiner):\n\n def combine(self, msg_x, msg_y):\n msg_x_value = msg_x[1]\n msg_y_value = msg_y[1]\n return None, msg_x_value + msg_y_value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PageRankVertex(Vertex):\n\n def compute(self):\n if self.superstep() >= 1:\n s = 0\n while self.has_message():\n msg = self.get_message()\n s += msg\n self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)\n if self.superstep() < 30:\n n = len(self.get_out_edges())\n if n > 0:\n self.send_message_to_all_neighbors(self.get_value() / n)\n else:\n self.vote_to_halt()\n\n\nclass PageRankReader(Reader):\n\n def read_num_of_vertices(self):\n line = self.config_fp.readline()\n return int(line)\n\n def read_vertex(self):\n line = self.graph_fp.readline()\n if not line:\n return None\n line = line.strip().split(':')\n vertex_id = int(line[0])\n edges = []\n if line[1]:\n for e in line[1].split(' '):\n edges.append(Edge(int(e), None))\n return PageRankVertex(vertex_id, None, edges)\n\n\nclass PageRankWriter(Writer):\n\n def write_vertex(self, vertex):\n return vertex.get_vertex_id(), str(vertex.get_value())\n\n\nclass PageRankCombiner(Combiner):\n\n def combine(self, msg_x, msg_y):\n msg_x_value = msg_x[1]\n msg_y_value = msg_y[1]\n return None, msg_x_value + msg_y_value\n\n\ndef main():\n if len(sys.argv) < 4:\n print('usage: python %s [config] [graph] [out_file]' % sys.argv[0])\n return\n pagerank_reader = PageRankReader(sys.argv[1], sys.argv[2])\n pagerank_writer = PageRankWriter(sys.argv[3])\n pagerank_combiner = PageRankCombiner()\n pagerank = Pypregel(reader=pagerank_reader, writer=pagerank_writer,\n combiner=pagerank_combiner)\n pagerank.run()\n\n\n<mask token>\n",
"step-4": "import sys\nfrom pypregel import Pypregel\nfrom pypregel.vertex import Vertex, Edge\nfrom pypregel.reader import Reader\nfrom pypregel.writer import Writer\nfrom pypregel.combiner import Combiner\n\n\nclass PageRankVertex(Vertex):\n\n def compute(self):\n if self.superstep() >= 1:\n s = 0\n while self.has_message():\n msg = self.get_message()\n s += msg\n self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)\n if self.superstep() < 30:\n n = len(self.get_out_edges())\n if n > 0:\n self.send_message_to_all_neighbors(self.get_value() / n)\n else:\n self.vote_to_halt()\n\n\nclass PageRankReader(Reader):\n\n def read_num_of_vertices(self):\n line = self.config_fp.readline()\n return int(line)\n\n def read_vertex(self):\n line = self.graph_fp.readline()\n if not line:\n return None\n line = line.strip().split(':')\n vertex_id = int(line[0])\n edges = []\n if line[1]:\n for e in line[1].split(' '):\n edges.append(Edge(int(e), None))\n return PageRankVertex(vertex_id, None, edges)\n\n\nclass PageRankWriter(Writer):\n\n def write_vertex(self, vertex):\n return vertex.get_vertex_id(), str(vertex.get_value())\n\n\nclass PageRankCombiner(Combiner):\n\n def combine(self, msg_x, msg_y):\n msg_x_value = msg_x[1]\n msg_y_value = msg_y[1]\n return None, msg_x_value + msg_y_value\n\n\ndef main():\n if len(sys.argv) < 4:\n print('usage: python %s [config] [graph] [out_file]' % sys.argv[0])\n return\n pagerank_reader = PageRankReader(sys.argv[1], sys.argv[2])\n pagerank_writer = PageRankWriter(sys.argv[3])\n pagerank_combiner = PageRankCombiner()\n pagerank = Pypregel(reader=pagerank_reader, writer=pagerank_writer,\n combiner=pagerank_combiner)\n pagerank.run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import sys\n\nfrom pypregel import Pypregel\nfrom pypregel.vertex import Vertex, Edge\nfrom pypregel.reader import Reader\nfrom pypregel.writer import Writer\nfrom pypregel.combiner import Combiner\n\n\nclass PageRankVertex(Vertex):\n def compute(self):\n if self.superstep() >= 1:\n s = 0\n while self.has_message():\n msg = self.get_message()\n s += msg\n\n self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)\n\n if self.superstep() < 30:\n n = len(self.get_out_edges())\n if n > 0:\n self.send_message_to_all_neighbors(self.get_value() / n)\n else:\n self.vote_to_halt()\n\n\nclass PageRankReader(Reader):\n def read_num_of_vertices(self):\n line = self.config_fp.readline()\n return int(line)\n\n def read_vertex(self):\n line = self.graph_fp.readline()\n if not line:\n return None\n\n line = line.strip().split(':')\n vertex_id = int(line[0])\n\n edges = []\n if line[1]:\n for e in line[1].split(' '):\n edges.append(Edge(int(e), None))\n\n return PageRankVertex(vertex_id, None, edges)\n\n\nclass PageRankWriter(Writer):\n def write_vertex(self, vertex):\n return vertex.get_vertex_id(), str(vertex.get_value())\n\n\nclass PageRankCombiner(Combiner):\n def combine(self, msg_x, msg_y):\n msg_x_value = msg_x[1]\n msg_y_value = msg_y[1]\n return None, msg_x_value + msg_y_value\n\n\ndef main():\n if len(sys.argv) < 4:\n print(\"usage: python %s [config] [graph] [out_file]\" % sys.argv[0])\n return\n\n pagerank_reader = PageRankReader(sys.argv[1], sys.argv[2])\n pagerank_writer = PageRankWriter(sys.argv[3])\n pagerank_combiner = PageRankCombiner()\n pagerank = Pypregel(\n reader=pagerank_reader,\n writer=pagerank_writer,\n combiner=pagerank_combiner\n )\n\n pagerank.run()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
7,
9,
10,
12,
13
]
}
|
[
7,
9,
10,
12,
13
] |
import math
print(dir(math))
# Prints a list of entities residing in the math module
|
normal
|
{
"blob_id": "94056e8920d265831da67bd1d999330a47a7ef0d",
"index": 1991,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dir(math))\n",
"step-3": "import math\nprint(dir(math))\n",
"step-4": "import math\nprint(dir(math))\n\n# Prints a list of entities residing in the math module",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TestCreateSummaryReport(unittest.TestCase):
def setUp(self):
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_report_params = {'project': 'hcvtarget-uf',
'report_file_path': proj_root + 'config/report.xml',
'redcap_uri': 'https://hostname.org'}
self.test_report_data = {'total_subjects': 5, 'form_details': {
'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},
'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1
}, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {
'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,
'chemistry_Forms': 16}}, 'errors': []}
self.specimen_taken_time_summary = {'total': 15, 'blank': 3}
self.test_alert_summary = {'multiple_values_alert': [
'This is multiple values alert 1',
'This is multiple values alert 2',
'This is multiple values alert 3'], 'max_event_alert': [
'This is max event alert 1', 'This is max event alert 2',
'This is max event alert 3']}
self.expected_xml = """
<report>
<header>
<project>hcvtarget-uf</project>
<date>""" + time.strftime('%m/%d/%Y') + """</date>
<redcapServerAddress>https://hostname.org</redcapServerAddress>
</header>
<summary>
<subjectCount>5</subjectCount>
<forms>
<form>
<form_name>Total_cbc_Forms</form_name>
<form_count>53</form_count>
</form>
<form>
<form_name>Total_chemistry_Forms</form_name>
<form_count>22</form_count>
</form>
</forms>
</summary>
<alerts>
<tooManyForms>
<eventAlert>
<message>This is max event alert 1</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 2</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 3</message>
</eventAlert>
</tooManyForms>
<tooManyValues>
<valuesAlert>
<message>This is multiple values alert 1</message>
</valuesAlert>
<valuesAlert>
<message>This is multiple values alert 2</message>
</valuesAlert>
<valuesAlert><message>This is multiple values alert 3</message>
</valuesAlert></tooManyValues>
</alerts>
<subjectsDetails>
<Subject><ID>59</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>39</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>16</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>60</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>1</form_count></form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject><ID>61</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>2</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>63</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>11</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>4</form_count>
</form>
</forms>
</Subject>
</subjectsDetails>
<errors/>
<summaryOfSpecimenTakenTimes>
<total>15</total>
<blank>3</blank>
<percent>20.0</percent>
</summaryOfSpecimenTakenTimes>
</report>"""
self.schema_str = StringIO(
""" <xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="report">
<xs:complexType>
<xs:sequence>
<xs:element name="header">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="project"/>
<xs:element type="xs:string" name="date"/>
<xs:element type="xs:string" name="redcapServerAddress"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="summary">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="subjectCount"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="alerts">
<xs:complexType>
<xs:sequence>
<xs:element name="tooManyForms">
<xs:complexType>
<xs:sequence>
<xs:element name="eventAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="tooManyValues">
<xs:complexType>
<xs:sequence>
<xs:element name="valuesAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="subjectsDetails">
<xs:complexType>
<xs:sequence>
<xs:element name="Subject" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="ID"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="errors">
</xs:element>
<xs:element name="summaryOfSpecimenTakenTimes">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="total"/>
<xs:element type="xs:byte" name="blank"/>
<xs:element type="xs:float" name="percent"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>"""
)
return
def test_create_summary_report(self):
sys.path.append('config')
self.newpath = proj_root + 'config'
self.configFolderCreatedNow = False
if not os.path.exists(self.newpath):
self.configFolderCreatedNow = True
os.makedirs(self.newpath)
result = redi.create_summary_report(self.test_report_params, self.
test_report_data, self.test_alert_summary, self.
specimen_taken_time_summary)
result_string = etree.tostring(result)
xmlschema_doc = etree.parse(self.schema_str)
xml_schema = etree.XMLSchema(xmlschema_doc)
self.assertEqual(xml_schema.validate(result), True)
parser = etree.XMLParser(remove_blank_text=True)
clean_tree = etree.XML(self.expected_xml, parser=parser)
self.expected_xml = etree.tostring(clean_tree)
self.assertEqual(self.expected_xml, result_string)
def tearDown(self):
with open(proj_root + 'config/report.xml'):
os.remove(proj_root + 'config/report.xml')
if self.configFolderCreatedNow:
os.rmdir(self.newpath)
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCreateSummaryReport(unittest.TestCase):
def setUp(self):
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_report_params = {'project': 'hcvtarget-uf',
'report_file_path': proj_root + 'config/report.xml',
'redcap_uri': 'https://hostname.org'}
self.test_report_data = {'total_subjects': 5, 'form_details': {
'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},
'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1
}, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {
'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,
'chemistry_Forms': 16}}, 'errors': []}
self.specimen_taken_time_summary = {'total': 15, 'blank': 3}
self.test_alert_summary = {'multiple_values_alert': [
'This is multiple values alert 1',
'This is multiple values alert 2',
'This is multiple values alert 3'], 'max_event_alert': [
'This is max event alert 1', 'This is max event alert 2',
'This is max event alert 3']}
self.expected_xml = """
<report>
<header>
<project>hcvtarget-uf</project>
<date>""" + time.strftime('%m/%d/%Y') + """</date>
<redcapServerAddress>https://hostname.org</redcapServerAddress>
</header>
<summary>
<subjectCount>5</subjectCount>
<forms>
<form>
<form_name>Total_cbc_Forms</form_name>
<form_count>53</form_count>
</form>
<form>
<form_name>Total_chemistry_Forms</form_name>
<form_count>22</form_count>
</form>
</forms>
</summary>
<alerts>
<tooManyForms>
<eventAlert>
<message>This is max event alert 1</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 2</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 3</message>
</eventAlert>
</tooManyForms>
<tooManyValues>
<valuesAlert>
<message>This is multiple values alert 1</message>
</valuesAlert>
<valuesAlert>
<message>This is multiple values alert 2</message>
</valuesAlert>
<valuesAlert><message>This is multiple values alert 3</message>
</valuesAlert></tooManyValues>
</alerts>
<subjectsDetails>
<Subject><ID>59</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>39</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>16</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>60</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>1</form_count></form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject><ID>61</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>2</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>63</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>11</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>4</form_count>
</form>
</forms>
</Subject>
</subjectsDetails>
<errors/>
<summaryOfSpecimenTakenTimes>
<total>15</total>
<blank>3</blank>
<percent>20.0</percent>
</summaryOfSpecimenTakenTimes>
</report>"""
self.schema_str = StringIO(
""" <xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="report">
<xs:complexType>
<xs:sequence>
<xs:element name="header">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="project"/>
<xs:element type="xs:string" name="date"/>
<xs:element type="xs:string" name="redcapServerAddress"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="summary">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="subjectCount"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="alerts">
<xs:complexType>
<xs:sequence>
<xs:element name="tooManyForms">
<xs:complexType>
<xs:sequence>
<xs:element name="eventAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="tooManyValues">
<xs:complexType>
<xs:sequence>
<xs:element name="valuesAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="subjectsDetails">
<xs:complexType>
<xs:sequence>
<xs:element name="Subject" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="ID"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="errors">
</xs:element>
<xs:element name="summaryOfSpecimenTakenTimes">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="total"/>
<xs:element type="xs:byte" name="blank"/>
<xs:element type="xs:float" name="percent"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>"""
)
return
def test_create_summary_report(self):
sys.path.append('config')
self.newpath = proj_root + 'config'
self.configFolderCreatedNow = False
if not os.path.exists(self.newpath):
self.configFolderCreatedNow = True
os.makedirs(self.newpath)
result = redi.create_summary_report(self.test_report_params, self.
test_report_data, self.test_alert_summary, self.
specimen_taken_time_summary)
result_string = etree.tostring(result)
xmlschema_doc = etree.parse(self.schema_str)
xml_schema = etree.XMLSchema(xmlschema_doc)
self.assertEqual(xml_schema.validate(result), True)
parser = etree.XMLParser(remove_blank_text=True)
clean_tree = etree.XML(self.expected_xml, parser=parser)
self.expected_xml = etree.tostring(clean_tree)
self.assertEqual(self.expected_xml, result_string)
def tearDown(self):
with open(proj_root + 'config/report.xml'):
os.remove(proj_root + 'config/report.xml')
if self.configFolderCreatedNow:
os.rmdir(self.newpath)
return
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, '../')
proj_root = os.path.abspath(goal_dir) + '/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestCreateSummaryReport(unittest.TestCase):
def setUp(self):
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_report_params = {'project': 'hcvtarget-uf',
'report_file_path': proj_root + 'config/report.xml',
'redcap_uri': 'https://hostname.org'}
self.test_report_data = {'total_subjects': 5, 'form_details': {
'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},
'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1
}, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {
'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,
'chemistry_Forms': 16}}, 'errors': []}
self.specimen_taken_time_summary = {'total': 15, 'blank': 3}
self.test_alert_summary = {'multiple_values_alert': [
'This is multiple values alert 1',
'This is multiple values alert 2',
'This is multiple values alert 3'], 'max_event_alert': [
'This is max event alert 1', 'This is max event alert 2',
'This is max event alert 3']}
self.expected_xml = """
<report>
<header>
<project>hcvtarget-uf</project>
<date>""" + time.strftime('%m/%d/%Y') + """</date>
<redcapServerAddress>https://hostname.org</redcapServerAddress>
</header>
<summary>
<subjectCount>5</subjectCount>
<forms>
<form>
<form_name>Total_cbc_Forms</form_name>
<form_count>53</form_count>
</form>
<form>
<form_name>Total_chemistry_Forms</form_name>
<form_count>22</form_count>
</form>
</forms>
</summary>
<alerts>
<tooManyForms>
<eventAlert>
<message>This is max event alert 1</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 2</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 3</message>
</eventAlert>
</tooManyForms>
<tooManyValues>
<valuesAlert>
<message>This is multiple values alert 1</message>
</valuesAlert>
<valuesAlert>
<message>This is multiple values alert 2</message>
</valuesAlert>
<valuesAlert><message>This is multiple values alert 3</message>
</valuesAlert></tooManyValues>
</alerts>
<subjectsDetails>
<Subject><ID>59</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>39</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>16</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>60</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>1</form_count></form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject><ID>61</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>2</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>63</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>11</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>4</form_count>
</form>
</forms>
</Subject>
</subjectsDetails>
<errors/>
<summaryOfSpecimenTakenTimes>
<total>15</total>
<blank>3</blank>
<percent>20.0</percent>
</summaryOfSpecimenTakenTimes>
</report>"""
self.schema_str = StringIO(
""" <xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="report">
<xs:complexType>
<xs:sequence>
<xs:element name="header">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="project"/>
<xs:element type="xs:string" name="date"/>
<xs:element type="xs:string" name="redcapServerAddress"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="summary">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="subjectCount"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="alerts">
<xs:complexType>
<xs:sequence>
<xs:element name="tooManyForms">
<xs:complexType>
<xs:sequence>
<xs:element name="eventAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="tooManyValues">
<xs:complexType>
<xs:sequence>
<xs:element name="valuesAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="subjectsDetails">
<xs:complexType>
<xs:sequence>
<xs:element name="Subject" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="ID"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="errors">
</xs:element>
<xs:element name="summaryOfSpecimenTakenTimes">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="total"/>
<xs:element type="xs:byte" name="blank"/>
<xs:element type="xs:float" name="percent"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>"""
)
return
def test_create_summary_report(self):
sys.path.append('config')
self.newpath = proj_root + 'config'
self.configFolderCreatedNow = False
if not os.path.exists(self.newpath):
self.configFolderCreatedNow = True
os.makedirs(self.newpath)
result = redi.create_summary_report(self.test_report_params, self.
test_report_data, self.test_alert_summary, self.
specimen_taken_time_summary)
result_string = etree.tostring(result)
xmlschema_doc = etree.parse(self.schema_str)
xml_schema = etree.XMLSchema(xmlschema_doc)
self.assertEqual(xml_schema.validate(result), True)
parser = etree.XMLParser(remove_blank_text=True)
clean_tree = etree.XML(self.expected_xml, parser=parser)
self.expected_xml = etree.tostring(clean_tree)
self.assertEqual(self.expected_xml, result_string)
def tearDown(self):
with open(proj_root + 'config/report.xml'):
os.remove(proj_root + 'config/report.xml')
if self.configFolderCreatedNow:
os.rmdir(self.newpath)
return
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import unittest
import os
import sys
from lxml import etree
from StringIO import StringIO
import time
import redi
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, '../')
proj_root = os.path.abspath(goal_dir) + '/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestCreateSummaryReport(unittest.TestCase):
def setUp(self):
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_report_params = {'project': 'hcvtarget-uf',
'report_file_path': proj_root + 'config/report.xml',
'redcap_uri': 'https://hostname.org'}
self.test_report_data = {'total_subjects': 5, 'form_details': {
'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},
'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1
}, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {
'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,
'chemistry_Forms': 16}}, 'errors': []}
self.specimen_taken_time_summary = {'total': 15, 'blank': 3}
self.test_alert_summary = {'multiple_values_alert': [
'This is multiple values alert 1',
'This is multiple values alert 2',
'This is multiple values alert 3'], 'max_event_alert': [
'This is max event alert 1', 'This is max event alert 2',
'This is max event alert 3']}
self.expected_xml = """
<report>
<header>
<project>hcvtarget-uf</project>
<date>""" + time.strftime('%m/%d/%Y') + """</date>
<redcapServerAddress>https://hostname.org</redcapServerAddress>
</header>
<summary>
<subjectCount>5</subjectCount>
<forms>
<form>
<form_name>Total_cbc_Forms</form_name>
<form_count>53</form_count>
</form>
<form>
<form_name>Total_chemistry_Forms</form_name>
<form_count>22</form_count>
</form>
</forms>
</summary>
<alerts>
<tooManyForms>
<eventAlert>
<message>This is max event alert 1</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 2</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 3</message>
</eventAlert>
</tooManyForms>
<tooManyValues>
<valuesAlert>
<message>This is multiple values alert 1</message>
</valuesAlert>
<valuesAlert>
<message>This is multiple values alert 2</message>
</valuesAlert>
<valuesAlert><message>This is multiple values alert 3</message>
</valuesAlert></tooManyValues>
</alerts>
<subjectsDetails>
<Subject><ID>59</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>39</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>16</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>60</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>1</form_count></form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject><ID>61</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>2</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>63</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>11</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>4</form_count>
</form>
</forms>
</Subject>
</subjectsDetails>
<errors/>
<summaryOfSpecimenTakenTimes>
<total>15</total>
<blank>3</blank>
<percent>20.0</percent>
</summaryOfSpecimenTakenTimes>
</report>"""
self.schema_str = StringIO(
""" <xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="report">
<xs:complexType>
<xs:sequence>
<xs:element name="header">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="project"/>
<xs:element type="xs:string" name="date"/>
<xs:element type="xs:string" name="redcapServerAddress"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="summary">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="subjectCount"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="alerts">
<xs:complexType>
<xs:sequence>
<xs:element name="tooManyForms">
<xs:complexType>
<xs:sequence>
<xs:element name="eventAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="tooManyValues">
<xs:complexType>
<xs:sequence>
<xs:element name="valuesAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="subjectsDetails">
<xs:complexType>
<xs:sequence>
<xs:element name="Subject" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="ID"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="errors">
</xs:element>
<xs:element name="summaryOfSpecimenTakenTimes">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="total"/>
<xs:element type="xs:byte" name="blank"/>
<xs:element type="xs:float" name="percent"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>"""
)
return
def test_create_summary_report(self):
sys.path.append('config')
self.newpath = proj_root + 'config'
self.configFolderCreatedNow = False
if not os.path.exists(self.newpath):
self.configFolderCreatedNow = True
os.makedirs(self.newpath)
result = redi.create_summary_report(self.test_report_params, self.
test_report_data, self.test_alert_summary, self.
specimen_taken_time_summary)
result_string = etree.tostring(result)
xmlschema_doc = etree.parse(self.schema_str)
xml_schema = etree.XMLSchema(xmlschema_doc)
self.assertEqual(xml_schema.validate(result), True)
parser = etree.XMLParser(remove_blank_text=True)
clean_tree = etree.XML(self.expected_xml, parser=parser)
self.expected_xml = etree.tostring(clean_tree)
self.assertEqual(self.expected_xml, result_string)
def tearDown(self):
with open(proj_root + 'config/report.xml'):
os.remove(proj_root + 'config/report.xml')
if self.configFolderCreatedNow:
os.rmdir(self.newpath)
return
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
'''
Unit test for `redi.create_summary_report()`
'''
import unittest
import os
import sys
from lxml import etree
from StringIO import StringIO
import time
import redi
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir)+'/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestCreateSummaryReport(unittest.TestCase):
def setUp(self):
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_report_params = {
'project': 'hcvtarget-uf',
'report_file_path': proj_root + 'config/report.xml',
'redcap_uri': 'https://hostname.org'}
self.test_report_data = {
'total_subjects': 5,
'form_details': {
'Total_chemistry_Forms': 22,
'Total_cbc_Forms': 53
},
'subject_details': {
'60': {'cbc_Forms': 1, 'chemistry_Forms': 1},
'61': {'cbc_Forms': 2, 'chemistry_Forms': 1},
'63': {'cbc_Forms': 11, 'chemistry_Forms': 4},
'59': {'cbc_Forms': 39, 'chemistry_Forms': 16}
},
'errors' : [],
}
self.specimen_taken_time_summary = {'total': 15, 'blank': 3}
self.test_alert_summary = {
'multiple_values_alert': [
'This is multiple values alert 1',
'This is multiple values alert 2',
'This is multiple values alert 3'],
'max_event_alert': [
'This is max event alert 1',
'This is max event alert 2',
'This is max event alert 3']
}
self.expected_xml = '''
<report>
<header>
<project>hcvtarget-uf</project>
<date>'''+time.strftime("%m/%d/%Y")+'''</date>
<redcapServerAddress>https://hostname.org</redcapServerAddress>
</header>
<summary>
<subjectCount>5</subjectCount>
<forms>
<form>
<form_name>Total_cbc_Forms</form_name>
<form_count>53</form_count>
</form>
<form>
<form_name>Total_chemistry_Forms</form_name>
<form_count>22</form_count>
</form>
</forms>
</summary>
<alerts>
<tooManyForms>
<eventAlert>
<message>This is max event alert 1</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 2</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 3</message>
</eventAlert>
</tooManyForms>
<tooManyValues>
<valuesAlert>
<message>This is multiple values alert 1</message>
</valuesAlert>
<valuesAlert>
<message>This is multiple values alert 2</message>
</valuesAlert>
<valuesAlert><message>This is multiple values alert 3</message>
</valuesAlert></tooManyValues>
</alerts>
<subjectsDetails>
<Subject><ID>59</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>39</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>16</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>60</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>1</form_count></form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject><ID>61</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>2</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>63</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>11</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>4</form_count>
</form>
</forms>
</Subject>
</subjectsDetails>
<errors/>
<summaryOfSpecimenTakenTimes>
<total>15</total>
<blank>3</blank>
<percent>20.0</percent>
</summaryOfSpecimenTakenTimes>
</report>'''
self.schema_str = StringIO('''\
<xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="report">
<xs:complexType>
<xs:sequence>
<xs:element name="header">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="project"/>
<xs:element type="xs:string" name="date"/>
<xs:element type="xs:string" name="redcapServerAddress"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="summary">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="subjectCount"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="alerts">
<xs:complexType>
<xs:sequence>
<xs:element name="tooManyForms">
<xs:complexType>
<xs:sequence>
<xs:element name="eventAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="tooManyValues">
<xs:complexType>
<xs:sequence>
<xs:element name="valuesAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="subjectsDetails">
<xs:complexType>
<xs:sequence>
<xs:element name="Subject" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="ID"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="errors">
</xs:element>
<xs:element name="summaryOfSpecimenTakenTimes">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="total"/>
<xs:element type="xs:byte" name="blank"/>
<xs:element type="xs:float" name="percent"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>''')
return
def test_create_summary_report(self):
sys.path.append('config')
self.newpath = proj_root+'config'
self.configFolderCreatedNow = False
if not os.path.exists(self.newpath):
self.configFolderCreatedNow = True
os.makedirs(self.newpath)
result = redi.create_summary_report(\
self.test_report_params, \
self.test_report_data, \
self.test_alert_summary, \
self.specimen_taken_time_summary)
result_string = etree.tostring(result)
#print result_string
xmlschema_doc = etree.parse(self.schema_str)
xml_schema = etree.XMLSchema(xmlschema_doc)
# validate the xml against the xsd schema
self.assertEqual(xml_schema.validate(result), True)
# validate the actual data in xml but strip the white space first
parser = etree.XMLParser(remove_blank_text=True)
clean_tree = etree.XML(self.expected_xml, parser=parser)
self.expected_xml = etree.tostring(clean_tree)
self.assertEqual(self.expected_xml, result_string)
def tearDown(self):
# delete the created xml file
with open(proj_root + 'config/report.xml'):
os.remove(proj_root + 'config/report.xml')
if self.configFolderCreatedNow:
os.rmdir(self.newpath)
return
# Allow running this test module directly (e.g. `python test_file.py`).
if __name__ == '__main__':
    unittest.main()
|
flexible
|
{
"blob_id": "f9dd21aac7915b9bbf91eeffb5fd58ffdb43c6c3",
"index": 5857,
"step-1": "<mask token>\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple 
values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n 
<xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element 
type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple 
values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n 
<xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element 
type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-3": "<mask token>\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, '../')\nproj_root = os.path.abspath(goal_dir) + '/'\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is 
multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" 
name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element 
name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nimport os\nimport sys\nfrom lxml import etree\nfrom StringIO import StringIO\nimport time\nimport redi\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, '../')\nproj_root = os.path.abspath(goal_dir) + '/'\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This 
is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n 
</xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n 
</xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "'''\nUnit test for `redi.create_summary_report()`\n'''\nimport unittest\nimport os\nimport sys\nfrom lxml import etree\nfrom StringIO import StringIO\nimport time\nimport redi\n\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, \"../\")\nproj_root = os.path.abspath(goal_dir)+'/'\n\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {\n 'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n\n self.test_report_data = {\n 'total_subjects': 5,\n 'form_details': {\n 'Total_chemistry_Forms': 22,\n 'Total_cbc_Forms': 53\n },\n 'subject_details': {\n '60': {'cbc_Forms': 1, 'chemistry_Forms': 1},\n '61': {'cbc_Forms': 2, 'chemistry_Forms': 1},\n '63': {'cbc_Forms': 11, 'chemistry_Forms': 4},\n '59': {'cbc_Forms': 39, 'chemistry_Forms': 16}\n },\n 'errors' : [],\n }\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {\n 'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'],\n 'max_event_alert': [\n 'This is max event alert 1',\n 'This is max event alert 2',\n 'This is max event alert 3']\n }\n self.expected_xml = '''\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>'''+time.strftime(\"%m/%d/%Y\")+'''</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max 
event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>'''\n\n self.schema_str = StringIO('''\\\n <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" 
name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n 
</xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>''')\n return\n\n def test_create_summary_report(self):\n\n sys.path.append('config')\n self.newpath = proj_root+'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n\n result = redi.create_summary_report(\\\n self.test_report_params, \\\n self.test_report_data, \\\n self.test_alert_summary, \\\n self.specimen_taken_time_summary)\n result_string = etree.tostring(result)\n #print result_string\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n # validate the xml against the xsd schema\n self.assertEqual(xml_schema.validate(result), True)\n # validate the actual data in xml but strip the white space first\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n # delete the created xml file\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class decl_cmd1(Command):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class decl_cmd2(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class decl_cmd1(Command):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def finalize_options(self):
pass
def run(self):
pass
class decl_cmd2(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class decl_cmd1(Command):
<|reserved_special_token_0|>
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
class decl_cmd2(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class decl_cmd1(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
class decl_cmd2(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
<|reserved_special_token_1|>
from setuptools import Command
class decl_cmd1(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
class decl_cmd2(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
|
flexible
|
{
"blob_id": "70b8efa844395592131382d1d1e2c39150804f99",
"index": 4111,
"step-1": "<mask token>\n\n\nclass decl_cmd1(Command):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass decl_cmd1(Command):\n <mask token>\n <mask token>\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass decl_cmd1(Command):\n <mask token>\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n",
"step-4": "<mask token>\n\n\nclass decl_cmd1(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n",
"step-5": "from setuptools import Command\n\n\nclass decl_cmd1(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Hi buddy! Today we will play a game ' + name + '!')
print('Are you ready?')
<|reserved_special_token_0|>
print(name + ' we are starting!')
<|reserved_special_token_0|>
print(liste1 + liste2 + liste3 + liste4)
<|reserved_special_token_1|>
name = input('Enter your name: ')
print('Hi buddy! Today we will play a game ' + name + '!')
print('Are you ready?')
question = input('Are you ready ? Yes or no: ')
print(name + ' we are starting!')
liste1 = ['My neighbor ', 'My girlfriend ', 'My boyfriend ', 'My dog ']
num = input('Enter a number: ')
liste1 = liste1[int(num)]
liste2 = ['hates ', 'loves ', 'enjoys ', 'ridicules ']
num = input('Enter a number: ')
liste2 = liste2[int(num)]
liste3 = ['with me ', 'with my grandma ', 'with our home staff ',
'with our money ']
num = input('Enter a number: ')
liste3 = liste3[int(num)]
liste4 = ['in every situation ! ', 'until end of the world ! ']
num = input('Enter a number: ')
liste4 = liste4[int(num)]
print(liste1 + liste2 + liste3 + liste4)
<|reserved_special_token_1|>
name = input("Enter your name: ")
print("Hi buddy! Today we will play a game " + name + "!")
print("Are you ready?")
question = input("Are you ready ? Yes or no: ")
print(name + " we are starting!")
liste1 = ['My neighbor ', 'My girlfriend ', 'My boyfriend ', 'My dog ']
num = input("Enter a number: ")
liste1 = liste1[int(num)]
liste2 = ['hates ', 'loves ', 'enjoys ', 'ridicules ']
num = input("Enter a number: ")
liste2 = liste2[int(num)]
liste3 = ['with me ', 'with my grandma ', 'with our home staff ', 'with our money ']
num = input("Enter a number: ")
liste3 = liste3[int(num)]
liste4 = ['in every situation ! ', 'until end of the world ! ']
num = input("Enter a number: ")
liste4 = liste4[int(num)]
print(liste1 + liste2 + liste3 + liste4)
|
flexible
|
{
"blob_id": "4ef6002480fcaa514f41227978bae76f6e02c22d",
"index": 6401,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Hi buddy! Today we will play a game ' + name + '!')\nprint('Are you ready?')\n<mask token>\nprint(name + ' we are starting!')\n<mask token>\nprint(liste1 + liste2 + liste3 + liste4)\n",
"step-3": "name = input('Enter your name: ')\nprint('Hi buddy! Today we will play a game ' + name + '!')\nprint('Are you ready?')\nquestion = input('Are you ready ? Yes or no: ')\nprint(name + ' we are starting!')\nliste1 = ['My neighbor ', 'My girlfriend ', 'My boyfriend ', 'My dog ']\nnum = input('Enter a number: ')\nliste1 = liste1[int(num)]\nliste2 = ['hates ', 'loves ', 'enjoys ', 'ridicules ']\nnum = input('Enter a number: ')\nliste2 = liste2[int(num)]\nliste3 = ['with me ', 'with my grandma ', 'with our home staff ',\n 'with our money ']\nnum = input('Enter a number: ')\nliste3 = liste3[int(num)]\nliste4 = ['in every situation ! ', 'until end of the world ! ']\nnum = input('Enter a number: ')\nliste4 = liste4[int(num)]\nprint(liste1 + liste2 + liste3 + liste4)\n",
"step-4": "name = input(\"Enter your name: \")\r\nprint(\"Hi buddy! Today we will play a game \" + name + \"!\")\r\n\r\nprint(\"Are you ready?\")\r\n\r\nquestion = input(\"Are you ready ? Yes or no: \")\r\nprint(name + \" we are starting!\")\r\n\r\n\r\nliste1 = ['My neighbor ', 'My girlfriend ', 'My boyfriend ', 'My dog ']\r\nnum = input(\"Enter a number: \")\r\n\r\nliste1 = liste1[int(num)]\r\n\r\nliste2 = ['hates ', 'loves ', 'enjoys ', 'ridicules ']\r\nnum = input(\"Enter a number: \")\r\n\r\nliste2 = liste2[int(num)]\r\n\r\nliste3 = ['with me ', 'with my grandma ', 'with our home staff ', 'with our money ']\r\nnum = input(\"Enter a number: \")\r\n\r\nliste3 = liste3[int(num)]\r\n\r\nliste4 = ['in every situation ! ', 'until end of the world ! ']\r\nnum = input(\"Enter a number: \")\r\n\r\nliste4 = liste4[int(num)]\r\n\r\nprint(liste1 + liste2 + liste3 + liste4)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import BayesianRidge, LinearRegression
import os
import sys
import sklearn.metrics as mets
from review import set_metrics as set_metrics
from algo import Regression
import draw
#https://datascience.stackexchange.com/questions/989/svm-using-scikit-learn-runs-endlessly-and-never-completes-execution
#https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/
#https://datascienceplus.com/keras-regression-based-neural-networks/
#xgboost
#random forest
#lstm
#rnn
#dec tree
#logistic regression
#ann
#naive bayes
#monte carlo
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print("To begin with, your path to data should be proper!")
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist() # get the columns
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
X = df[col[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
return (X_train, X_test, y_train, y_test)
df, col = read_atomic_data("unique_m.csv")
(X_train, X_test, y_train, y_test) = get_dataset(df, col)
from sklearn import preprocessing
X_train = preprocessing.scale(X_train)
X_test = preprocessing.scale(X_test)
results = {}
R = Regression(X_train, X_test, y_train, y_test)
dict = R.run()
print (dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
sys.exit()
|
normal
|
{
"blob_id": "1e34087719f6fd0456d2722edbd0a7af68d37e4c",
"index": 1577,
"step-1": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\n<mask token>\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n",
"step-3": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\ndf, col = read_atomic_data('unique_m.csv')\nX_train, X_test, y_train, y_test = get_dataset(df, col)\n<mask token>\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n",
"step-4": "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import BayesianRidge, LinearRegression\nimport os\nimport sys\nimport sklearn.metrics as mets\nfrom review import set_metrics as set_metrics\nfrom algo import Regression\nimport draw\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\ndf, col = read_atomic_data('unique_m.csv')\nX_train, X_test, y_train, y_test = get_dataset(df, col)\nfrom sklearn import preprocessing\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n",
"step-5": "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier\nfrom sklearn.model_selection import train_test_split # Import train_test_split function\nfrom sklearn import metrics #Import scikit-learn metrics module for accuracy calculation\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import BayesianRidge, LinearRegression\nimport os\nimport sys\nimport sklearn.metrics as mets\nfrom review import set_metrics as set_metrics\nfrom algo import Regression\nimport draw\n#https://datascience.stackexchange.com/questions/989/svm-using-scikit-learn-runs-endlessly-and-never-completes-execution\n#https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/\n#https://datascienceplus.com/keras-regression-based-neural-networks/\n\n#xgboost\n#random forest\n#lstm\n#rnn\n#dec tree\n#logistic regression\n#ann\n#naive bayes\n#monte carlo\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print(\"To begin with, your path to data should be proper!\")\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist() # get the columns\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) \n return (X_train, X_test, y_train, y_test)\n\ndf, col = read_atomic_data(\"unique_m.csv\")\n(X_train, X_test, y_train, y_test) = get_dataset(df, col)\nfrom sklearn import preprocessing\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint (dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 
'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\n\nsys.exit()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def crear_addr_word(word):
priv = sha256(word)
pub = privtopub(priv)
addr = pubtoaddr(pub)
wif = encode_privkey(priv, 'wif')
return addr, priv, wif
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def crear_addr_word(word):
priv = sha256(word)
pub = privtopub(priv)
addr = pubtoaddr(pub)
wif = encode_privkey(priv, 'wif')
return addr, priv, wif
<|reserved_special_token_0|>
print('####################################################')
print('WORD: ' + word)
print('ADDR: ' + addr)
print('PRIV: ' + priv)
print('WIF: ' + wif)
print('####################################################')
<|reserved_special_token_1|>
__author__ = 'xcbtrader'
<|reserved_special_token_0|>
def crear_addr_word(word):
priv = sha256(word)
pub = privtopub(priv)
addr = pubtoaddr(pub)
wif = encode_privkey(priv, 'wif')
return addr, priv, wif
word = input('Entra la palabra para crear direccion bitcoin:? ')
addr, priv, wif = crear_addr_word(word)
print('####################################################')
print('WORD: ' + word)
print('ADDR: ' + addr)
print('PRIV: ' + priv)
print('WIF: ' + wif)
print('####################################################')
<|reserved_special_token_1|>
__author__ = 'xcbtrader'
from bitcoin import *
def crear_addr_word(word):
priv = sha256(word)
pub = privtopub(priv)
addr = pubtoaddr(pub)
wif = encode_privkey(priv, 'wif')
return addr, priv, wif
word = input('Entra la palabra para crear direccion bitcoin:? ')
addr, priv, wif = crear_addr_word(word)
print('####################################################')
print('WORD: ' + word)
print('ADDR: ' + addr)
print('PRIV: ' + priv)
print('WIF: ' + wif)
print('####################################################')
<|reserved_special_token_1|>
__author__ = 'xcbtrader'
# -*- coding: utf-8 -*-
from bitcoin import *
def crear_addr_word(word):
priv = sha256(word)
pub = privtopub(priv)
addr = pubtoaddr(pub)
wif = encode_privkey(priv, 'wif')
return addr, priv, wif
word = input('Entra la palabra para crear direccion bitcoin:? ')
addr, priv, wif = crear_addr_word(word)
print('####################################################')
print('WORD: ' + word)
print('ADDR: ' + addr)
print('PRIV: ' + priv)
print('WIF: ' + wif)
print('####################################################')
|
flexible
|
{
"blob_id": "cc7a44754dc1371733420fd3a1e51ab6b5e7c4d8",
"index": 6898,
"step-1": "<mask token>\n\n\ndef crear_addr_word(word):\n priv = sha256(word)\n pub = privtopub(priv)\n addr = pubtoaddr(pub)\n wif = encode_privkey(priv, 'wif')\n return addr, priv, wif\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef crear_addr_word(word):\n priv = sha256(word)\n pub = privtopub(priv)\n addr = pubtoaddr(pub)\n wif = encode_privkey(priv, 'wif')\n return addr, priv, wif\n\n\n<mask token>\nprint('####################################################')\nprint('WORD: ' + word)\nprint('ADDR: ' + addr)\nprint('PRIV: ' + priv)\nprint('WIF: ' + wif)\nprint('####################################################')\n",
"step-3": "__author__ = 'xcbtrader'\n<mask token>\n\n\ndef crear_addr_word(word):\n priv = sha256(word)\n pub = privtopub(priv)\n addr = pubtoaddr(pub)\n wif = encode_privkey(priv, 'wif')\n return addr, priv, wif\n\n\nword = input('Entra la palabra para crear direccion bitcoin:? ')\naddr, priv, wif = crear_addr_word(word)\nprint('####################################################')\nprint('WORD: ' + word)\nprint('ADDR: ' + addr)\nprint('PRIV: ' + priv)\nprint('WIF: ' + wif)\nprint('####################################################')\n",
"step-4": "__author__ = 'xcbtrader'\nfrom bitcoin import *\n\n\ndef crear_addr_word(word):\n priv = sha256(word)\n pub = privtopub(priv)\n addr = pubtoaddr(pub)\n wif = encode_privkey(priv, 'wif')\n return addr, priv, wif\n\n\nword = input('Entra la palabra para crear direccion bitcoin:? ')\naddr, priv, wif = crear_addr_word(word)\nprint('####################################################')\nprint('WORD: ' + word)\nprint('ADDR: ' + addr)\nprint('PRIV: ' + priv)\nprint('WIF: ' + wif)\nprint('####################################################')\n",
"step-5": "__author__ = 'xcbtrader'\n# -*- coding: utf-8 -*-\n\nfrom bitcoin import *\n\ndef crear_addr_word(word):\n\tpriv = sha256(word)\n\tpub = privtopub(priv)\n\taddr = pubtoaddr(pub)\n\twif = encode_privkey(priv, 'wif')\n\treturn addr, priv, wif\n\nword = input('Entra la palabra para crear direccion bitcoin:? ')\naddr, priv, wif = crear_addr_word(word)\nprint('####################################################')\nprint('WORD: ' + word)\nprint('ADDR: ' + addr)\nprint('PRIV: ' + priv)\nprint('WIF: ' + wif)\nprint('####################################################')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# _*_ coding: utf-8 _*_
# 按层打印二叉树
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class PrintTree(object):
def printTree(self, root):
if not root:
return
'''
定义next_last为下一层的最后一个,cur_last为当前层最后一个
temp用于存放当前行的值,resutl存放最终的结果
'''
next_last = cur_last = root
_queue = [root]
result, temp = [], []
while _queue:
# 在按层遍历的基础上,不断把下层最右边儿子赋值给next_last
_cur = _queue.pop(0)
temp.append(_cur.val)
if _cur.left:
_queue.append(_cur.left)
next_last = _cur.left
if _cur.right:
_queue.append(_cur.right)
next_last = _cur.right
# 如果当前节点为此层最后的节点时,
# 进行下层最后一个节点的赋值(cur_last=next_last),然后才由_queue.pop(0)进入下层
if _cur == cur_last:
result.append(temp)
temp = []
cur_last = next_last
return result
|
normal
|
{
"blob_id": "4ddff57790ad191fc29fc092bcc714f0b6273100",
"index": 7755,
"step-1": "<mask token>\n\n\nclass PrintTree(object):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-3": "class TreeNode(object):\n <mask token>\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-4": "class TreeNode(object):\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-5": "# _*_ coding: utf-8 _*_\n\n# 按层打印二叉树\n\n\nclass TreeNode(object):\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass PrintTree(object):\n def printTree(self, root):\n if not root:\n return\n '''\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n '''\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n # 在按层遍历的基础上,不断把下层最右边儿子赋值给next_last\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n # 如果当前节点为此层最后的节点时,\n # 进行下层最后一个节点的赋值(cur_last=next_last),然后才由_queue.pop(0)进入下层\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def indent_wrap(s, indent=0, wrap=80):
"""
Wraps and indents a string ``s``.
Parameters
----------
s : str
The string to wrap.
indent : int
How far to indent each new line.
wrape : int
Number of character after which to wrap the string.
Returns
-------
s : str
Indented and wrapped string, each line has length ``wrap``, except the
last one, which may have less than ``wrap`` characters.
Example
-------
>>> s = 2 * "abcdefghijklmnopqrstuvwxyz"
>>> indent_wrap(s, indent=0, wrap=26)
'abcdefghijklmnopqrstuvwxyz
abcdefghijklmnopqrstuvwxyz'
>>> indent_wrap(s, indent=2, wrap=26)
' abcdefghijklmnopqrstuvwx
yzabcdefghijklmnopqrstuv
wxyz'
"""
split = wrap - indent
chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]
return '\n'.join(chunks)
def serialize_ndarrays(d):
"""
Recursively traverse through iterable object ``d`` and convert all occuring
ndarrays to lists to make it JSON serializable.
Note: Works for 1D dicts with ndarrays at first level. Certainly not tested
and meant to work for all use cases.
Made with code from: http://code.activestate.com/recipes/577504/
Parameters
----------
d : iterable
Can be dict, list, set, tuple or frozenset.
Returns
-------
d : iterable
Same as input, but all ndarrays replaced by lists.
"""
def dict_handler(d):
return d.items()
handlers = {list: enumerate, tuple: enumerate, set: enumerate,
frozenset: enumerate, dict: dict_handler}
def serialize(o):
for typ, handler in handlers.items():
if isinstance(o, typ):
for key, val in handler(o):
if isinstance(val, np.ndarray):
o[key] = val.tolist()
else:
o[key] = serialize_ndarrays(o[key])
return o
return serialize(d)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def arr2str(arr, sep=', ', fmt='{}'):
"""
Make a string from a list seperated by ``sep`` and each item formatted
with ``fmt``.
"""
return sep.join([fmt.format(v) for v in arr])
def indent_wrap(s, indent=0, wrap=80):
"""
Wraps and indents a string ``s``.
Parameters
----------
s : str
The string to wrap.
indent : int
How far to indent each new line.
wrape : int
Number of character after which to wrap the string.
Returns
-------
s : str
Indented and wrapped string, each line has length ``wrap``, except the
last one, which may have less than ``wrap`` characters.
Example
-------
>>> s = 2 * "abcdefghijklmnopqrstuvwxyz"
>>> indent_wrap(s, indent=0, wrap=26)
'abcdefghijklmnopqrstuvwxyz
abcdefghijklmnopqrstuvwxyz'
>>> indent_wrap(s, indent=2, wrap=26)
' abcdefghijklmnopqrstuvwx
yzabcdefghijklmnopqrstuv
wxyz'
"""
split = wrap - indent
chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]
return '\n'.join(chunks)
def serialize_ndarrays(d):
"""
Recursively traverse through iterable object ``d`` and convert all occuring
ndarrays to lists to make it JSON serializable.
Note: Works for 1D dicts with ndarrays at first level. Certainly not tested
and meant to work for all use cases.
Made with code from: http://code.activestate.com/recipes/577504/
Parameters
----------
d : iterable
Can be dict, list, set, tuple or frozenset.
Returns
-------
d : iterable
Same as input, but all ndarrays replaced by lists.
"""
def dict_handler(d):
return d.items()
handlers = {list: enumerate, tuple: enumerate, set: enumerate,
frozenset: enumerate, dict: dict_handler}
def serialize(o):
for typ, handler in handlers.items():
if isinstance(o, typ):
for key, val in handler(o):
if isinstance(val, np.ndarray):
o[key] = val.tolist()
else:
o[key] = serialize_ndarrays(o[key])
return o
return serialize(d)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def arr2str(arr, sep=', ', fmt='{}'):
"""
Make a string from a list seperated by ``sep`` and each item formatted
with ``fmt``.
"""
return sep.join([fmt.format(v) for v in arr])
def indent_wrap(s, indent=0, wrap=80):
    """
    Wraps and indents a string ``s``.

    Parameters
    ----------
    s : str
        The string to wrap.
    indent : int
        How far to indent each new line.
    wrap : int
        Number of characters after which to wrap the string.

    Returns
    -------
    s : str
        Indented and wrapped string; each line has length ``wrap``, except
        the last one, which may have fewer than ``wrap`` characters.

    Notes
    -----
    ``wrap`` must be larger than ``indent``: ``wrap == indent`` raises
    ``ValueError`` (zero ``range`` step) and ``wrap < indent`` returns ``''``.

    Example
    -------
    >>> s = 2 * "abcdefghijklmnopqrstuvwxyz"
    >>> indent_wrap(s, indent=0, wrap=26)
    'abcdefghijklmnopqrstuvwxyz\\nabcdefghijklmnopqrstuvwxyz'
    >>> indent_wrap(s, indent=2, wrap=26)
    '  abcdefghijklmnopqrstuvwx\\n  yzabcdefghijklmnopqrstuv\\n  wxyz'
    """
    # Payload width per line: the indent prefix eats into the total width.
    split = wrap - indent
    chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]
    return '\n'.join(chunks)
def serialize_ndarrays(d):
    """
    Recursively traverse through iterable object ``d`` and convert all
    occurring ndarrays to lists to make it JSON serializable.

    The container is mutated in place and the same object is returned.

    NOTE(review): ``tuple``, ``set`` and ``frozenset`` are listed in
    ``handlers`` but do not support item assignment, so any tuple/set input
    (even one without ndarrays) raises ``TypeError`` on ``o[key] = ...``.
    In practice only dict/list containers work.

    Made with code from: http://code.activestate.com/recipes/577504/

    Parameters
    ----------
    d : iterable
        Can be dict or list (see note above for tuple/set/frozenset).

    Returns
    -------
    d : iterable
        Same object as the input (mutated in place), with all ndarrays
        replaced by lists.
    """

    def dict_handler(d):
        # Dicts yield (key, value) pairs; sequences use enumerate instead.
        return d.items()
    handlers = {list: enumerate, tuple: enumerate, set: enumerate,
        frozenset: enumerate, dict: dict_handler}

    def serialize(o):
        # Find the matching container type and rewrite its items in place;
        # non-container objects fall through and are returned unchanged.
        for typ, handler in handlers.items():
            if isinstance(o, typ):
                for key, val in handler(o):
                    if isinstance(val, np.ndarray):
                        o[key] = val.tolist()
                    else:
                        # Non-array values are recursed into.
                        o[key] = serialize_ndarrays(o[key])
        return o
    return serialize(d)
def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):
    """
    Populate a dictionary with data from a given dict ``d`` and check that
    ``d`` has the required and optional keys.  Optional keys missing from
    ``d`` are set to their default values.

    If input ``d`` is None and ``required_keys`` is empty, a copy of
    ``opt_keys`` is returned.

    Parameters
    ----------
    d : dict or None
        Input dictionary containing the data to be checked.  If ``None``, a
        copy of ``opt_keys`` is returned.  If ``d`` and ``opt_keys`` are
        both ``None``, a ``TypeError`` is raised.  If ``d`` is ``None`` but
        ``required_keys`` is not empty, a ``ValueError`` is raised.
    required_keys : list or None, optional
        Keys that must be present and set in ``d``. (default: None)
    opt_keys : dict or None, optional
        Optional keys with the default values ``d`` is filled with if they
        are not present in ``d``. (default: None)
    noleft : bool, optional
        If True, raise a ``KeyError`` when ``d`` contains extra keys other
        than those given in ``required_keys`` and ``opt_keys``.
        (default: True)

    Returns
    -------
    out : dict
        Contains all required and optional keys, using default values where
        optional keys were missing.  The caller's ``d`` is never mutated.

    Raises
    ------
    TypeError
        If ``d`` and ``opt_keys`` are both ``None``.
    ValueError
        If ``d`` is ``None`` but ``required_keys`` is not empty.
    KeyError
        If a required key is missing, or (with ``noleft=True``) if ``d``
        contains unexpected extra keys.
    """
    if required_keys is None:
        required_keys = []
    if d is None:
        if required_keys:
            raise ValueError('`d` is None, but `required_keys` is not empty.')
        # Check before defaulting opt_keys so the documented TypeError is
        # actually reachable (the original defaulted opt_keys to {} first,
        # making this branch dead, and its message contained mojibake).
        if opt_keys is None:
            raise TypeError('`d` and `opt_keys` are both None.')
        return opt_keys.copy()
    if opt_keys is None:
        opt_keys = {}
    d = d.copy()  # work on a copy so the caller's dict is left untouched
    out = {}
    # Required keys: move them over, complain if any is missing.
    for key in required_keys:
        if key in d:
            out[key] = d.pop(key)
        else:
            raise KeyError("Dict is missing required key '{}'.".format(key))
    # Optional keys: fall back to the given default when absent.
    for key, val in opt_keys.items():
        out[key] = d.pop(key, val)
    # Anything still left in d was not declared at all.
    if d and noleft:
        raise KeyError("Leftover keys ['{}'].".format(
            "', '".join(list(d.keys()))))
    return out
<|reserved_special_token_1|>
from __future__ import absolute_import
import numpy as np
def arr2str(arr, sep=', ', fmt='{}'):
    """
    Make a string from a list separated by ``sep`` and each item formatted
    with ``fmt``.

    Parameters
    ----------
    arr : iterable
        Items to render; each must be accepted by ``fmt.format``.
    sep : str, optional
        Separator placed between formatted items. (default: ', ')
    fmt : str, optional
        ``str.format`` template applied to every item. (default: '{}')

    Returns
    -------
    str
        The joined, formatted string; empty for an empty ``arr``.
    """
    return sep.join([fmt.format(v) for v in arr])
def indent_wrap(s, indent=0, wrap=80):
    """
    Wrap the string ``s`` into lines of at most ``wrap`` characters, each
    line prefixed with ``indent`` spaces.

    Parameters
    ----------
    s : str
        The string to wrap.
    indent : int
        Number of spaces prepended to every line.
    wrap : int
        Total line width (indent plus payload).

    Returns
    -------
    str
        The wrapped string; every line has length ``wrap`` except possibly
        the last one, which may be shorter.
    """
    # Each line carries `indent` spaces plus a slice of the payload.
    width = wrap - indent
    prefix = indent * ' '
    return '\n'.join(prefix + s[pos:pos + width]
                     for pos in range(0, len(s), width))
def serialize_ndarrays(d):
    """
    Recursively walk the container ``d`` and replace every contained
    ``numpy.ndarray`` with a plain list, so the result is JSON serializable.

    Containers are rewritten in place via item assignment, so in practice
    only dicts and lists are supported; non-container objects pass through
    untouched.

    Made with code from: http://code.activestate.com/recipes/577504/

    Parameters
    ----------
    d : iterable
        Can be dict, list, set, tuple or frozenset.

    Returns
    -------
    d : iterable
        Same as input, but all ndarrays replaced by lists.
    """
    def _walk(node):
        # Pick the (index, element) pair view for the container type.
        if isinstance(node, dict):
            pairs = node.items()
        elif isinstance(node, (list, tuple, set, frozenset)):
            pairs = enumerate(node)
        else:
            return node
        for idx, elem in pairs:
            if isinstance(elem, np.ndarray):
                node[idx] = elem.tolist()
            else:
                node[idx] = serialize_ndarrays(node[idx])
        return node
    return _walk(d)
def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):
    """
    Populate dictionary with data from a given dict ``d``, and check if ``d``
    has required and optional keys. Set optionals with default if not present.

    If input ``d`` is None and ``required_keys`` is empty, just return a copy
    of ``opt_keys``.

    NOTE(review): the inner ``opt_keys is None`` check below is unreachable,
    because ``opt_keys`` has already been replaced with ``{}`` at that point;
    calling with everything ``None`` therefore returns ``{}`` instead of
    raising the documented ``TypeError``.  Its message also contains a
    mojibake backtick ('òpt_keys').

    Parameters
    ----------
    d : dict or None
        Input dictionary containing the data to be checked. If ``None``, a
        copy of ``opt_keys`` is returned (see note above). If ``d`` is
        ``None`` and ``required_keys`` is not empty, a ``ValueError`` is
        raised.
    required_keys : list or None, optional
        Keys that must be present and set in ``d``. (default: None)
    opt_keys : dict or None, optional
        Keys that are optional. ``opt_keys`` provides optional keys and
        default values ``d`` is filled with if not present in ``d``.
        (default: None)
    noleft : bool, optional
        If True, raises a ``KeyError`` when ``d`` contains extra keys other
        than those given in ``required_keys`` and ``opt_keys``.
        (default: True)

    Returns
    -------
    out : dict
        Contains all required and optional keys, using default values where
        optional keys were missing. The caller's ``d`` is never mutated.
    """
    if required_keys is None:
        required_keys = []
    if opt_keys is None:
        opt_keys = {}
    if d is None:
        if not required_keys:
            if opt_keys is None:
                raise TypeError('`d` and òpt_keys` are both None.')
            return opt_keys.copy()
        else:
            raise ValueError('`d` is None, but `required_keys` is not empty.')
    d = d.copy()  # work on a copy so the caller's dict is left untouched
    out = {}
    # Move required keys over; missing ones are an error.
    for key in required_keys:
        if key in d:
            out[key] = d.pop(key)
        else:
            raise KeyError("Dict is missing required key '{}'.".format(key))
    # Optional keys fall back to their defaults.
    for key, val in opt_keys.items():
        out[key] = d.pop(key, val)
    # With noleft=True, undeclared leftover keys are an error.
    if d and noleft:
        raise KeyError("Leftover keys ['{}'].".format("', '".join(list(d.
            keys()))))
    return out
<|reserved_special_token_1|>
# coding: utf8
from __future__ import absolute_import
import numpy as np
def arr2str(arr, sep=", ", fmt="{}"):
    """
    Join the items of ``arr`` into one string, formatting each item with
    ``fmt`` and separating the pieces with ``sep``.
    """
    return sep.join(fmt.format(item) for item in arr)
def indent_wrap(s, indent=0, wrap=80):
    """
    Wraps and indents a string ``s``.

    Parameters
    ----------
    s : str
        The string to wrap.
    indent : int
        How far to indent each new line.
    wrap : int
        Number of characters after which to wrap the string.

    Returns
    -------
    s : str
        Indented and wrapped string; each line has length ``wrap``, except
        the last one, which may have fewer than ``wrap`` characters.

    Notes
    -----
    ``wrap`` must be larger than ``indent``: ``wrap == indent`` raises
    ``ValueError`` (zero ``range`` step) and ``wrap < indent`` returns ``''``.

    Example
    -------
    >>> s = 2 * "abcdefghijklmnopqrstuvwxyz"
    >>> indent_wrap(s, indent=0, wrap=26)
    'abcdefghijklmnopqrstuvwxyz\\nabcdefghijklmnopqrstuvwxyz'
    >>> indent_wrap(s, indent=2, wrap=26)
    '  abcdefghijklmnopqrstuvwx\\n  yzabcdefghijklmnopqrstuv\\n  wxyz'
    """
    # Payload width per line: the indent prefix eats into the total width.
    split = wrap - indent
    chunks = [indent * " " + s[i:i + split] for i in range(0, len(s), split)]
    return "\n".join(chunks)
def serialize_ndarrays(d):
    """
    Recursively traverse the container ``d`` and convert every contained
    ``numpy.ndarray`` to a plain list so the result is JSON serializable.

    Mutable containers (``dict``, ``list``) are modified in place and the
    same object is returned; immutable ones (``tuple``, ``set``,
    ``frozenset``) are rebuilt, because item assignment is impossible on
    them.  (The original implementation attempted ``o[key] = ...`` on
    tuples/sets as well, which raised ``TypeError``.)  Any other object is
    returned unchanged.

    Parameters
    ----------
    d : object
        Typically a dict, list, set, tuple or frozenset; scalars pass
        through untouched.

    Returns
    -------
    d : object
        Same structure as the input, with all ndarrays replaced by lists.
    """
    def _convert(value):
        # Leaf conversion: ndarray -> list; everything else recurses.
        if isinstance(value, np.ndarray):
            return value.tolist()
        return serialize_ndarrays(value)

    if isinstance(d, dict):
        # Rewriting existing keys is safe while iterating a dict.
        for key in d:
            d[key] = _convert(d[key])
        return d
    if isinstance(d, list):
        for i, val in enumerate(d):
            d[i] = _convert(val)
        return d
    # Immutable containers cannot be assigned to in place; rebuild them.
    if isinstance(d, tuple):
        return tuple(_convert(v) for v in d)
    if isinstance(d, (set, frozenset)):
        return type(d)(_convert(v) for v in d)
    return d
def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):
    """
    Fill a result dict from ``d``, verifying required keys and applying
    defaults for the optional ones.

    If input ``d`` is None and ``required_keys`` is empty, just return a
    copy of ``opt_keys``.

    Parameters
    ----------
    d : dict or None
        Source dictionary to be checked.  When ``None``, a copy of
        ``opt_keys`` is returned; when ``None`` while ``required_keys`` is
        non-empty, a ``ValueError`` is raised.
    required_keys : list or None, optional
        Keys that must be present in ``d``. (default: None)
    opt_keys : dict or None, optional
        Optional keys mapped to the default values used when a key is
        absent from ``d``. (default: None)
    noleft : bool, optional
        When True, raise ``KeyError`` if ``d`` holds keys that are neither
        required nor optional. (default: True)

    Returns
    -------
    out : dict
        All required and optional keys, with defaults filled in where the
        optional keys were missing from ``d``.
    """
    required_keys = [] if required_keys is None else required_keys
    opt_keys = {} if opt_keys is None else opt_keys
    if d is None:
        if required_keys:
            raise ValueError("`d` is None, but `required_keys` is not empty.")
        if opt_keys is None:
            raise TypeError("`d` and òpt_keys` are both None.")
        return opt_keys.copy()

    # Work on a copy so the caller's dict is never mutated.
    leftovers = d.copy()
    out = {}
    # Transfer required keys first; a missing one is an error.
    for key in required_keys:
        if key not in leftovers:
            raise KeyError("Dict is missing required key '{}'.".format(key))
        out[key] = leftovers.pop(key)
    # Optional keys fall back to their defaults.
    for key, default in opt_keys.items():
        out[key] = leftovers.pop(key, default)
    # Undeclared extra keys are rejected when noleft is set.
    if leftovers and noleft:
        raise KeyError("Leftover keys ['{}'].".format(
            "', '".join(list(leftovers.keys()))))
    return out
|
flexible
|
{
"blob_id": "3b4799f43ec497978bea3ac7ecf8c6aaeb2180b4",
"index": 3867,
"step-1": "<mask token>\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]\n return '\\n'.join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n\n def dict_handler(d):\n return d.items()\n handlers = {list: enumerate, tuple: enumerate, set: enumerate,\n frozenset: enumerate, dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n return serialize(d)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef arr2str(arr, sep=', ', fmt='{}'):\n \"\"\"\n Make a string from a list seperated by ``sep`` and each item formatted\n with ``fmt``.\n \"\"\"\n return sep.join([fmt.format(v) for v in arr])\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]\n return '\\n'.join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. 
Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n\n def dict_handler(d):\n return d.items()\n handlers = {list: enumerate, tuple: enumerate, set: enumerate,\n frozenset: enumerate, dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n return serialize(d)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef arr2str(arr, sep=', ', fmt='{}'):\n \"\"\"\n Make a string from a list seperated by ``sep`` and each item formatted\n with ``fmt``.\n \"\"\"\n return sep.join([fmt.format(v) for v in arr])\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]\n return '\\n'.join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. 
Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n\n def dict_handler(d):\n return d.items()\n handlers = {list: enumerate, tuple: enumerate, set: enumerate,\n frozenset: enumerate, dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n return serialize(d)\n\n\ndef fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):\n \"\"\"\n Populate dictionary with data from a given dict ``d``, and check if ``d``\n has required and optional keys. Set optionals with default if not present.\n\n If input ``d`` is None and ``required_keys`` is empty, just return\n ``opt_keys``.\n\n Parameters\n ----------\n d : dict or None\n Input dictionary containing the data to be checked. If is ``None``, then\n a copy of ``opt_keys`` is returned. If ``opt_keys`` is ``None``, a\n ``TypeError`` is raised. If ``d``is ``None`` and ``required_keys`` is\n not, then a ``ValueError`` israised.\n required_keys : list or None, optional\n Keys that must be present and set in ``d``. (default: None)\n opt_keys : dict or None, optional\n Keys that are optional. ``opt_keys`` provides optional keys and default\n values ``d`` is filled with if not present in ``d``. (default: None)\n noleft : bool, optional\n If True, raises a ``KeyError``, when ``d`` contains etxra keys, other\n than those given in ``required_keys`` and ``opt_keys``. (default: True)\n\n Returns\n -------\n out : dict\n Contains all required and optional keys, using default values, where\n optional keys were missing. 
If ``d`` was None, a copy of ``opt_keys`` is\n returned, if ``opt_keys`` was not ``None``.\n \"\"\"\n if required_keys is None:\n required_keys = []\n if opt_keys is None:\n opt_keys = {}\n if d is None:\n if not required_keys:\n if opt_keys is None:\n raise TypeError('`d` and òpt_keys` are both None.')\n return opt_keys.copy()\n else:\n raise ValueError('`d` is None, but `required_keys` is not empty.')\n d = d.copy()\n out = {}\n for key in required_keys:\n if key in d:\n out[key] = d.pop(key)\n else:\n raise KeyError(\"Dict is missing required key '{}'.\".format(key))\n for key, val in opt_keys.items():\n out[key] = d.pop(key, val)\n if d and noleft:\n raise KeyError(\"Leftover keys ['{}'].\".format(\"', '\".join(list(d.\n keys()))))\n return out\n",
"step-4": "from __future__ import absolute_import\nimport numpy as np\n\n\ndef arr2str(arr, sep=', ', fmt='{}'):\n \"\"\"\n Make a string from a list seperated by ``sep`` and each item formatted\n with ``fmt``.\n \"\"\"\n return sep.join([fmt.format(v) for v in arr])\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]\n return '\\n'.join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. 
Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n\n def dict_handler(d):\n return d.items()\n handlers = {list: enumerate, tuple: enumerate, set: enumerate,\n frozenset: enumerate, dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n return serialize(d)\n\n\ndef fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):\n \"\"\"\n Populate dictionary with data from a given dict ``d``, and check if ``d``\n has required and optional keys. Set optionals with default if not present.\n\n If input ``d`` is None and ``required_keys`` is empty, just return\n ``opt_keys``.\n\n Parameters\n ----------\n d : dict or None\n Input dictionary containing the data to be checked. If is ``None``, then\n a copy of ``opt_keys`` is returned. If ``opt_keys`` is ``None``, a\n ``TypeError`` is raised. If ``d``is ``None`` and ``required_keys`` is\n not, then a ``ValueError`` israised.\n required_keys : list or None, optional\n Keys that must be present and set in ``d``. (default: None)\n opt_keys : dict or None, optional\n Keys that are optional. ``opt_keys`` provides optional keys and default\n values ``d`` is filled with if not present in ``d``. (default: None)\n noleft : bool, optional\n If True, raises a ``KeyError``, when ``d`` contains etxra keys, other\n than those given in ``required_keys`` and ``opt_keys``. (default: True)\n\n Returns\n -------\n out : dict\n Contains all required and optional keys, using default values, where\n optional keys were missing. 
If ``d`` was None, a copy of ``opt_keys`` is\n returned, if ``opt_keys`` was not ``None``.\n \"\"\"\n if required_keys is None:\n required_keys = []\n if opt_keys is None:\n opt_keys = {}\n if d is None:\n if not required_keys:\n if opt_keys is None:\n raise TypeError('`d` and òpt_keys` are both None.')\n return opt_keys.copy()\n else:\n raise ValueError('`d` is None, but `required_keys` is not empty.')\n d = d.copy()\n out = {}\n for key in required_keys:\n if key in d:\n out[key] = d.pop(key)\n else:\n raise KeyError(\"Dict is missing required key '{}'.\".format(key))\n for key, val in opt_keys.items():\n out[key] = d.pop(key, val)\n if d and noleft:\n raise KeyError(\"Leftover keys ['{}'].\".format(\"', '\".join(list(d.\n keys()))))\n return out\n",
"step-5": "# coding: utf8\n\nfrom __future__ import absolute_import\n\nimport numpy as np\n\n\ndef arr2str(arr, sep=\", \", fmt=\"{}\"):\n \"\"\"\n Make a string from a list seperated by ``sep`` and each item formatted\n with ``fmt``.\n \"\"\"\n return sep.join([fmt.format(v) for v in arr])\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\\n yzabcdefghijklmnopqrstuv\\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [indent * \" \" + s[i:i + split] for i in range(0, len(s), split)]\n return \"\\n\".join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. 
Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n def dict_handler(d):\n return d.items()\n\n handlers = {list: enumerate, tuple: enumerate,\n set: enumerate, frozenset: enumerate,\n dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n\n return serialize(d)\n\n\ndef fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):\n \"\"\"\n Populate dictionary with data from a given dict ``d``, and check if ``d``\n has required and optional keys. Set optionals with default if not present.\n\n If input ``d`` is None and ``required_keys`` is empty, just return\n ``opt_keys``.\n\n Parameters\n ----------\n d : dict or None\n Input dictionary containing the data to be checked. If is ``None``, then\n a copy of ``opt_keys`` is returned. If ``opt_keys`` is ``None``, a\n ``TypeError`` is raised. If ``d``is ``None`` and ``required_keys`` is\n not, then a ``ValueError`` israised.\n required_keys : list or None, optional\n Keys that must be present and set in ``d``. (default: None)\n opt_keys : dict or None, optional\n Keys that are optional. ``opt_keys`` provides optional keys and default\n values ``d`` is filled with if not present in ``d``. (default: None)\n noleft : bool, optional\n If True, raises a ``KeyError``, when ``d`` contains etxra keys, other\n than those given in ``required_keys`` and ``opt_keys``. (default: True)\n\n Returns\n -------\n out : dict\n Contains all required and optional keys, using default values, where\n optional keys were missing. 
If ``d`` was None, a copy of ``opt_keys`` is\n returned, if ``opt_keys`` was not ``None``.\n \"\"\"\n if required_keys is None:\n required_keys = []\n if opt_keys is None:\n opt_keys = {}\n if d is None:\n if not required_keys:\n if opt_keys is None:\n raise TypeError(\"`d` and òpt_keys` are both None.\")\n return opt_keys.copy()\n else:\n raise ValueError(\"`d` is None, but `required_keys` is not empty.\")\n\n d = d.copy()\n out = {}\n # Set required keys\n for key in required_keys:\n if key in d:\n out[key] = d.pop(key)\n else:\n raise KeyError(\"Dict is missing required key '{}'.\".format(key))\n # Set optional values, if key not given\n for key, val in opt_keys.items():\n out[key] = d.pop(key, val)\n # Complain when extra keys are left and noleft is True\n if d and noleft:\n raise KeyError(\"Leftover keys ['{}'].\".format(\n \"', '\".join(list(d.keys()))))\n return out\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Command-line setup: photoscan.exe path, agiproc script, and xml glob
# pattern.  With no arguments, fall back to hard-coded development paths.
if len(sys.argv) == 1:
    photoscanname = 'C:\\Program Files\\Agisoft\\PhotoScan Pro\\photoscan.exe'
    scriptname = 'C:\\Users\\slocumr\\github\\SimUAS\\batchphotoscan\\agiproc.py'
    xmlnames = 'C:\\Users\\slocumr\\github\\SimUAS\\data\\testagiproc\\06_QUICKPROC\\*.xml'
else:
    photoscanname, scriptname, xmlnames = sys.argv[1], sys.argv[2], sys.argv[3]
# Only one PhotoScan instance is run at a time in either mode.
nprocesses = 1
<|reserved_special_token_0|>
# Main driver: launch one PhotoScan subprocess per xml job that has no
# autoproc.log yet, throttled to `nprocesses` concurrent workers, and tear
# everything down cleanly on Ctrl+C.
try:
    # Pass 1: derive each job's log path from its xml <export rootname> and
    # count how many jobs already have a log (= already processed).
    nexist = 0
    for i, fname in enumerate(xmlfiles):
        rootdir, f = os.path.split(fname)
        rootoutput = ET.parse(fname).getroot().find('export').get('rootname')
        logname.append(rootdir + '/' + rootoutput + '/autoproc.log')
        procind.append(i)
        if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):
            nexist = nexist + 1
    print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))
    proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\n')
    # Pass 2: launch a PhotoScan worker for every job whose log is missing.
    for fname, i, logfile in zip(xmlfiles, procind, logname):
        i = i + 1  # 1-based job number for display
        if not os.path.exists(logfile):
            currentind.append(i)
            print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(
                ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +
                ' : ' + fname)
            proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
                time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,
                nfiles) + ' : ' + fname + '\n')
            # Make sure the job's output folder exists before opening its log.
            foldername, foo = os.path.split(logfile)
            if not os.path.exists(foldername):
                os.makedirs(foldername)
            iloghandle = open(logfile, 'wt')
            iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime
                (time.time())) + '\n')
            iloghandle.write(getpass.getuser() + '\n')
            iloghandle.flush()
            currentloghandles.append(iloghandle)
            # All subprocess I/O is redirected into the per-job log file.
            processes.append(subprocess.Popen([photoscanname, '-r',
                scriptname, fname], stdin=iloghandle, stdout=iloghandle,
                stderr=iloghandle))
            procname.append(fname)
            # Throttle: block here until a worker slot frees up.
            while len(processes) >= nprocesses:
                time.sleep(SLEEPTIME)
                if DODEBUG:
                    cpu_percent = psutil.cpu_percent()
                    ram_percent = psutil.virtual_memory().percent
                    print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
                        time.time())) + '          CPU: {:5.1f}  RAM: {:5.1f}'.
                        format(cpu_percent, ram_percent))
                    proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.
                        gmtime(time.time())) +
                        '          CPU: {:5.1f}  RAM: {:5.1f}'.format(cpu_percent,
                        ram_percent) + '\n')
                # Report finished workers and close their logs.
                # NOTE(review): this prints `fname` (the xml currently being
                # launched) instead of the finished process's `name`, and
                # timestamps/closes `iloghandle` (the most recently opened
                # handle) instead of the finished process's `log`.
                for p, ind, name, log in zip(processes, currentind,
                    procname, currentloghandles):
                    if p.poll() is not None:
                        print(time.strftime('%b %d %Y %H:%M:%S', time.
                            gmtime(time.time())) + ' : DONE  : ' +
                            '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)
                        proclog.write(time.strftime('%b %d %Y %H:%M:%S',
                            time.gmtime(time.time())) + ' : DONE  : ' +
                            '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +
                            fname + '\n')
                        iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',
                            time.gmtime(time.time())) + '\n')
                        iloghandle.flush()
                        iloghandle.close()
                # Drop finished workers from all parallel bookkeeping lists.
                procname[:] = [n for n, p in zip(procname, processes) if p.
                    poll() is None]
                currentind[:] = [ind for ind, p in zip(currentind,
                    processes) if p.poll() is None]
                currentloghandles[:] = [log for log, p in zip(
                    currentloghandles, processes) if p.poll() is None]
                processes[:] = [p for p in processes if p.poll() is None]
    # Drain: wait for the remaining workers after the last launch.
    while len(processes) > 0:
        time.sleep(SLEEPTIME)
        if DODEBUG:
            cpu_percent = psutil.cpu_percent()
            ram_percent = psutil.virtual_memory().percent
            print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(
                ))) + '          CPU: {:5.1f}  RAM: {:5.1f}'.format(cpu_percent,
                ram_percent))
            proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
                time.time())) + '          CPU: {:5.1f}  RAM: {:5.1f}'.format(
                cpu_percent, ram_percent) + '\n')
        # NOTE(review): same `fname`-vs-`name` display bug as above; this
        # loop does however close the correct handle via `iloghandle = log`.
        for p, ind, name, log in zip(processes, currentind, procname,
            currentloghandles):
            if p.poll() is not None:
                print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.
                    time())) + ' : DONE  : ' + '{:3d}/{:3d}'.format(ind,
                    nfiles) + ' : ' + fname)
                proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.
                    gmtime(time.time())) + ' : DONE  : ' + '{:3d}/{:3d}'.
                    format(ind, nfiles) + ' : ' + fname + '\n')
                iloghandle = log
                iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.
                    gmtime(time.time())) + '\n')
                iloghandle.flush()
                iloghandle.close()
        procname[:] = [n for n, p in zip(procname, processes) if p.poll() is
            None]
        currentind[:] = [ind for ind, p in zip(currentind, processes) if p.
            poll() is None]
        currentloghandles[:] = [log for log, p in zip(currentloghandles,
            processes) if p.poll() is None]
        processes[:] = [p for p in processes if p.poll() is None]
except KeyboardInterrupt:
    # Ctrl+C: kill every running worker, close its log, and delete the
    # partial autoproc.log so the job is retried on the next run.
    for p, ind, name, iloghandle in zip(processes, currentind, procname,
        currentloghandles):
        print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +
            ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)
        proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.
            time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +
            ' : ' + name + '\n')
        p.kill()
        iloghandle.flush()
        iloghandle.close()
        time.sleep(0.1)
        os.remove(logname[ind - 1])  # ind is 1-based; logname is 0-based
proclog.flush()
proclog.close()
print('Done')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) == 1:
photoscanname = 'C:\\Program Files\\Agisoft\\PhotoScan Pro\\photoscan.exe'
scriptname = (
'C:\\Users\\slocumr\\github\\SimUAS\\batchphotoscan\\agiproc.py')
xmlnames = (
'C:\\Users\\slocumr\\github\\SimUAS\\data\\testagiproc\\06_QUICKPROC\\*.xml'
)
nprocesses = 1
else:
photoscanname = sys.argv[1]
scriptname = sys.argv[2]
xmlnames = sys.argv[3]
nprocesses = 1
SLEEPTIME = 10
DODEBUG = True
xmlfiles = glob.glob(xmlnames)
nfiles = len(xmlfiles)
processes = []
procname = []
procind = []
logname = []
currentloghandles = []
currentind = []
proclog = open('simUASagiproc_log.log', 'at')
try:
nexist = 0
for i, fname in enumerate(xmlfiles):
rootdir, f = os.path.split(fname)
rootoutput = ET.parse(fname).getroot().find('export').get('rootname')
logname.append(rootdir + '/' + rootoutput + '/autoproc.log')
procind.append(i)
if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):
nexist = nexist + 1
print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))
proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\n')
for fname, i, logfile in zip(xmlfiles, procind, logname):
i = i + 1
if not os.path.exists(logfile):
currentind.append(i)
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(
))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +
' : ' + fname)
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,
nfiles) + ' : ' + fname + '\n')
foldername, foo = os.path.split(logfile)
if not os.path.exists(foldername):
os.makedirs(foldername)
iloghandle = open(logfile, 'wt')
iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime
(time.time())) + '\n')
iloghandle.write(getpass.getuser() + '\n')
iloghandle.flush()
currentloghandles.append(iloghandle)
processes.append(subprocess.Popen([photoscanname, '-r',
scriptname, fname], stdin=iloghandle, stdout=iloghandle,
stderr=iloghandle))
procname.append(fname)
while len(processes) >= nprocesses:
time.sleep(SLEEPTIME)
if DODEBUG:
cpu_percent = psutil.cpu_percent()
ram_percent = psutil.virtual_memory().percent
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.
format(cpu_percent, ram_percent))
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.
gmtime(time.time())) +
' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,
ram_percent) + '\n')
for p, ind, name, log in zip(processes, currentind,
procname, currentloghandles):
if p.poll() is not None:
print(time.strftime('%b %d %Y %H:%M:%S', time.
gmtime(time.time())) + ' : DONE : ' +
'{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)
proclog.write(time.strftime('%b %d %Y %H:%M:%S',
time.gmtime(time.time())) + ' : DONE : ' +
'{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +
fname + '\n')
iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',
time.gmtime(time.time())) + '\n')
iloghandle.flush()
iloghandle.close()
procname[:] = [n for n, p in zip(procname, processes) if p.
poll() is None]
currentind[:] = [ind for ind, p in zip(currentind,
processes) if p.poll() is None]
currentloghandles[:] = [log for log, p in zip(
currentloghandles, processes) if p.poll() is None]
processes[:] = [p for p in processes if p.poll() is None]
while len(processes) > 0:
time.sleep(SLEEPTIME)
if DODEBUG:
cpu_percent = psutil.cpu_percent()
ram_percent = psutil.virtual_memory().percent
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(
))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,
ram_percent))
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(
cpu_percent, ram_percent) + '\n')
for p, ind, name, log in zip(processes, currentind, procname,
currentloghandles):
if p.poll() is not None:
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.
time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,
nfiles) + ' : ' + fname)
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.
gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.
format(ind, nfiles) + ' : ' + fname + '\n')
iloghandle = log
iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.
gmtime(time.time())) + '\n')
iloghandle.flush()
iloghandle.close()
procname[:] = [n for n, p in zip(procname, processes) if p.poll() is
None]
currentind[:] = [ind for ind, p in zip(currentind, processes) if p.
poll() is None]
currentloghandles[:] = [log for log, p in zip(currentloghandles,
processes) if p.poll() is None]
processes[:] = [p for p in processes if p.poll() is None]
except KeyboardInterrupt:
for p, ind, name, iloghandle in zip(processes, currentind, procname,
currentloghandles):
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +
' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.
time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +
' : ' + name + '\n')
p.kill()
iloghandle.flush()
iloghandle.close()
time.sleep(0.1)
os.remove(logname[ind - 1])
proclog.flush()
proclog.close()
print('Done')
<|reserved_special_token_1|>
import subprocess
import glob
import os
import time
import sys
import xml.etree.ElementTree as ET
import getpass
import psutil
if len(sys.argv) == 1:
photoscanname = 'C:\\Program Files\\Agisoft\\PhotoScan Pro\\photoscan.exe'
scriptname = (
'C:\\Users\\slocumr\\github\\SimUAS\\batchphotoscan\\agiproc.py')
xmlnames = (
'C:\\Users\\slocumr\\github\\SimUAS\\data\\testagiproc\\06_QUICKPROC\\*.xml'
)
nprocesses = 1
else:
photoscanname = sys.argv[1]
scriptname = sys.argv[2]
xmlnames = sys.argv[3]
nprocesses = 1
SLEEPTIME = 10
DODEBUG = True
xmlfiles = glob.glob(xmlnames)
nfiles = len(xmlfiles)
processes = []
procname = []
procind = []
logname = []
currentloghandles = []
currentind = []
proclog = open('simUASagiproc_log.log', 'at')
try:
nexist = 0
for i, fname in enumerate(xmlfiles):
rootdir, f = os.path.split(fname)
rootoutput = ET.parse(fname).getroot().find('export').get('rootname')
logname.append(rootdir + '/' + rootoutput + '/autoproc.log')
procind.append(i)
if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):
nexist = nexist + 1
print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))
proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\n')
for fname, i, logfile in zip(xmlfiles, procind, logname):
i = i + 1
if not os.path.exists(logfile):
currentind.append(i)
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(
))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +
' : ' + fname)
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,
nfiles) + ' : ' + fname + '\n')
foldername, foo = os.path.split(logfile)
if not os.path.exists(foldername):
os.makedirs(foldername)
iloghandle = open(logfile, 'wt')
iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime
(time.time())) + '\n')
iloghandle.write(getpass.getuser() + '\n')
iloghandle.flush()
currentloghandles.append(iloghandle)
processes.append(subprocess.Popen([photoscanname, '-r',
scriptname, fname], stdin=iloghandle, stdout=iloghandle,
stderr=iloghandle))
procname.append(fname)
while len(processes) >= nprocesses:
time.sleep(SLEEPTIME)
if DODEBUG:
cpu_percent = psutil.cpu_percent()
ram_percent = psutil.virtual_memory().percent
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.
format(cpu_percent, ram_percent))
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.
gmtime(time.time())) +
' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,
ram_percent) + '\n')
for p, ind, name, log in zip(processes, currentind,
procname, currentloghandles):
if p.poll() is not None:
print(time.strftime('%b %d %Y %H:%M:%S', time.
gmtime(time.time())) + ' : DONE : ' +
'{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)
proclog.write(time.strftime('%b %d %Y %H:%M:%S',
time.gmtime(time.time())) + ' : DONE : ' +
'{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +
fname + '\n')
iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',
time.gmtime(time.time())) + '\n')
iloghandle.flush()
iloghandle.close()
procname[:] = [n for n, p in zip(procname, processes) if p.
poll() is None]
currentind[:] = [ind for ind, p in zip(currentind,
processes) if p.poll() is None]
currentloghandles[:] = [log for log, p in zip(
currentloghandles, processes) if p.poll() is None]
processes[:] = [p for p in processes if p.poll() is None]
while len(processes) > 0:
time.sleep(SLEEPTIME)
if DODEBUG:
cpu_percent = psutil.cpu_percent()
ram_percent = psutil.virtual_memory().percent
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(
))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,
ram_percent))
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(
time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(
cpu_percent, ram_percent) + '\n')
for p, ind, name, log in zip(processes, currentind, procname,
currentloghandles):
if p.poll() is not None:
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.
time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,
nfiles) + ' : ' + fname)
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.
gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.
format(ind, nfiles) + ' : ' + fname + '\n')
iloghandle = log
iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.
gmtime(time.time())) + '\n')
iloghandle.flush()
iloghandle.close()
procname[:] = [n for n, p in zip(procname, processes) if p.poll() is
None]
currentind[:] = [ind for ind, p in zip(currentind, processes) if p.
poll() is None]
currentloghandles[:] = [log for log, p in zip(currentloghandles,
processes) if p.poll() is None]
processes[:] = [p for p in processes if p.poll() is None]
except KeyboardInterrupt:
for p, ind, name, iloghandle in zip(processes, currentind, procname,
currentloghandles):
print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +
' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)
proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.
time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +
' : ' + name + '\n')
p.kill()
iloghandle.flush()
iloghandle.close()
time.sleep(0.1)
os.remove(logname[ind - 1])
proclog.flush()
proclog.close()
print('Done')
<|reserved_special_token_1|>
import subprocess
import glob
import os
import time
import sys
import xml.etree.ElementTree as ET
import getpass
import psutil
if len(sys.argv)==1:
photoscanname = r"C:\Program Files\Agisoft\PhotoScan Pro\photoscan.exe"
scriptname = r"C:\Users\slocumr\github\SimUAS\batchphotoscan\agiproc.py"
#xmlnames = r"P:\Slocum\USVI_project\01_DATA\20180319_USVI_UAS_BATHY\02_PROCDATA\06_PROCIMAGES\*\06_QUICKPROC\*2.xml"
xmlnames = r"C:\Users\slocumr\github\SimUAS\data\testagiproc\06_QUICKPROC\*.xml"
nprocesses = 1
else:
photoscanname = sys.argv[1]
scriptname = sys.argv[2]
xmlnames = sys.argv[3]
nprocesses = 1
SLEEPTIME = 10
DODEBUG = True
# get xmlfiles
xmlfiles = glob.glob(xmlnames)
nfiles = len(xmlfiles)
# empty lists
processes = []
procname = []
procind = []
logname = []
currentloghandles = []
currentind = []
proclog = open("simUASagiproc_log.log",'at')
try:
# detect already processed or processing folders
nexist = 0
for i,fname in enumerate(xmlfiles):
rootdir,f = os.path.split(fname)
rootoutput = ET.parse(fname).getroot().find('export').get('rootname')
logname.append( rootdir + "/" + rootoutput + "/autoproc.log" )
procind.append(i)
if os.path.exists(rootdir + "/" + rootoutput + "/autoproc.log"):
nexist = nexist+1
print('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles))
proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles) + '\n')
for fname,i,logfile in zip(xmlfiles,procind,logname):
i = i+1
if not os.path.exists(logfile):
currentind.append(i)
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : START : " + '{:3d}/{:3d}'.format(i,nfiles) + " : " + fname)
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : START : " + '{:3d}/{:3d}'.format(i,nfiles) + " : " + fname + '\n')
foldername,foo = os.path.split(logfile)
if not os.path.exists(foldername):
os.makedirs(foldername)
iloghandle = open(logfile,'wt')
iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
iloghandle.write(getpass.getuser() + "\n")
iloghandle.flush()
currentloghandles.append(iloghandle)
processes.append(subprocess.Popen([photoscanname,"-r",scriptname,fname],stdin=iloghandle, stdout=iloghandle, stderr=iloghandle))
procname.append(fname)
while len(processes)>=nprocesses:
time.sleep(SLEEPTIME)
if DODEBUG:
cpu_percent = psutil.cpu_percent()
ram_percent = psutil.virtual_memory().percent
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\n')
for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):
if p.poll() is not None:
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname)
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname + '\n')
iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
iloghandle.flush()
iloghandle.close()
procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]
currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]
currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]
processes[:] = [p for p in processes if p.poll() is None]
# Wait for everything to finish
while len(processes)>0:
time.sleep(SLEEPTIME)
if DODEBUG:
cpu_percent = psutil.cpu_percent()
ram_percent = psutil.virtual_memory().percent
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\n')
for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):
if p.poll() is not None:
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname)
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname + '\n')
iloghandle= log
iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
iloghandle.flush()
iloghandle.close()
procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]
currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]
currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]
processes[:] = [p for p in processes if p.poll() is None]
except KeyboardInterrupt:
for p, ind, name, iloghandle in zip(processes, currentind, procname, currentloghandles):
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : KILL : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + name)
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : KILL : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + name + '\n')
p.kill()
iloghandle.flush()
iloghandle.close()
time.sleep(0.1)
os.remove(logname[ind-1])
proclog.flush()
proclog.close()
print("Done")
|
flexible
|
{
"blob_id": "00f95733505b3e853a76bbdd65439bcb230fa262",
"index": 3345,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\n<mask token>\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + '\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, '-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n 
ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) 
+ ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n",
"step-3": "<mask token>\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\nSLEEPTIME = 10\nDODEBUG = True\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\nproclog = open('simUASagiproc_log.log', 'at')\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + '\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, 
'-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not 
None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n",
"step-4": "import subprocess\nimport glob\nimport os\nimport time\nimport sys\nimport xml.etree.ElementTree as ET\nimport getpass\nimport psutil\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\nSLEEPTIME = 10\nDODEBUG = True\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\nproclog = open('simUASagiproc_log.log', 'at')\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + 
'\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, '-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, 
ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n",
"step-5": "import subprocess\nimport glob\nimport os\nimport time\nimport sys\nimport xml.etree.ElementTree as ET\nimport getpass\nimport psutil\n\nif len(sys.argv)==1:\n photoscanname = r\"C:\\Program Files\\Agisoft\\PhotoScan Pro\\photoscan.exe\"\n scriptname = r\"C:\\Users\\slocumr\\github\\SimUAS\\batchphotoscan\\agiproc.py\"\n #xmlnames = r\"P:\\Slocum\\USVI_project\\01_DATA\\20180319_USVI_UAS_BATHY\\02_PROCDATA\\06_PROCIMAGES\\*\\06_QUICKPROC\\*2.xml\"\n xmlnames = r\"C:\\Users\\slocumr\\github\\SimUAS\\data\\testagiproc\\06_QUICKPROC\\*.xml\"\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\n\nSLEEPTIME = 10\nDODEBUG = True\n\n# get xmlfiles\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\n\n# empty lists\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\n\nproclog = open(\"simUASagiproc_log.log\",'at')\ntry:\n # detect already processed or processing folders\n nexist = 0\n for i,fname in enumerate(xmlfiles):\n rootdir,f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append( rootdir + \"/\" + rootoutput + \"/autoproc.log\" )\n procind.append(i)\n if os.path.exists(rootdir + \"/\" + rootoutput + \"/autoproc.log\"):\n nexist = nexist+1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles) + '\\n')\n for fname,i,logfile in zip(xmlfiles,procind,logname):\n i = i+1\n if not os.path.exists(logfile):\n\n currentind.append(i)\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : START : \" + '{:3d}/{:3d}'.format(i,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : START : \" + '{:3d}/{:3d}'.format(i,nfiles) + \" : \" + fname + '\\n')\n foldername,foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n 
os.makedirs(foldername)\n iloghandle = open(logfile,'wt')\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.write(getpass.getuser() + \"\\n\")\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname,\"-r\",scriptname,fname],stdin=iloghandle, stdout=iloghandle, stderr=iloghandle))\n procname.append(fname)\n while len(processes)>=nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname + '\\n')\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]\n currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]\n currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n \n # Wait for everything to finish\n while len(processes)>0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} 
RAM: {:5.1f}'.format(cpu_percent,ram_percent))\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname + '\\n')\n iloghandle= log\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]\n currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]\n currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname, currentloghandles):\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : KILL : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + name)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : KILL : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind-1])\nproclog.flush()\nproclog.close()\nprint(\"Done\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(romaO_map)
except ImportError:
print('viscm not found, falling back on simple display')
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=
romaO_map)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [
0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251,
0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968,
0.30862], [0.47078, 0.21958, 0.30337], [0.47352, 0.21962, 0.29822], [
0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178,
0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379
], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461], [0.49567,
0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833,
0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [
0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584,
0.23835, 0.23151], [0.51884, 0.24082, 0.22779], [0.52184, 0.24345,
0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [
0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 0.21064], [0.53742,
0.25899, 0.20753], [0.54063, 0.26255, 0.20452], [0.54389, 0.26628,
0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [
0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075,
0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665,
0.18446], [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [
0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602,
0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415,
0.17358], [0.59742, 0.34005, 0.17261], [0.60129, 0.34606, 0.17179], [
0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311,
0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808,
0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087], [
0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419,
0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771,
0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], [
0.66341, 0.45054, 0.18175], [0.6678, 0.45834, 0.18416], [0.67222,
0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233,
0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [
0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803], [0.70394,
0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413,
0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [
0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666,
0.58553, 0.25168], [0.74136, 0.59451, 0.25837], [0.74605, 0.60354,
0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [
0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914,
0.64889, 0.30433], [0.77363, 0.65793, 0.31287], [0.77806, 0.66694,
0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [
0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989,
0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803,
0.38866], [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [
0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239,
0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252,
0.46174], [0.8296, 0.78953, 0.47235], [0.83155, 0.79631, 0.48293], [
0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592,
0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666,
0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546], [
0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367,
0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835,
0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [
0.82864, 0.868, 0.63085], [0.82615, 0.87068, 0.63949], [0.82337,
0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724,
0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [
0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595,
0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395,
0.7145], [0.78012, 0.88396, 0.72082], [0.77433, 0.88375, 0.72692], [
0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553,
0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934,
0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [
0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408,
0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425,
0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [
0.66215, 0.85476, 0.79202], [0.65336, 0.8512, 0.79465], [0.64448,
0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947,
0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [
0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037,
0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612,
0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [
0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791,
0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [
0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207,
0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337,
0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [
0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658,
0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572,
0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [
0.38267, 0.66441, 0.78666], [0.37656, 0.65724, 0.78416], [0.37066,
0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552,
0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [
0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889,
0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [
0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978,
0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695,
0.73422], [0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 0.72569], [
0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811,
0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456,
0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [
0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716,
0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [
0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178,
0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577,
0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [
0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627,
0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728,
0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [
0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927,
0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411,
0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [
0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336,
0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842,
0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [
0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655,
0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 0.25132,
0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [
0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823,
0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282,
0.37836], [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [
0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855,
0.2246, 0.34773]]
romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)
test_cm = romaO_map
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(romaO_map)
except ImportError:
print('viscm not found, falling back on simple display')
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=
romaO_map)
plt.show()
<|reserved_special_token_1|>
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [
0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251,
0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968,
0.30862], [0.47078, 0.21958, 0.30337], [0.47352, 0.21962, 0.29822], [
0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178,
0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379
], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461], [0.49567,
0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833,
0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [
0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584,
0.23835, 0.23151], [0.51884, 0.24082, 0.22779], [0.52184, 0.24345,
0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [
0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 0.21064], [0.53742,
0.25899, 0.20753], [0.54063, 0.26255, 0.20452], [0.54389, 0.26628,
0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [
0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075,
0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665,
0.18446], [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [
0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602,
0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415,
0.17358], [0.59742, 0.34005, 0.17261], [0.60129, 0.34606, 0.17179], [
0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311,
0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808,
0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087], [
0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419,
0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771,
0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], [
0.66341, 0.45054, 0.18175], [0.6678, 0.45834, 0.18416], [0.67222,
0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233,
0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [
0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803], [0.70394,
0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413,
0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [
0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666,
0.58553, 0.25168], [0.74136, 0.59451, 0.25837], [0.74605, 0.60354,
0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [
0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914,
0.64889, 0.30433], [0.77363, 0.65793, 0.31287], [0.77806, 0.66694,
0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [
0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989,
0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803,
0.38866], [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [
0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239,
0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252,
0.46174], [0.8296, 0.78953, 0.47235], [0.83155, 0.79631, 0.48293], [
0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592,
0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666,
0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546], [
0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367,
0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835,
0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [
0.82864, 0.868, 0.63085], [0.82615, 0.87068, 0.63949], [0.82337,
0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724,
0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [
0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595,
0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395,
0.7145], [0.78012, 0.88396, 0.72082], [0.77433, 0.88375, 0.72692], [
0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553,
0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934,
0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [
0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408,
0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425,
0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [
0.66215, 0.85476, 0.79202], [0.65336, 0.8512, 0.79465], [0.64448,
0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947,
0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [
0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037,
0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612,
0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [
0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791,
0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [
0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207,
0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337,
0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [
0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658,
0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572,
0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [
0.38267, 0.66441, 0.78666], [0.37656, 0.65724, 0.78416], [0.37066,
0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552,
0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [
0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889,
0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [
0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978,
0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695,
0.73422], [0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 0.72569], [
0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811,
0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456,
0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [
0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716,
0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [
0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178,
0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577,
0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [
0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627,
0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728,
0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [
0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927,
0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411,
0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [
0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336,
0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842,
0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [
0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655,
0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 0.25132,
0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [
0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823,
0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282,
0.37836], [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [
0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855,
0.2246, 0.34773]]
romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)
test_cm = romaO_map
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(romaO_map)
except ImportError:
print('viscm not found, falling back on simple display')
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=
romaO_map)
plt.show()
<|reserved_special_token_1|>
#
# romaO
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.45137, 0.22346, 0.34187],
[0.45418, 0.22244, 0.3361],
[0.45696, 0.22158, 0.33043],
[0.45975, 0.2209, 0.32483],
[0.46251, 0.22035, 0.31935],
[0.46527, 0.21994, 0.31394],
[0.46803, 0.21968, 0.30862],
[0.47078, 0.21958, 0.30337],
[0.47352, 0.21962, 0.29822],
[0.47628, 0.21982, 0.29316],
[0.47902, 0.22017, 0.28818],
[0.48178, 0.22067, 0.2833],
[0.48453, 0.2213, 0.2785],
[0.48731, 0.22208, 0.27379],
[0.49008, 0.22304, 0.26917],
[0.49286, 0.22411, 0.26461],
[0.49567, 0.22536, 0.26016],
[0.4985, 0.22677, 0.25579],
[0.50134, 0.22833, 0.25153],
[0.50419, 0.22999, 0.24733],
[0.50707, 0.23188, 0.24322],
[0.50997, 0.23387, 0.23923],
[0.5129, 0.23605, 0.23533],
[0.51584, 0.23835, 0.23151],
[0.51884, 0.24082, 0.22779],
[0.52184, 0.24345, 0.22414],
[0.52489, 0.24625, 0.22065],
[0.52797, 0.2492, 0.2172],
[0.53108, 0.25231, 0.21387],
[0.53423, 0.25556, 0.21064],
[0.53742, 0.25899, 0.20753],
[0.54063, 0.26255, 0.20452],
[0.54389, 0.26628, 0.20158],
[0.54718, 0.27017, 0.19879],
[0.55051, 0.27419, 0.19613],
[0.55389, 0.27839, 0.19356],
[0.55731, 0.28273, 0.19109],
[0.56075, 0.2872, 0.18877],
[0.56424, 0.29186, 0.18655],
[0.56777, 0.29665, 0.18446],
[0.57134, 0.30157, 0.18248],
[0.57495, 0.30666, 0.18065],
[0.5786, 0.31186, 0.17898],
[0.58228, 0.31724, 0.17743],
[0.58602, 0.32275, 0.17597],
[0.58977, 0.32838, 0.17473],
[0.59358, 0.33415, 0.17358],
[0.59742, 0.34005, 0.17261],
[0.60129, 0.34606, 0.17179],
[0.60519, 0.35223, 0.17114],
[0.60915, 0.35851, 0.17065],
[0.61311, 0.36491, 0.17034],
[0.61713, 0.37143, 0.1702],
[0.62118, 0.37808, 0.17023],
[0.62526, 0.38483, 0.17046],
[0.62937, 0.39171, 0.17087],
[0.63352, 0.39869, 0.17148],
[0.63769, 0.40579, 0.17229],
[0.6419, 0.41299, 0.17332],
[0.64613, 0.42029, 0.17458],
[0.65041, 0.42771, 0.176],
[0.6547, 0.43522, 0.17774],
[0.65904, 0.44283, 0.17962],
[0.66341, 0.45054, 0.18175],
[0.6678, 0.45834, 0.18416],
[0.67222, 0.46625, 0.1868],
[0.67667, 0.47425, 0.18968],
[0.68114, 0.48233, 0.19283],
[0.68566, 0.49051, 0.19624],
[0.69019, 0.49878, 0.19987],
[0.69474, 0.50712, 0.20384],
[0.69933, 0.51554, 0.20803],
[0.70394, 0.52406, 0.21251],
[0.70858, 0.53265, 0.21726],
[0.71322, 0.5413, 0.22229],
[0.7179, 0.55003, 0.22761],
[0.72257, 0.55881, 0.23318],
[0.72727, 0.56767, 0.23907],
[0.73197, 0.57658, 0.24521],
[0.73666, 0.58553, 0.25168],
[0.74136, 0.59451, 0.25837],
[0.74605, 0.60354, 0.26537],
[0.75073, 0.61259, 0.27263],
[0.75538, 0.62166, 0.28017],
[0.76001, 0.63075, 0.28796],
[0.7646, 0.63982, 0.29602],
[0.76914, 0.64889, 0.30433],
[0.77363, 0.65793, 0.31287],
[0.77806, 0.66694, 0.32165],
[0.78242, 0.6759, 0.33066],
[0.78669, 0.68481, 0.33988],
[0.79087, 0.69365, 0.34929],
[0.79494, 0.7024, 0.35888],
[0.7989, 0.71106, 0.36867],
[0.80273, 0.71961, 0.37859],
[0.80642, 0.72803, 0.38866],
[0.80996, 0.73631, 0.39885],
[0.81334, 0.74446, 0.40916],
[0.81655, 0.75244, 0.41957],
[0.81956, 0.76025, 0.43004],
[0.82239, 0.76787, 0.44057],
[0.82501, 0.7753, 0.45115],
[0.82742, 0.78252, 0.46174],
[0.8296, 0.78953, 0.47235],
[0.83155, 0.79631, 0.48293],
[0.83326, 0.80287, 0.49349],
[0.83472, 0.80919, 0.50402],
[0.83592, 0.81526, 0.51449],
[0.83686, 0.82109, 0.52487],
[0.83753, 0.82666, 0.53517],
[0.83793, 0.83198, 0.54537],
[0.83805, 0.83703, 0.55546],
[0.83788, 0.84182, 0.56542],
[0.83744, 0.84635, 0.57525],
[0.8367, 0.85061, 0.58493],
[0.83567, 0.85462, 0.59446],
[0.83435, 0.85835, 0.60382],
[0.83274, 0.86183, 0.61301],
[0.83084, 0.86504, 0.62202],
[0.82864, 0.868, 0.63085],
[0.82615, 0.87068, 0.63949],
[0.82337, 0.87312, 0.64792],
[0.8203, 0.87531, 0.65617],
[0.81695, 0.87724, 0.6642],
[0.81331, 0.87892, 0.67203],
[0.80939, 0.88036, 0.67964],
[0.80518, 0.88156, 0.68705],
[0.80071, 0.8825, 0.69424],
[0.79595, 0.88322, 0.70121],
[0.79094, 0.8837, 0.70797],
[0.78566, 0.88395, 0.7145],
[0.78012, 0.88396, 0.72082],
[0.77433, 0.88375, 0.72692],
[0.7683, 0.88331, 0.73279],
[0.76203, 0.88264, 0.73844],
[0.75553, 0.88177, 0.74387],
[0.74879, 0.88066, 0.74908],
[0.74184, 0.87934, 0.75407],
[0.73468, 0.87781, 0.75884],
[0.72731, 0.87607, 0.76339],
[0.71976, 0.87411, 0.76772],
[0.71201, 0.87195, 0.77184],
[0.70408, 0.86958, 0.77573],
[0.69599, 0.86701, 0.77941],
[0.68774, 0.86425, 0.78288],
[0.67934, 0.86127, 0.78614],
[0.67081, 0.85811, 0.78919],
[0.66215, 0.85476, 0.79202],
[0.65336, 0.8512, 0.79465],
[0.64448, 0.84747, 0.79707],
[0.6355, 0.84356, 0.7993],
[0.62645, 0.83947, 0.80131],
[0.61732, 0.83519, 0.80313],
[0.60814, 0.83075, 0.80476],
[0.59891, 0.82614, 0.80619],
[0.58965, 0.82137, 0.80743],
[0.58037, 0.81644, 0.80848],
[0.57108, 0.81135, 0.80935],
[0.56181, 0.80612, 0.81004],
[0.55255, 0.80074, 0.81055],
[0.54332, 0.79522, 0.81088],
[0.53412, 0.78958, 0.81105],
[0.525, 0.7838, 0.81105],
[0.51593, 0.77791, 0.81088],
[0.50695, 0.77189, 0.81055],
[0.49808, 0.76577, 0.81007],
[0.48928, 0.75954, 0.80944],
[0.48061, 0.75321, 0.80866],
[0.47207, 0.7468, 0.80773],
[0.46365, 0.74029, 0.80667],
[0.45539, 0.7337, 0.80546],
[0.44728, 0.72703, 0.80413],
[0.43934, 0.7203, 0.80266],
[0.43158, 0.7135, 0.80107],
[0.42398, 0.70664, 0.79936],
[0.41658, 0.69971, 0.79752],
[0.40938, 0.69275, 0.79557],
[0.40237, 0.68572, 0.79351],
[0.3956, 0.67865, 0.79133],
[0.38903, 0.67155, 0.78905],
[0.38267, 0.66441, 0.78666],
[0.37656, 0.65724, 0.78416],
[0.37066, 0.65003, 0.78155],
[0.36502, 0.64279, 0.77884],
[0.35961, 0.63552, 0.77604],
[0.35446, 0.62824, 0.77312],
[0.34955, 0.62094, 0.77011],
[0.3449, 0.6136, 0.767],
[0.34051, 0.60625, 0.76378],
[0.33637, 0.59889, 0.76047],
[0.33253, 0.59151, 0.75704],
[0.32893, 0.58412, 0.75351],
[0.32559, 0.57671, 0.74987],
[0.32256, 0.56928, 0.74613],
[0.31978, 0.56186, 0.74228],
[0.31727, 0.55441, 0.7383],
[0.31505, 0.54695, 0.73422],
[0.31311, 0.53948, 0.73002],
[0.31144, 0.53201, 0.72569],
[0.31007, 0.52453, 0.72124],
[0.30897, 0.51704, 0.71667],
[0.30811, 0.50955, 0.71197],
[0.30755, 0.50205, 0.70713],
[0.30726, 0.49456, 0.70216],
[0.30723, 0.48707, 0.69706],
[0.30746, 0.47958, 0.69182],
[0.30795, 0.4721, 0.68643],
[0.3087, 0.46463, 0.6809],
[0.30968, 0.45716, 0.67525],
[0.31088, 0.44973, 0.66944],
[0.31228, 0.44232, 0.6635],
[0.31393, 0.43493, 0.65741],
[0.31578, 0.42758, 0.65118],
[0.3178, 0.42025, 0.64482],
[0.32001, 0.41299, 0.63833],
[0.32238, 0.40577, 0.6317],
[0.32489, 0.39861, 0.62495],
[0.32755, 0.39152, 0.61809],
[0.33035, 0.38448, 0.61111],
[0.33327, 0.37755, 0.60402],
[0.33627, 0.37068, 0.59684],
[0.33939, 0.36392, 0.58955],
[0.34257, 0.35728, 0.58219],
[0.3458, 0.35073, 0.57476],
[0.34912, 0.34428, 0.56727],
[0.35247, 0.33797, 0.55971],
[0.35587, 0.33179, 0.55212],
[0.35927, 0.32574, 0.54448],
[0.36271, 0.31986, 0.53684],
[0.36617, 0.31411, 0.52917],
[0.36961, 0.30852, 0.52148],
[0.37306, 0.30306, 0.51382],
[0.37652, 0.2978, 0.50615],
[0.37994, 0.29269, 0.49854],
[0.38336, 0.28775, 0.49094],
[0.38674, 0.28301, 0.48337],
[0.39011, 0.27842, 0.47586],
[0.39346, 0.27401, 0.4684],
[0.39677, 0.26978, 0.461],
[0.40006, 0.26573, 0.45366],
[0.40333, 0.26185, 0.4464],
[0.40655, 0.25815, 0.43921],
[0.40974, 0.25466, 0.43212],
[0.4129, 0.25132, 0.42509],
[0.41602, 0.24817, 0.41813],
[0.41912, 0.24515, 0.41128],
[0.42218, 0.24235, 0.40451],
[0.42522, 0.23972, 0.39784],
[0.42823, 0.23728, 0.39126],
[0.43121, 0.23498, 0.38475],
[0.43415, 0.23282, 0.37836],
[0.43708, 0.23086, 0.37204],
[0.43998, 0.22907, 0.36583],
[0.44286, 0.22743, 0.3597],
[0.44571, 0.22596, 0.35366],
[0.44855, 0.2246, 0.34773]]
romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)
# For use of "viscm view"
test_cm = romaO_map
if __name__ == "__main__":
    # Preview the colormap: prefer viscm's full diagnostic view, otherwise
    # fall back to a plain horizontal gradient strip.
    import matplotlib.pyplot as plt
    import numpy as np

    try:
        from viscm import viscm
        viscm(romaO_map)
    except ImportError:
        print("viscm not found, falling back on simple display")
        gradient = np.linspace(0, 100, 256)[None, :]
        plt.imshow(gradient, aspect='auto', cmap=romaO_map)
        plt.show()
|
flexible
|
{
"blob_id": "5082182af5a08970568dc1ab7a53ee5337260687",
"index": 45,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import numpy as np\n try:\n from viscm import viscm\n viscm(romaO_map)\n except ImportError:\n print('viscm not found, falling back on simple display')\n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=\n romaO_map)\n plt.show()\n",
"step-3": "<mask token>\ncm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [\n 0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251, \n 0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968, \n 0.30862], [0.47078, 0.21958, 0.30337], [0.47352, 0.21962, 0.29822], [\n 0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178, \n 0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379\n ], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461], [0.49567, \n 0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833, \n 0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [\n 0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584, \n 0.23835, 0.23151], [0.51884, 0.24082, 0.22779], [0.52184, 0.24345, \n 0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [\n 0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 0.21064], [0.53742, \n 0.25899, 0.20753], [0.54063, 0.26255, 0.20452], [0.54389, 0.26628, \n 0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [\n 0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075, \n 0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665, \n 0.18446], [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [\n 0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602, \n 0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415, \n 0.17358], [0.59742, 0.34005, 0.17261], [0.60129, 0.34606, 0.17179], [\n 0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311, \n 0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808, \n 0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087], [\n 0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419, \n 0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771, \n 0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], [\n 0.66341, 0.45054, 0.18175], [0.6678, 0.45834, 0.18416], [0.67222, \n 
0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233, \n 0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [\n 0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803], [0.70394, \n 0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413, \n 0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [\n 0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666, \n 0.58553, 0.25168], [0.74136, 0.59451, 0.25837], [0.74605, 0.60354, \n 0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [\n 0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914, \n 0.64889, 0.30433], [0.77363, 0.65793, 0.31287], [0.77806, 0.66694, \n 0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [\n 0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989, \n 0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803, \n 0.38866], [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [\n 0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239, \n 0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252, \n 0.46174], [0.8296, 0.78953, 0.47235], [0.83155, 0.79631, 0.48293], [\n 0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592, \n 0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666, \n 0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546], [\n 0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367, \n 0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835, \n 0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [\n 0.82864, 0.868, 0.63085], [0.82615, 0.87068, 0.63949], [0.82337, \n 0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724, \n 0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [\n 0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595, \n 0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395, \n 0.7145], [0.78012, 0.88396, 0.72082], 
[0.77433, 0.88375, 0.72692], [\n 0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553, \n 0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934, \n 0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [\n 0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408, \n 0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425, \n 0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [\n 0.66215, 0.85476, 0.79202], [0.65336, 0.8512, 0.79465], [0.64448, \n 0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947, \n 0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [\n 0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037, \n 0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612, \n 0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [\n 0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791,\n 0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [\n 0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207, \n 0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337, \n 0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [\n 0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658, \n 0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572, \n 0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [\n 0.38267, 0.66441, 0.78666], [0.37656, 0.65724, 0.78416], [0.37066, \n 0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552, \n 0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [\n 0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889,\n 0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [\n 0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978, \n 0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695, \n 0.73422], [0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 
0.72569], [\n 0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811, \n 0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456, \n 0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [\n 0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716,\n 0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [\n 0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178, \n 0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577, \n 0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [\n 0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627, \n 0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728, \n 0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [\n 0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927, \n 0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411, \n 0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [\n 0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336, \n 0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842, \n 0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [\n 0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655, \n 0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 0.25132, \n 0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [\n 0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823, \n 0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282, \n 0.37836], [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [\n 0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855, \n 0.2246, 0.34773]]\nromaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)\ntest_cm = romaO_map\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import numpy as np\n try:\n from viscm import viscm\n viscm(romaO_map)\n except ImportError:\n print('viscm not found, falling 
back on simple display')\n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=\n romaO_map)\n plt.show()\n",
"step-4": "from matplotlib.colors import LinearSegmentedColormap\ncm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [\n 0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251, \n 0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968, \n 0.30862], [0.47078, 0.21958, 0.30337], [0.47352, 0.21962, 0.29822], [\n 0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178, \n 0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379\n ], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461], [0.49567, \n 0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833, \n 0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [\n 0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584, \n 0.23835, 0.23151], [0.51884, 0.24082, 0.22779], [0.52184, 0.24345, \n 0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [\n 0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 0.21064], [0.53742, \n 0.25899, 0.20753], [0.54063, 0.26255, 0.20452], [0.54389, 0.26628, \n 0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [\n 0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075, \n 0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665, \n 0.18446], [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [\n 0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602, \n 0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415, \n 0.17358], [0.59742, 0.34005, 0.17261], [0.60129, 0.34606, 0.17179], [\n 0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311, \n 0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808, \n 0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087], [\n 0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419, \n 0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771, \n 0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], [\n 0.66341, 0.45054, 0.18175], 
[0.6678, 0.45834, 0.18416], [0.67222, \n 0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233, \n 0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [\n 0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803], [0.70394, \n 0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413, \n 0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [\n 0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666, \n 0.58553, 0.25168], [0.74136, 0.59451, 0.25837], [0.74605, 0.60354, \n 0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [\n 0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914, \n 0.64889, 0.30433], [0.77363, 0.65793, 0.31287], [0.77806, 0.66694, \n 0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [\n 0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989, \n 0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803, \n 0.38866], [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [\n 0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239, \n 0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252, \n 0.46174], [0.8296, 0.78953, 0.47235], [0.83155, 0.79631, 0.48293], [\n 0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592, \n 0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666, \n 0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546], [\n 0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367, \n 0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835, \n 0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [\n 0.82864, 0.868, 0.63085], [0.82615, 0.87068, 0.63949], [0.82337, \n 0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724, \n 0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [\n 0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595, \n 0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395, 
\n 0.7145], [0.78012, 0.88396, 0.72082], [0.77433, 0.88375, 0.72692], [\n 0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553, \n 0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934, \n 0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [\n 0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408, \n 0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425, \n 0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [\n 0.66215, 0.85476, 0.79202], [0.65336, 0.8512, 0.79465], [0.64448, \n 0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947, \n 0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [\n 0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037, \n 0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612, \n 0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [\n 0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791,\n 0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [\n 0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207, \n 0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337, \n 0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [\n 0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658, \n 0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572, \n 0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [\n 0.38267, 0.66441, 0.78666], [0.37656, 0.65724, 0.78416], [0.37066, \n 0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552, \n 0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [\n 0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889,\n 0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [\n 0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978, \n 0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695, \n 0.73422], 
[0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 0.72569], [\n 0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811, \n 0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456, \n 0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [\n 0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716,\n 0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [\n 0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178, \n 0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577, \n 0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [\n 0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627, \n 0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728, \n 0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [\n 0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927, \n 0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411, \n 0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [\n 0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336, \n 0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842, \n 0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [\n 0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655, \n 0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 0.25132, \n 0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [\n 0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823, \n 0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282, \n 0.37836], [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [\n 0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855, \n 0.2246, 0.34773]]\nromaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)\ntest_cm = romaO_map\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import numpy as np\n try:\n from viscm import viscm\n viscm(romaO_map)\n except 
ImportError:\n print('viscm not found, falling back on simple display')\n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=\n romaO_map)\n plt.show()\n",
"step-5": "# \n# romaO\n# www.fabiocrameri.ch/colourmaps\nfrom matplotlib.colors import LinearSegmentedColormap \n \ncm_data = [[0.45137, 0.22346, 0.34187], \n [0.45418, 0.22244, 0.3361], \n [0.45696, 0.22158, 0.33043], \n [0.45975, 0.2209, 0.32483], \n [0.46251, 0.22035, 0.31935], \n [0.46527, 0.21994, 0.31394], \n [0.46803, 0.21968, 0.30862], \n [0.47078, 0.21958, 0.30337], \n [0.47352, 0.21962, 0.29822], \n [0.47628, 0.21982, 0.29316], \n [0.47902, 0.22017, 0.28818], \n [0.48178, 0.22067, 0.2833], \n [0.48453, 0.2213, 0.2785], \n [0.48731, 0.22208, 0.27379], \n [0.49008, 0.22304, 0.26917], \n [0.49286, 0.22411, 0.26461], \n [0.49567, 0.22536, 0.26016], \n [0.4985, 0.22677, 0.25579], \n [0.50134, 0.22833, 0.25153], \n [0.50419, 0.22999, 0.24733], \n [0.50707, 0.23188, 0.24322], \n [0.50997, 0.23387, 0.23923], \n [0.5129, 0.23605, 0.23533], \n [0.51584, 0.23835, 0.23151], \n [0.51884, 0.24082, 0.22779], \n [0.52184, 0.24345, 0.22414], \n [0.52489, 0.24625, 0.22065], \n [0.52797, 0.2492, 0.2172], \n [0.53108, 0.25231, 0.21387], \n [0.53423, 0.25556, 0.21064], \n [0.53742, 0.25899, 0.20753], \n [0.54063, 0.26255, 0.20452], \n [0.54389, 0.26628, 0.20158], \n [0.54718, 0.27017, 0.19879], \n [0.55051, 0.27419, 0.19613], \n [0.55389, 0.27839, 0.19356], \n [0.55731, 0.28273, 0.19109], \n [0.56075, 0.2872, 0.18877], \n [0.56424, 0.29186, 0.18655], \n [0.56777, 0.29665, 0.18446], \n [0.57134, 0.30157, 0.18248], \n [0.57495, 0.30666, 0.18065], \n [0.5786, 0.31186, 0.17898], \n [0.58228, 0.31724, 0.17743], \n [0.58602, 0.32275, 0.17597], \n [0.58977, 0.32838, 0.17473], \n [0.59358, 0.33415, 0.17358], \n [0.59742, 0.34005, 0.17261], \n [0.60129, 0.34606, 0.17179], \n [0.60519, 0.35223, 0.17114], \n [0.60915, 0.35851, 0.17065], \n [0.61311, 0.36491, 0.17034], \n [0.61713, 0.37143, 0.1702], \n [0.62118, 0.37808, 0.17023], \n [0.62526, 0.38483, 0.17046], \n [0.62937, 0.39171, 0.17087], \n [0.63352, 0.39869, 0.17148], \n [0.63769, 0.40579, 0.17229], \n [0.6419, 0.41299, 0.17332], 
\n [0.64613, 0.42029, 0.17458], \n [0.65041, 0.42771, 0.176], \n [0.6547, 0.43522, 0.17774], \n [0.65904, 0.44283, 0.17962], \n [0.66341, 0.45054, 0.18175], \n [0.6678, 0.45834, 0.18416], \n [0.67222, 0.46625, 0.1868], \n [0.67667, 0.47425, 0.18968], \n [0.68114, 0.48233, 0.19283], \n [0.68566, 0.49051, 0.19624], \n [0.69019, 0.49878, 0.19987], \n [0.69474, 0.50712, 0.20384], \n [0.69933, 0.51554, 0.20803], \n [0.70394, 0.52406, 0.21251], \n [0.70858, 0.53265, 0.21726], \n [0.71322, 0.5413, 0.22229], \n [0.7179, 0.55003, 0.22761], \n [0.72257, 0.55881, 0.23318], \n [0.72727, 0.56767, 0.23907], \n [0.73197, 0.57658, 0.24521], \n [0.73666, 0.58553, 0.25168], \n [0.74136, 0.59451, 0.25837], \n [0.74605, 0.60354, 0.26537], \n [0.75073, 0.61259, 0.27263], \n [0.75538, 0.62166, 0.28017], \n [0.76001, 0.63075, 0.28796], \n [0.7646, 0.63982, 0.29602], \n [0.76914, 0.64889, 0.30433], \n [0.77363, 0.65793, 0.31287], \n [0.77806, 0.66694, 0.32165], \n [0.78242, 0.6759, 0.33066], \n [0.78669, 0.68481, 0.33988], \n [0.79087, 0.69365, 0.34929], \n [0.79494, 0.7024, 0.35888], \n [0.7989, 0.71106, 0.36867], \n [0.80273, 0.71961, 0.37859], \n [0.80642, 0.72803, 0.38866], \n [0.80996, 0.73631, 0.39885], \n [0.81334, 0.74446, 0.40916], \n [0.81655, 0.75244, 0.41957], \n [0.81956, 0.76025, 0.43004], \n [0.82239, 0.76787, 0.44057], \n [0.82501, 0.7753, 0.45115], \n [0.82742, 0.78252, 0.46174], \n [0.8296, 0.78953, 0.47235], \n [0.83155, 0.79631, 0.48293], \n [0.83326, 0.80287, 0.49349], \n [0.83472, 0.80919, 0.50402], \n [0.83592, 0.81526, 0.51449], \n [0.83686, 0.82109, 0.52487], \n [0.83753, 0.82666, 0.53517], \n [0.83793, 0.83198, 0.54537], \n [0.83805, 0.83703, 0.55546], \n [0.83788, 0.84182, 0.56542], \n [0.83744, 0.84635, 0.57525], \n [0.8367, 0.85061, 0.58493], \n [0.83567, 0.85462, 0.59446], \n [0.83435, 0.85835, 0.60382], \n [0.83274, 0.86183, 0.61301], \n [0.83084, 0.86504, 0.62202], \n [0.82864, 0.868, 0.63085], \n [0.82615, 0.87068, 0.63949], \n [0.82337, 0.87312, 0.64792], 
\n [0.8203, 0.87531, 0.65617], \n [0.81695, 0.87724, 0.6642], \n [0.81331, 0.87892, 0.67203], \n [0.80939, 0.88036, 0.67964], \n [0.80518, 0.88156, 0.68705], \n [0.80071, 0.8825, 0.69424], \n [0.79595, 0.88322, 0.70121], \n [0.79094, 0.8837, 0.70797], \n [0.78566, 0.88395, 0.7145], \n [0.78012, 0.88396, 0.72082], \n [0.77433, 0.88375, 0.72692], \n [0.7683, 0.88331, 0.73279], \n [0.76203, 0.88264, 0.73844], \n [0.75553, 0.88177, 0.74387], \n [0.74879, 0.88066, 0.74908], \n [0.74184, 0.87934, 0.75407], \n [0.73468, 0.87781, 0.75884], \n [0.72731, 0.87607, 0.76339], \n [0.71976, 0.87411, 0.76772], \n [0.71201, 0.87195, 0.77184], \n [0.70408, 0.86958, 0.77573], \n [0.69599, 0.86701, 0.77941], \n [0.68774, 0.86425, 0.78288], \n [0.67934, 0.86127, 0.78614], \n [0.67081, 0.85811, 0.78919], \n [0.66215, 0.85476, 0.79202], \n [0.65336, 0.8512, 0.79465], \n [0.64448, 0.84747, 0.79707], \n [0.6355, 0.84356, 0.7993], \n [0.62645, 0.83947, 0.80131], \n [0.61732, 0.83519, 0.80313], \n [0.60814, 0.83075, 0.80476], \n [0.59891, 0.82614, 0.80619], \n [0.58965, 0.82137, 0.80743], \n [0.58037, 0.81644, 0.80848], \n [0.57108, 0.81135, 0.80935], \n [0.56181, 0.80612, 0.81004], \n [0.55255, 0.80074, 0.81055], \n [0.54332, 0.79522, 0.81088], \n [0.53412, 0.78958, 0.81105], \n [0.525, 0.7838, 0.81105], \n [0.51593, 0.77791, 0.81088], \n [0.50695, 0.77189, 0.81055], \n [0.49808, 0.76577, 0.81007], \n [0.48928, 0.75954, 0.80944], \n [0.48061, 0.75321, 0.80866], \n [0.47207, 0.7468, 0.80773], \n [0.46365, 0.74029, 0.80667], \n [0.45539, 0.7337, 0.80546], \n [0.44728, 0.72703, 0.80413], \n [0.43934, 0.7203, 0.80266], \n [0.43158, 0.7135, 0.80107], \n [0.42398, 0.70664, 0.79936], \n [0.41658, 0.69971, 0.79752], \n [0.40938, 0.69275, 0.79557], \n [0.40237, 0.68572, 0.79351], \n [0.3956, 0.67865, 0.79133], \n [0.38903, 0.67155, 0.78905], \n [0.38267, 0.66441, 0.78666], \n [0.37656, 0.65724, 0.78416], \n [0.37066, 0.65003, 0.78155], \n [0.36502, 0.64279, 0.77884], \n [0.35961, 0.63552, 0.77604], 
\n [0.35446, 0.62824, 0.77312], \n [0.34955, 0.62094, 0.77011], \n [0.3449, 0.6136, 0.767], \n [0.34051, 0.60625, 0.76378], \n [0.33637, 0.59889, 0.76047], \n [0.33253, 0.59151, 0.75704], \n [0.32893, 0.58412, 0.75351], \n [0.32559, 0.57671, 0.74987], \n [0.32256, 0.56928, 0.74613], \n [0.31978, 0.56186, 0.74228], \n [0.31727, 0.55441, 0.7383], \n [0.31505, 0.54695, 0.73422], \n [0.31311, 0.53948, 0.73002], \n [0.31144, 0.53201, 0.72569], \n [0.31007, 0.52453, 0.72124], \n [0.30897, 0.51704, 0.71667], \n [0.30811, 0.50955, 0.71197], \n [0.30755, 0.50205, 0.70713], \n [0.30726, 0.49456, 0.70216], \n [0.30723, 0.48707, 0.69706], \n [0.30746, 0.47958, 0.69182], \n [0.30795, 0.4721, 0.68643], \n [0.3087, 0.46463, 0.6809], \n [0.30968, 0.45716, 0.67525], \n [0.31088, 0.44973, 0.66944], \n [0.31228, 0.44232, 0.6635], \n [0.31393, 0.43493, 0.65741], \n [0.31578, 0.42758, 0.65118], \n [0.3178, 0.42025, 0.64482], \n [0.32001, 0.41299, 0.63833], \n [0.32238, 0.40577, 0.6317], \n [0.32489, 0.39861, 0.62495], \n [0.32755, 0.39152, 0.61809], \n [0.33035, 0.38448, 0.61111], \n [0.33327, 0.37755, 0.60402], \n [0.33627, 0.37068, 0.59684], \n [0.33939, 0.36392, 0.58955], \n [0.34257, 0.35728, 0.58219], \n [0.3458, 0.35073, 0.57476], \n [0.34912, 0.34428, 0.56727], \n [0.35247, 0.33797, 0.55971], \n [0.35587, 0.33179, 0.55212], \n [0.35927, 0.32574, 0.54448], \n [0.36271, 0.31986, 0.53684], \n [0.36617, 0.31411, 0.52917], \n [0.36961, 0.30852, 0.52148], \n [0.37306, 0.30306, 0.51382], \n [0.37652, 0.2978, 0.50615], \n [0.37994, 0.29269, 0.49854], \n [0.38336, 0.28775, 0.49094], \n [0.38674, 0.28301, 0.48337], \n [0.39011, 0.27842, 0.47586], \n [0.39346, 0.27401, 0.4684], \n [0.39677, 0.26978, 0.461], \n [0.40006, 0.26573, 0.45366], \n [0.40333, 0.26185, 0.4464], \n [0.40655, 0.25815, 0.43921], \n [0.40974, 0.25466, 0.43212], \n [0.4129, 0.25132, 0.42509], \n [0.41602, 0.24817, 0.41813], \n [0.41912, 0.24515, 0.41128], \n [0.42218, 0.24235, 0.40451], \n [0.42522, 0.23972, 0.39784], 
\n [0.42823, 0.23728, 0.39126], \n [0.43121, 0.23498, 0.38475], \n [0.43415, 0.23282, 0.37836], \n [0.43708, 0.23086, 0.37204], \n [0.43998, 0.22907, 0.36583], \n [0.44286, 0.22743, 0.3597], \n [0.44571, 0.22596, 0.35366], \n [0.44855, 0.2246, 0.34773]] \n \nromaO_map = LinearSegmentedColormap.from_list('romaO', cm_data) \n# For use of \"viscm view\" \ntest_cm = romaO_map \n \nif __name__ == \"__main__\": \n import matplotlib.pyplot as plt \n import numpy as np \n \n try: \n from viscm import viscm \n viscm(romaO_map) \n except ImportError: \n print(\"viscm not found, falling back on simple display\") \n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', \n cmap=romaO_map) \n plt.show() \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def insert_timestamp_from_filename_into_image(path_to_image: str,
        ignorable_string: str, output_filename: str = '',
        distance_to_border: int = 5, color_of_timestamp: tuple = (0, 0, 0),
        size_of_timestamp: int = 20):
    """Draw a timestamp derived from the image's filename onto the image.

    The text drawn is the file's base name with the final extension and
    *ignorable_string* removed (e.g. 'Image_2021-09-09.JPG' with
    ignorable_string 'Image_' yields '2021-09-09').

    Args:
        path_to_image: '/'-separated path to the source image.
        ignorable_string: Substring removed from the base filename.
        output_filename: Target path for the stamped image; when empty the
            image is saved under its base filename in the working directory.
        distance_to_border: Margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the drawn text.
        size_of_timestamp: Font size; also used as the text height when
            anchoring near the bottom edge.
    """
    image = Image.open(path_to_image)
    # Anchor the text in the bottom-left corner of the image.
    pos_of_timestamp = (distance_to_border,
                        image.height - size_of_timestamp - distance_to_border)
    filename_with_extension = path_to_image.split('/')[-1]
    # Strip only the final extension. The previous reverse-scan loop kept
    # truncating at every '.' it encountered, so 'a.b.c.JPG' collapsed to
    # 'a' instead of 'a.b.c'. A leading dot (hidden file) is not treated
    # as an extension separator.
    dot_index = filename_with_extension.rfind('.')
    if dot_index > 0:
        filename = filename_with_extension[:dot_index]
    else:
        filename = filename_with_extension
    timestamp = filename.replace(ignorable_string, '')
    drawable_image = ImageDraw.Draw(image)
    # NOTE(review): 'arial.ttf' resolves only where that font file is
    # discoverable (typically Windows) -- confirm on deployment targets.
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,
                        font=font)
    if output_filename == '':
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
<|reserved_special_token_0|>
def insert_timestamp_from_imagedata_into_image(path_to_image: str,
        output_filename: str = '', distance_to_border: int = 5,
        color_of_timestamp: tuple = (0, 0, 0), size_of_timestamp: int = 20):
    """Draw the image's EXIF 'DateTime' value onto the image itself.

    Args:
        path_to_image: '/'-separated path to the source image.
        output_filename: Target path for the stamped image; when empty the
            image is saved under its base filename in the working directory.
        distance_to_border: Margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the drawn text.
        size_of_timestamp: Font size; also used as the text height when
            anchoring near the bottom edge.
    """
    image = Image.open(path_to_image)
    # Anchor the text in the bottom-left corner of the image.
    pos_of_timestamp = (distance_to_border,
                        image.height - size_of_timestamp - distance_to_border)
    filename_with_extension = path_to_image.split('/')[-1]
    exifdata = image.getexif()
    # Look up the 'DateTime' tag explicitly. The previous loop left tag_id
    # pointing at whatever tag happened to iterate last when 'DateTime' was
    # absent, stamping an unrelated value; now a missing tag yields 'None'.
    datetime_tag_id = next(
        (tid for tid in exifdata if TAGS.get(tid, tid) == 'DateTime'), 0)
    timestamp = str(exifdata.get(datetime_tag_id))
    drawable_image = ImageDraw.Draw(image)
    # NOTE(review): 'arial.ttf' resolves only where that font file is
    # discoverable (typically Windows) -- confirm on deployment targets.
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,
                        font=font)
    if output_filename == '':
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def insert_timestamp_from_filename_into_image(path_to_image: str,
        ignorable_string: str, output_filename: str = '',
        distance_to_border: int = 5, color_of_timestamp: tuple = (0, 0, 0),
        size_of_timestamp: int = 20):
    """Draw a timestamp derived from the image's filename onto the image.

    The text drawn is the file's base name with the final extension and
    *ignorable_string* removed (e.g. 'Image_2021-09-09.JPG' with
    ignorable_string 'Image_' yields '2021-09-09').

    Args:
        path_to_image: '/'-separated path to the source image.
        ignorable_string: Substring removed from the base filename.
        output_filename: Target path for the stamped image; when empty the
            image is saved under its base filename in the working directory.
        distance_to_border: Margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the drawn text.
        size_of_timestamp: Font size; also used as the text height when
            anchoring near the bottom edge.
    """
    image = Image.open(path_to_image)
    # Anchor the text in the bottom-left corner of the image.
    pos_of_timestamp = (distance_to_border,
                        image.height - size_of_timestamp - distance_to_border)
    filename_with_extension = path_to_image.split('/')[-1]
    # Strip only the final extension. The previous reverse-scan loop kept
    # truncating at every '.' it encountered, so 'a.b.c.JPG' collapsed to
    # 'a' instead of 'a.b.c'. A leading dot (hidden file) is not treated
    # as an extension separator.
    dot_index = filename_with_extension.rfind('.')
    if dot_index > 0:
        filename = filename_with_extension[:dot_index]
    else:
        filename = filename_with_extension
    timestamp = filename.replace(ignorable_string, '')
    drawable_image = ImageDraw.Draw(image)
    # NOTE(review): 'arial.ttf' resolves only where that font file is
    # discoverable (typically Windows) -- confirm on deployment targets.
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,
                        font=font)
    if output_filename == '':
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
def insert_timestamp_into_image(path_to_image: str, output_filename: str='',
    distance_to_border: int=5, color_of_timestamp: tuple=(0, 0, 0),
    size_of_timestamp: int=20):
    """Stamp the current date and time onto an image.

    Args:
        path_to_image: '/'-separated path to the source image.
        output_filename: Target path for the stamped image; when empty the
            image is saved under its base filename in the working directory.
        distance_to_border: Margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the drawn text.
        size_of_timestamp: Font size of the drawn text.
    """
    source = Image.open(path_to_image)
    # Place the text near the bottom-left corner.
    text_position = (distance_to_border,
                     source.height - size_of_timestamp - distance_to_border)
    base_name = path_to_image.split('/')[-1]
    now_text = str(datetime.now())
    canvas = ImageDraw.Draw(source)
    stamp_font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    canvas.text(text_position, now_text, color_of_timestamp, font=stamp_font)
    target = output_filename if output_filename != '' else base_name
    source.save(target)
def insert_timestamp_from_imagedata_into_image(path_to_image: str,
        output_filename: str = '', distance_to_border: int = 5,
        color_of_timestamp: tuple = (0, 0, 0), size_of_timestamp: int = 20):
    """Draw the image's EXIF 'DateTime' value onto the image itself.

    Args:
        path_to_image: '/'-separated path to the source image.
        output_filename: Target path for the stamped image; when empty the
            image is saved under its base filename in the working directory.
        distance_to_border: Margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the drawn text.
        size_of_timestamp: Font size; also used as the text height when
            anchoring near the bottom edge.
    """
    image = Image.open(path_to_image)
    # Anchor the text in the bottom-left corner of the image.
    pos_of_timestamp = (distance_to_border,
                        image.height - size_of_timestamp - distance_to_border)
    filename_with_extension = path_to_image.split('/')[-1]
    exifdata = image.getexif()
    # Look up the 'DateTime' tag explicitly. The previous loop left tag_id
    # pointing at whatever tag happened to iterate last when 'DateTime' was
    # absent, stamping an unrelated value; now a missing tag yields 'None'.
    datetime_tag_id = next(
        (tid for tid in exifdata if TAGS.get(tid, tid) == 'DateTime'), 0)
    timestamp = str(exifdata.get(datetime_tag_id))
    drawable_image = ImageDraw.Draw(image)
    # NOTE(review): 'arial.ttf' resolves only where that font file is
    # discoverable (typically Windows) -- confirm on deployment targets.
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,
                        font=font)
    if output_filename == '':
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def insert_timestamp_from_filename_into_image(path_to_image: str,
        ignorable_string: str, output_filename: str = '',
        distance_to_border: int = 5, color_of_timestamp: tuple = (0, 0, 0),
        size_of_timestamp: int = 20):
    """Draw a timestamp derived from the image's filename onto the image.

    The text drawn is the file's base name with the final extension and
    *ignorable_string* removed (e.g. 'Image_2021-09-09.JPG' with
    ignorable_string 'Image_' yields '2021-09-09').

    Args:
        path_to_image: '/'-separated path to the source image.
        ignorable_string: Substring removed from the base filename.
        output_filename: Target path for the stamped image; when empty the
            image is saved under its base filename in the working directory.
        distance_to_border: Margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the drawn text.
        size_of_timestamp: Font size; also used as the text height when
            anchoring near the bottom edge.
    """
    image = Image.open(path_to_image)
    # Anchor the text in the bottom-left corner of the image.
    pos_of_timestamp = (distance_to_border,
                        image.height - size_of_timestamp - distance_to_border)
    filename_with_extension = path_to_image.split('/')[-1]
    # Strip only the final extension. The previous reverse-scan loop kept
    # truncating at every '.' it encountered, so 'a.b.c.JPG' collapsed to
    # 'a' instead of 'a.b.c'. A leading dot (hidden file) is not treated
    # as an extension separator.
    dot_index = filename_with_extension.rfind('.')
    if dot_index > 0:
        filename = filename_with_extension[:dot_index]
    else:
        filename = filename_with_extension
    timestamp = filename.replace(ignorable_string, '')
    drawable_image = ImageDraw.Draw(image)
    # NOTE(review): 'arial.ttf' resolves only where that font file is
    # discoverable (typically Windows) -- confirm on deployment targets.
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,
                        font=font)
    if output_filename == '':
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
def insert_timestamp_into_image(path_to_image: str, output_filename: str='',
    distance_to_border: int=5, color_of_timestamp: tuple=(0, 0, 0),
    size_of_timestamp: int=20):
    """Stamp the current date and time onto an image.

    Args:
        path_to_image: '/'-separated path to the source image.
        output_filename: Target path for the stamped image; when empty the
            image is saved under its base filename in the working directory.
        distance_to_border: Margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the drawn text.
        size_of_timestamp: Font size of the drawn text.
    """
    source = Image.open(path_to_image)
    # Place the text near the bottom-left corner.
    text_position = (distance_to_border,
                     source.height - size_of_timestamp - distance_to_border)
    base_name = path_to_image.split('/')[-1]
    now_text = str(datetime.now())
    canvas = ImageDraw.Draw(source)
    stamp_font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    canvas.text(text_position, now_text, color_of_timestamp, font=stamp_font)
    target = output_filename if output_filename != '' else base_name
    source.save(target)
def insert_timestamp_from_imagedata_into_image(path_to_image: str,
        output_filename: str = '', distance_to_border: int = 5,
        color_of_timestamp: tuple = (0, 0, 0), size_of_timestamp: int = 20):
    """Draw the image's EXIF 'DateTime' value onto the image itself.

    Args:
        path_to_image: '/'-separated path to the source image.
        output_filename: Target path for the stamped image; when empty the
            image is saved under its base filename in the working directory.
        distance_to_border: Margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the drawn text.
        size_of_timestamp: Font size; also used as the text height when
            anchoring near the bottom edge.
    """
    image = Image.open(path_to_image)
    # Anchor the text in the bottom-left corner of the image.
    pos_of_timestamp = (distance_to_border,
                        image.height - size_of_timestamp - distance_to_border)
    filename_with_extension = path_to_image.split('/')[-1]
    exifdata = image.getexif()
    # Look up the 'DateTime' tag explicitly. The previous loop left tag_id
    # pointing at whatever tag happened to iterate last when 'DateTime' was
    # absent, stamping an unrelated value; now a missing tag yields 'None'.
    datetime_tag_id = next(
        (tid for tid in exifdata if TAGS.get(tid, tid) == 'DateTime'), 0)
    timestamp = str(exifdata.get(datetime_tag_id))
    drawable_image = ImageDraw.Draw(image)
    # NOTE(review): 'arial.ttf' resolves only where that font file is
    # discoverable (typically Windows) -- confirm on deployment targets.
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,
                        font=font)
    if output_filename == '':
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
if __name__ == '__main__':
    # Demo: stamp the same source image three different ways.
    demo_source = 'Image_2021-09-09_09-00-00.JPG'
    stamp_kwargs = dict(distance_to_border=5,
                        color_of_timestamp=(255, 0, 0),
                        size_of_timestamp=50)
    insert_timestamp_from_filename_into_image(demo_source, 'Image_',
                                              'NewImage.JPG', **stamp_kwargs)
    insert_timestamp_into_image(demo_source, 'NewImage2.JPG', **stamp_kwargs)
    insert_timestamp_from_imagedata_into_image(demo_source, 'NewImage3.JPG',
                                               **stamp_kwargs)
<|reserved_special_token_1|>
from datetime import datetime
from pathlib import Path

from PIL import Image, ImageDraw, ImageFont
from PIL.ExifTags import TAGS
def insert_timestamp_from_filename_into_image(path_to_image: str,
    ignorable_string: str, output_filename: str='', distance_to_border: int
    =5, color_of_timestamp: tuple=(0, 0, 0), size_of_timestamp: int=20):
    """Extract a timestamp embedded in the file name and draw it on the image.

    The file's stem (name without extension) is taken, every occurrence of
    ``ignorable_string`` is removed, and the remainder is rendered in the
    bottom-left corner of the image.

    Parameters:
        path_to_image: path of the image; its name is expected to look like
            ``<ignorable_string><timestamp>.<ext>``.
        ignorable_string: substring to strip from the stem.
        output_filename: target file; when empty the image is saved under
            its own basename in the working directory.
        distance_to_border: margin in pixels from the left and bottom edges.
        color_of_timestamp: RGB color of the rendered text.
        size_of_timestamp: font size in points (arial.ttf must be available
            in the working directory).
    """
    image = Image.open(path_to_image)
    # Bottom-left anchor, offset by the requested margin.
    pos_of_timestamp = (distance_to_border, image.height -
        size_of_timestamp - distance_to_border)
    path = Path(path_to_image)
    filename_with_extension = path.name
    # Path.stem drops only the real extension.  The original loop truncated
    # at every dot from index 1 onward, mangling stems that themselves
    # contain dots (e.g. 'IMG_2021.09.09.JPG' lost its entire timestamp).
    timestamp = path.stem.replace(ignorable_string, '')
    drawable_image = ImageDraw.Draw(image)
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,
        font=font)
    if output_filename == '':
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
def insert_timestamp_into_image(path_to_image: str, output_filename: str='',
    distance_to_border: int=5, color_of_timestamp: tuple=(0, 0, 0),
    size_of_timestamp: int=20):
    """Render the current date/time in the bottom-left corner of an image.

    Saves to ``output_filename``, or under the image's own basename in the
    working directory when ``output_filename`` is empty.
    """
    picture = Image.open(path_to_image)
    # Drop any directory components to get the bare file name.
    base_name = path_to_image.split('/')[-1]
    # Offset from the left edge and from the bottom edge (y grows downward).
    x_pos = distance_to_border
    y_pos = picture.height - size_of_timestamp - distance_to_border
    now_text = str(datetime.now())
    pen = ImageDraw.Draw(picture)
    # arial.ttf is resolved relative to the working directory.
    typeface = ImageFont.truetype('arial.ttf', size_of_timestamp)
    pen.text((x_pos, y_pos), now_text, color_of_timestamp, font=typeface)
    if output_filename:
        picture.save(output_filename)
    else:
        picture.save(base_name)
def insert_timestamp_from_imagedata_into_image(path_to_image: str,
    output_filename: str='', distance_to_border: int=5, color_of_timestamp:
    tuple=(0, 0, 0), size_of_timestamp: int=20):
    """Stamp the EXIF 'DateTime' (original creation time) onto the image.

    The stamp is rendered in the bottom-left corner, ``distance_to_border``
    pixels from the left and bottom edges.  Saves to ``output_filename``,
    or under the image's own basename in the working directory when
    ``output_filename`` is empty.  arial.ttf must be available in the
    working directory.
    """
    image = Image.open(path_to_image)
    # Bottom-left anchor, offset by the requested margin.
    pos_of_timestamp = (distance_to_border, image.height -
        size_of_timestamp - distance_to_border)
    filename_with_extension = path_to_image.split('/')[-1]
    exif = image.getexif()
    # Search for the 'DateTime' tag explicitly.  The original for-loop fell
    # through to the last tag id (or 0) when the tag was absent, stamping
    # unrelated metadata or "None"; a missing tag now yields an empty stamp.
    timestamp = ''
    for tid, tag_value in exif.items():
        if TAGS.get(tid, tid) == 'DateTime':
            timestamp = str(tag_value)
            break
    drawable_image = ImageDraw.Draw(image)
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,
        font=font)
    if output_filename == '':
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
if __name__ == '__main__':
    # Demo: annotate the same photo with each of the three timestamp
    # sources, writing a separate output file every time.
    source = 'Image_2021-09-09_09-00-00.JPG'
    style = dict(distance_to_border=5, color_of_timestamp=(255, 0, 0),
                 size_of_timestamp=50)
    insert_timestamp_from_filename_into_image(source, 'Image_',
        'NewImage.JPG', **style)
    insert_timestamp_into_image(source, 'NewImage2.JPG', **style)
    insert_timestamp_from_imagedata_into_image(source, 'NewImage3.JPG',
        **style)
<|reserved_special_token_1|>
from PIL import Image, ImageDraw, ImageFont
from PIL.ExifTags import TAGS
from datetime import datetime
# Extracts the timestamp from the file name and inserts it into the image.
def insert_timestamp_from_filename_into_image(path_to_image: str,
                                              ignorable_string: str,
                                              output_filename: str = "",
                                              distance_to_border: int = 5,
                                              color_of_timestamp: tuple = (0, 0, 0),
                                              size_of_timestamp: int = 20):
    """Extract a timestamp embedded in the file name and draw it on the image.

    The file's stem (name without extension) is taken, every occurrence of
    ``ignorable_string`` is removed, and the remainder is rendered in the
    bottom-left corner.  Saved to ``output_filename``, or under the image's
    own basename in the working directory when ``output_filename`` is empty.
    arial.ttf must be available in the working directory.
    """
    image = Image.open(path_to_image)
    # Bottom-left anchor, offset by the requested margin (y from the top).
    pos_of_timestamp = (distance_to_border,
                        image.height - size_of_timestamp - distance_to_border)
    path = Path(path_to_image)
    filename_with_extension = path.name
    # Path.stem drops only the real extension.  The original loop truncated
    # at every dot from index 1 onward, which mangled stems that themselves
    # contain dots (e.g. 'IMG_2021.09.09.JPG' lost its whole timestamp).
    timestamp = path.stem.replace(ignorable_string, "")
    drawable_image = ImageDraw.Draw(image)
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp, font=font)
    # Empty output name: save under the source basename; else to output_filename.
    if output_filename == "":
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
# Gets the current timestamp and inserts it into the image
def insert_timestamp_into_image(path_to_image:str, 
output_filename:str = "", 
distance_to_border:int = 5, 
color_of_timestamp:tuple = (0,0,0), 
size_of_timestamp:int = 20):
    """Draw the current date and time onto the image.

    The stamp is rendered in the bottom-left corner, ``distance_to_border``
    pixels from the left and bottom edges.  When ``output_filename`` is
    empty the result is saved under the image's own basename in the working
    directory; otherwise it is saved to ``output_filename``.
    """
    image = Image.open(path_to_image)
    # Bottom-left anchor: x is the margin; y is measured from the top, so
    # subtract the font size and the margin from the image height.
    pos_of_timestamp = (distance_to_border, image.height-size_of_timestamp-distance_to_border);
    # Keep only the file name (drop any directory components).
    filename_with_extension = path_to_image.split("/")[-1]
    # The text to stamp: the current wall-clock timestamp.
    timestamp = str(datetime.now());
    # Drawing handle bound to the opened image.
    drawable_image = ImageDraw.Draw(image)
    # arial.ttf is resolved relative to the working directory.
    font = ImageFont.truetype('arial.ttf',size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp, font=font)
    # Empty output name: save under the source basename (overwrites a file
    # of that name in the working directory); otherwise save to output_filename.
    if(output_filename==""):
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
# Reads the attribute where the original time of creation is saved and
# inserts it into the image.
def insert_timestamp_from_imagedata_into_image(path_to_image: str,
                                               output_filename: str = "",
                                               distance_to_border: int = 5,
                                               color_of_timestamp: tuple = (0, 0, 0),
                                               size_of_timestamp: int = 20):
    """Stamp the image's EXIF 'DateTime' (original creation time) onto it.

    The stamp is rendered in the bottom-left corner, ``distance_to_border``
    pixels from the left and bottom edges.  Saves to ``output_filename``,
    or under the image's own basename in the working directory when
    ``output_filename`` is empty.  arial.ttf must be available in the
    working directory.
    """
    image = Image.open(path_to_image)
    # Bottom-left anchor, offset by the requested margin.
    pos_of_timestamp = (distance_to_border,
                        image.height - size_of_timestamp - distance_to_border)
    # Keep only the file name (drop directory components).
    filename_with_extension = path_to_image.split("/")[-1]
    # Look up the 'DateTime' EXIF tag explicitly.  The original for-loop
    # fell through to the last tag id (or 0) when 'DateTime' was absent,
    # stamping unrelated metadata or the string "None"; a missing tag now
    # yields an empty stamp instead.
    exifdata = image.getexif()
    timestamp = ""
    for tag_id, value in exifdata.items():
        if TAGS.get(tag_id, tag_id) == "DateTime":
            timestamp = str(value)
            break
    drawable_image = ImageDraw.Draw(image)
    font = ImageFont.truetype('arial.ttf', size_of_timestamp)
    drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp, font=font)
    # Empty output name: save under the source basename; else to output_filename.
    if output_filename == "":
        image.save(filename_with_extension)
    else:
        image.save(output_filename)
if __name__=="__main__":
    # Example function calls: annotate the same photo with each of the
    # three timestamp sources, writing a separate output file every time.
    # Minimal invocations (defaults only) are kept below for reference.
    #insert_timestamp_from_filename_into_image("Image_2021-09-09_09-00-00.png", "Image_")
    insert_timestamp_from_filename_into_image("Image_2021-09-09_09-00-00.JPG", "Image_", "NewImage.JPG", distance_to_border=5, color_of_timestamp=(255,0,0), size_of_timestamp=50)
    #insert_timestamp_into_image("Image_2021-01-01_20-00-00.png")
    insert_timestamp_into_image("Image_2021-09-09_09-00-00.JPG", "NewImage2.JPG", distance_to_border=5, color_of_timestamp=(255,0,0), size_of_timestamp=50)
    #insert_timestamp_from_imagedata_into_image("Image_2021-09-09_09-00-00.png")
    insert_timestamp_from_imagedata_into_image("Image_2021-09-09_09-00-00.JPG", "NewImage3.JPG", distance_to_border=5, color_of_timestamp=(255,0,0), size_of_timestamp=50)
|
flexible
|
{
"blob_id": "e6ab18d87ace00436a480f4f01da224eead84fc0",
"index": 5145,
"step-1": "<mask token>\n\n\ndef insert_timestamp_from_filename_into_image(path_to_image: str,\n ignorable_string: str, output_filename: str='', distance_to_border: int\n =5, color_of_timestamp: tuple=(0, 0, 0), size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n filename = filename_with_extension\n for i in range(len(filename) - 1, 0, -1):\n if filename[i] == '.':\n filename = filename[:i]\n timestamp = filename.replace(ignorable_string, '')\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\n<mask token>\n\n\ndef insert_timestamp_from_imagedata_into_image(path_to_image: str,\n output_filename: str='', distance_to_border: int=5, color_of_timestamp:\n tuple=(0, 0, 0), size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n exifdata = image.getexif()\n tag_id = 0\n for tag_id in exifdata:\n tag = TAGS.get(tag_id, tag_id)\n if tag == 'DateTime':\n break\n timestamp = str(exifdata.get(tag_id))\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef insert_timestamp_from_filename_into_image(path_to_image: str,\n ignorable_string: str, output_filename: str='', distance_to_border: int\n =5, color_of_timestamp: tuple=(0, 0, 0), size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n filename = filename_with_extension\n for i in range(len(filename) - 1, 0, -1):\n if filename[i] == '.':\n filename = filename[:i]\n timestamp = filename.replace(ignorable_string, '')\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\ndef insert_timestamp_into_image(path_to_image: str, output_filename: str='',\n distance_to_border: int=5, color_of_timestamp: tuple=(0, 0, 0),\n size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n timestamp = str(datetime.now())\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\ndef insert_timestamp_from_imagedata_into_image(path_to_image: str,\n output_filename: str='', distance_to_border: int=5, color_of_timestamp:\n tuple=(0, 0, 0), size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n exifdata = 
image.getexif()\n tag_id = 0\n for tag_id in exifdata:\n tag = TAGS.get(tag_id, tag_id)\n if tag == 'DateTime':\n break\n timestamp = str(exifdata.get(tag_id))\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef insert_timestamp_from_filename_into_image(path_to_image: str,\n ignorable_string: str, output_filename: str='', distance_to_border: int\n =5, color_of_timestamp: tuple=(0, 0, 0), size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n filename = filename_with_extension\n for i in range(len(filename) - 1, 0, -1):\n if filename[i] == '.':\n filename = filename[:i]\n timestamp = filename.replace(ignorable_string, '')\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\ndef insert_timestamp_into_image(path_to_image: str, output_filename: str='',\n distance_to_border: int=5, color_of_timestamp: tuple=(0, 0, 0),\n size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n timestamp = str(datetime.now())\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\ndef insert_timestamp_from_imagedata_into_image(path_to_image: str,\n output_filename: str='', distance_to_border: int=5, color_of_timestamp:\n tuple=(0, 0, 0), size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n exifdata = 
image.getexif()\n tag_id = 0\n for tag_id in exifdata:\n tag = TAGS.get(tag_id, tag_id)\n if tag == 'DateTime':\n break\n timestamp = str(exifdata.get(tag_id))\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\nif __name__ == '__main__':\n insert_timestamp_from_filename_into_image('Image_2021-09-09_09-00-00.JPG',\n 'Image_', 'NewImage.JPG', distance_to_border=5, color_of_timestamp=\n (255, 0, 0), size_of_timestamp=50)\n insert_timestamp_into_image('Image_2021-09-09_09-00-00.JPG',\n 'NewImage2.JPG', distance_to_border=5, color_of_timestamp=(255, 0, \n 0), size_of_timestamp=50)\n insert_timestamp_from_imagedata_into_image('Image_2021-09-09_09-00-00.JPG',\n 'NewImage3.JPG', distance_to_border=5, color_of_timestamp=(255, 0, \n 0), size_of_timestamp=50)\n",
"step-4": "from PIL import Image, ImageDraw, ImageFont\nfrom PIL.ExifTags import TAGS\nfrom datetime import datetime\n\n\ndef insert_timestamp_from_filename_into_image(path_to_image: str,\n ignorable_string: str, output_filename: str='', distance_to_border: int\n =5, color_of_timestamp: tuple=(0, 0, 0), size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n filename = filename_with_extension\n for i in range(len(filename) - 1, 0, -1):\n if filename[i] == '.':\n filename = filename[:i]\n timestamp = filename.replace(ignorable_string, '')\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\ndef insert_timestamp_into_image(path_to_image: str, output_filename: str='',\n distance_to_border: int=5, color_of_timestamp: tuple=(0, 0, 0),\n size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n timestamp = str(datetime.now())\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\ndef insert_timestamp_from_imagedata_into_image(path_to_image: str,\n output_filename: str='', distance_to_border: int=5, color_of_timestamp:\n tuple=(0, 0, 0), size_of_timestamp: int=20):\n image = Image.open(path_to_image)\n pos_of_timestamp = (distance_to_border, image.height -\n size_of_timestamp - 
distance_to_border)\n filename_with_extension = path_to_image.split('/')[-1]\n exifdata = image.getexif()\n tag_id = 0\n for tag_id in exifdata:\n tag = TAGS.get(tag_id, tag_id)\n if tag == 'DateTime':\n break\n timestamp = str(exifdata.get(tag_id))\n drawable_image = ImageDraw.Draw(image)\n font = ImageFont.truetype('arial.ttf', size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp,\n font=font)\n if output_filename == '':\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\nif __name__ == '__main__':\n insert_timestamp_from_filename_into_image('Image_2021-09-09_09-00-00.JPG',\n 'Image_', 'NewImage.JPG', distance_to_border=5, color_of_timestamp=\n (255, 0, 0), size_of_timestamp=50)\n insert_timestamp_into_image('Image_2021-09-09_09-00-00.JPG',\n 'NewImage2.JPG', distance_to_border=5, color_of_timestamp=(255, 0, \n 0), size_of_timestamp=50)\n insert_timestamp_from_imagedata_into_image('Image_2021-09-09_09-00-00.JPG',\n 'NewImage3.JPG', distance_to_border=5, color_of_timestamp=(255, 0, \n 0), size_of_timestamp=50)\n",
"step-5": "from PIL import Image, ImageDraw, ImageFont\nfrom PIL.ExifTags import TAGS\nfrom datetime import datetime\n\n#Extracts the timestamp from the filename and inserts it into the image\ndef insert_timestamp_from_filename_into_image(path_to_image:str, \nignorable_string:str,\noutput_filename:str = \"\", \ndistance_to_border:int = 5, \ncolor_of_timestamp:tuple = (0,0,0), \nsize_of_timestamp:int = 20):\n \n image = Image.open(path_to_image)\n\n #Place the timestamp in the bottom left hand corner with a certain distance to the border\n pos_of_timestamp = (distance_to_border, image.height-size_of_timestamp-distance_to_border);\n\n #Only get the filename with its extension of the filepath\n filename_with_extension = path_to_image.split(\"/\")[-1]\n\n filename = filename_with_extension\n #Filter out the file ending (.png, .jpeg ...)\n for i in range(len(filename)-1, 0, -1):\n if(filename[i]==\".\"):\n filename = filename[:i]\n\n #Filter out the ignorable part of the string to only get the timestamp\n timestamp = filename.replace(ignorable_string, \"\")\n\n #Get an object back that allows for drawing on an image\n drawable_image = ImageDraw.Draw(image)\n\n #Load the font file from the local directory and print the text on to the image\n font = ImageFont.truetype('arial.ttf',size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp, font=font)\n\n #Either overwrite the image or save it as a new image\n if(output_filename==\"\"):\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n#Gets the current timestamp and inserts it into the image\ndef insert_timestamp_into_image(path_to_image:str, \noutput_filename:str = \"\", \ndistance_to_border:int = 5, \ncolor_of_timestamp:tuple = (0,0,0), \nsize_of_timestamp:int = 20):\n \n image = Image.open(path_to_image)\n\n #Place the timestamp in the bottom left hand corner with a certain distance to the border\n pos_of_timestamp = (distance_to_border, 
image.height-size_of_timestamp-distance_to_border);\n\n #Only get the filename with its extension of the filepath\n filename_with_extension = path_to_image.split(\"/\")[-1]\n\n #Get the current timestamp\n timestamp = str(datetime.now());\n\n #Get an object back that allows for drawing on an image\n drawable_image = ImageDraw.Draw(image)\n\n #Load the font file from the local directory and print the text on to the image\n font = ImageFont.truetype('arial.ttf',size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp, font=font)\n\n #Either overwrite the image or save it as a new image\n if(output_filename==\"\"):\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n#Reads the attribute where the original time of creation is saved and inserts it into the image\ndef insert_timestamp_from_imagedata_into_image(path_to_image:str, \noutput_filename:str = \"\", \ndistance_to_border:int = 5, \ncolor_of_timestamp:tuple = (0,0,0), \nsize_of_timestamp:int = 20):\n \n image = Image.open(path_to_image)\n\n #Place the timestamp in the bottom left hand corner with a certain distance to the border\n pos_of_timestamp = (distance_to_border, image.height-size_of_timestamp-distance_to_border);\n\n #Only get the filename with its extension of the filepath\n filename_with_extension = path_to_image.split(\"/\")[-1]\n\n #Figure out the tag_id of the attribute DateTime\n exifdata = image.getexif();\n tag_id = 0\n for tag_id in exifdata:\n tag = TAGS.get(tag_id, tag_id)\n if(tag == \"DateTime\"):\n break\n\n #Read the attribute DateTime which is the date of creation\n timestamp = str(exifdata.get(tag_id))\n\n #Get an object back that allows for drawing on an image\n drawable_image = ImageDraw.Draw(image)\n\n #Load the font file from the local directory and print the text on to the image\n font = ImageFont.truetype('arial.ttf',size_of_timestamp)\n drawable_image.text(pos_of_timestamp, timestamp, color_of_timestamp, font=font)\n\n 
#Either overwrite the image or save it as a new image\n if(output_filename==\"\"):\n image.save(filename_with_extension)\n else:\n image.save(output_filename)\n\n\nif __name__==\"__main__\":\n #Example function calls\n\n #insert_timestamp_from_filename_into_image(\"Image_2021-09-09_09-00-00.png\", \"Image_\")\n insert_timestamp_from_filename_into_image(\"Image_2021-09-09_09-00-00.JPG\", \"Image_\", \"NewImage.JPG\", distance_to_border=5, color_of_timestamp=(255,0,0), size_of_timestamp=50)\n #insert_timestamp_into_image(\"Image_2021-01-01_20-00-00.png\")\n insert_timestamp_into_image(\"Image_2021-09-09_09-00-00.JPG\", \"NewImage2.JPG\", distance_to_border=5, color_of_timestamp=(255,0,0), size_of_timestamp=50)\n #insert_timestamp_from_imagedata_into_image(\"Image_2021-09-09_09-00-00.png\")\n insert_timestamp_from_imagedata_into_image(\"Image_2021-09-09_09-00-00.JPG\", \"NewImage3.JPG\", distance_to_border=5, color_of_timestamp=(255,0,0), size_of_timestamp=50)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Simple interactive cash register: read products and prices until the user
# answers 'N', then report the total, how many items cost over R$1000, and
# the cheapest product.
total = totmil = cont = menor = 0
barato = ' '
print('-' * 40)
print('LOJA SUPER BARATÃO')
print('-' * 40)
while True:
    produto = str(input('Nome do Produto: '))
    preco = float(input('Preço: '))
    cont += 1
    total += preco
    if preco > 1000:
        totmil += 1
    # Track the cheapest product; the first item always initializes it.
    if cont == 1 or preco < menor:
        barato = produto
        menor = preco
    # Re-prompt until a valid S/N answer is given.  Indexing [0] on an empty
    # reply raised IndexError in the original; an empty reply now just
    # triggers another prompt.
    resp = ' '
    while resp not in 'SN':
        entrada = str(input('Quer continuar? [S/N]')).strip().upper()
        resp = entrada[0] if entrada else ' '
    if resp == 'N':
        break
print('O total da compra foi R${:.2f}'.format(total))
print('Temos {} produtos custando mais de R$1000,00'.format(totmil))
print('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))
|
normal
|
{
"blob_id": "35b24ffa14f8b3c2040d5becc8a35721e86d8b3d",
"index": 345,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('-' * 40)\nprint('LOJA SUPER BARATÃO')\nprint('-' * 40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n",
"step-3": "total = totmil = cont = menor = 0\nbarato = ' '\nprint('-' * 40)\nprint('LOJA SUPER BARATÃO')\nprint('-' * 40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):
print('printing user input from functionGraph - ' + function)
print(dVal1, dVal2, dVal3, dVal4)
x1 = -5
x2 = 5
print('1st input:')
y = function
def f(x):
return eval(y)
"""print("Domain Val 1:")
x1 = float(input())
print("Domain Val 2:")
x2 = float(input())
print("Range Val 1:")
y1 = float(input())
print("Range Val 2:")
y2 = float(input())
"""
x1 = int(dVal1)
x2 = int(dVal2)
y1 = int(dVal3)
y2 = int(dVal4)
print('Processing...')
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
yParsed = parse_expr(y, evaluate=False)
n, d = yParsed.as_numer_denom()
undef = sympy.solve(d)
numzero = sympy.solve(n)
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
plt.xlim(x1, x2)
plt.ylim(y1, y2)
plt.autoscale(False)
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
for x in undef:
if x not in numzero:
try:
ax1.axvline(x=x, linestyle='--')
except:
pass
else:
x = x + 0.01
ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,
markeredgecolor='g', markerfacecolor='None')
count = 0
"""for zero in numzero:
if zero in undef:
ax1.plot(zero, f(zero), marker='s', color='green')
count = count + 1"""
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'
, bbox_inches='tight')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
count = 1
limit = len(yVal2) - 1
for z in yVal2:
if count == limit:
break
if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')
ax1.axvline(x=xVal1[count], linestyle='--')
count = count + 1
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'
, bbox_inches='tight')
plt.clf()
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
count = 1
limit = len(yVal2) - 1
for z in yVal2:
if count == limit:
break
if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')
ax1.axvline(x=xVal1[count], linestyle='--')
count = count + 1
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'
, bbox_inches='tight')
plt.clf()
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'
, bbox_inches='tight')
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange3 = np.arange(x1, x2, 0.01)
yRange3 = np.empty(xRange3.size)
"""for x in np.nditer(xRange3):
yRange3[count] = diff2(y, x)
count = count + 1"""
count = 1
limit = yRange2.size - 1
for x in np.nditer(xRange3):
if count == limit:
break
yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])
count = count + 1
np.delete(xRange3, -1)
np.delete(yRange3, -1)
xVal3 = xRange3.tolist()
yVal3 = yRange3.tolist()
print('XXXXXXXXXX')
for x in xVal3:
print(x)
print('YYYYYYYYYY')
for yVal in yVal3:
print(yVal)
ax1.plot(xVal3, yVal3, 'b')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'
, bbox_inches='tight')
plt.clf
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange3 = np.arange(x1, x2, 0.01)
yRange3 = np.empty(xRange3.size)
count = 1
limit = yRange2.size - 1
for x in np.nditer(xRange3):
if count == limit:
break
yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])
count = count + 1
np.delete(xRange3, -1)
np.delete(yRange3, -1)
xVal3 = xRange3.tolist()
yVal3 = yRange3.tolist()
ax1.plot(xVal3, yVal3, 'b', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
count = 1
limit = len(yVal2) - 1
for z in yVal3:
if count == limit:
break
if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:
points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',
color='c')
ax1.axvline(x=xVal2[count], linestyle='--')
count = count + 1
count = 1
limit = len(yVal2) - 1
for z in yVal3:
if count == limit:
break
if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:
points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',
color='c')
ax1.axvline(x=xVal2[count], linestyle='--')
count = count + 1
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'
, bbox_inches='tight')
plt.clf()
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
n, d = yParsed.as_numer_denom()
undef = sympy.solve(d)
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
plt.xlim(x1, x2)
plt.ylim(y1, y2)
plt.autoscale(False)
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
n, d = yParsed.as_numer_denom()
s = Symbol('s', real=True)
undef = sympy.solve(d, s)
for xc in undef:
ax1.axvline(x=xc, linestyle='--')
"""
print("Integration x1:")
x1int = float(input())
print("Integration x2:")
x2int = float(input())
"""
x1int = int(ftcVal1)
x2int = int(ftcVal2)
print('Processing...')
sectionx = np.arange(x1int, x2int, 1e-05)
sectiony = np.empty(sectionx.size)
count = 0
for x in np.nditer(sectionx):
sectiony[count] = eval(y)
count = count + 1
plt.fill_between(sectionx, sectiony)
global area
area = 0
count = 0
limit = sectionx.size - 1
for x in np.nditer(sectionx):
if count == limit:
break
trapSum = trapz(sectiony[count], sectiony[count + 1])
area = area + trapSum
count = count + 1
print(area)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'
, bbox_inches='tight')
<|reserved_special_token_0|>
def testFunc(inp):
print('printing user input from testFunc - ' + inp)
pass
@app.route('/', methods=['GET', 'POST'])
@app.route('/graph', methods=['GET', 'POST'])
def graph():
    """Main page: on POST, read the form values and regenerate every plot."""
    if request.method == 'POST':
        form = request.form
        functionGraph(form['Function'], form['dVal1'], form['dVal2'],
                      form['dVal3'], form['dVal4'],
                      form['ftcVal1'], form['ftcVal2'])
        print('user input = ' + str(input))
    return render_template('graph.html')
<|reserved_special_token_0|>
@app.route('/input', methods=['GET', 'POST'])
def input():
    """Serve the expression-entry form.

    NOTE(review): this view shadows the ``input`` builtin at module level.
    """
    page = 'input.html'
    return render_template(page)
<|reserved_special_token_0|>
@app.route('/der2', methods=['GET', 'POST'])
def der2Graph():
    """Serve the second-derivative graph page."""
    page = 'graph3.html'
    return render_template(page)
@app.route('/relmax', methods=['GET', 'POST'])
def relmax():
    """Serve the relative-maxima page."""
    page = 'relmax.html'
    return render_template(page)
@app.route('/relmin', methods=['GET', 'POST'])
def relmin():
    """Serve the relative-minima page."""
    page = 'relmin.html'
    return render_template(page)
<|reserved_special_token_0|>
@app.route('/ftc', methods=['GET', 'POST'])
def ftc():
    """Serve the integral page with the last area computed by functionGraph."""
    global area
    result_text = str(area)
    return render_template('ftc.html', result=result_text)
@app.route('/in1', methods=['GET', 'POST'])
def in1():
    """Serve the in1 page."""
    page = 'in1.html'
    return render_template(page)
<|reserved_special_token_0|>
@app.after_request
def add_header(response):
    """Stamp no-cache headers so regenerated plot PNGs are always re-fetched."""
    hdrs = response.headers
    hdrs['X-UA-Compatible'] = 'IE=Edge,chrome=1'
    hdrs['Cache-Control'] = 'public, max-age=0'
    return response
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):
    """Generate the full set of analysis plots for a user-entered function.

    Saves graph.png (function with asymptotes/holes), relmax.png and
    relmin.png (relative extrema), deriv_graph.png and deriv2_graph.png
    (first/second-derivative overlays), poi.png (inflection points) and
    ftc.png (shaded definite integral) under static/images, and stores the
    trapezoidal-rule area in the module-level global ``area``.

    Parameters:
        function: expression in ``x``, evaluated with ``eval`` -- trusted
            input only; never expose this path to untrusted users.
        dVal1, dVal2: x-axis window bounds (stringly-typed ints).
        dVal3, dVal4: y-axis window bounds.
        ftcVal1, ftcVal2: integration bounds for the area computation.

    Fixes over the previous version: ``plt.clf`` was referenced without
    being called (leaking figure state), the second-derivative endpoints
    were left as uninitialized ``np.empty`` garbage (``np.delete`` return
    values were discarded), and seven near-identical copy-pasted plotting
    passes are collapsed into shared helpers.
    """
    print('printing user input from functionGraph - ' + function)
    print(dVal1, dVal2, dVal3, dVal4)
    y = function
    x1, x2 = int(dVal1), int(dVal2)
    y1, y2 = int(dVal3), int(dVal4)
    print('Processing...')
    out_dir = (
        '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/'
        )

    def _sample(xs):
        # Evaluate the user expression at every sample point.
        ys = np.empty(xs.size)
        for i, x in enumerate(np.nditer(xs)):
            ys[i] = eval(y)  # SECURITY: eval of user-supplied text
        return ys

    def _first_derivative(xs):
        # Pointwise first derivative via the module-level diff() helper.
        ys = np.empty(xs.size)
        for i, x in enumerate(np.nditer(xs)):
            ys[i] = diff(y, x)
        return ys

    def _second_derivative(d1):
        # Second derivative from first-derivative samples via diff2();
        # endpoints are padded with their neighbours instead of being left
        # as uninitialized memory.
        d2 = np.empty(d1.size)
        for i in range(1, d1.size - 1):
            d2[i] = diff2(d1[i - 1], d1[i + 1])
        if d1.size > 2:
            d2[0] = d2[1]
            d2[-1] = d2[-2]
        return d2

    def _new_axes():
        # Fresh 10x10 figure with the shared grid/axis-line styling.
        plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w',
                   edgecolor='k')
        ax = plt.subplot(2, 2, 1)
        ax.grid(True, which='both')
        ax.axhline(y=0, color='k')
        ax.axvline(x=0, color='k')
        return ax

    def _save(name):
        # Clamp to the requested window, write the PNG, then clear the
        # figure (the old code said ``plt.clf`` without calling it).
        plt.xlim(left=x1, right=x2)
        plt.ylim(top=y2, bottom=y1)
        plt.savefig(out_dir + name, bbox_inches='tight')
        plt.clf()

    xs = np.arange(x1, x2, 0.01)
    ys = _sample(xs)
    d1 = _first_derivative(xs)
    d2 = _second_derivative(d1)

    # Symbolic analysis: where the denominator vanishes the function is
    # undefined (asymptote), unless the numerator vanishes too (hole).
    yParsed = parse_expr(y, evaluate=False)
    numer, denom = yParsed.as_numer_denom()
    undef = sympy.solve(denom)
    numzero = sympy.solve(numer)

    # --- base graph with asymptotes and removable discontinuities ---
    ax = _new_axes()
    plt.xlim(x1, x2)
    plt.ylim(y1, y2)
    plt.autoscale(False)
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    for xc in undef:
        if xc not in numzero:
            try:
                ax.axvline(x=xc, linestyle='--')  # vertical asymptote
            except Exception:
                pass  # complex/non-plottable root -- skip it
        else:
            x = xc + 0.01  # open circle just beside the hole
            ax.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,
                    markeredgecolor='g', markerfacecolor='None')
    _save('graph.png')

    # --- relative maxima: first derivative changes sign + -> - ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r', alpha=0.2)
    for i in range(1, xs.size - 1):
        if d1[i - 1] > 0 and d1[i + 1] < 0:
            ax.plot(xs[i], ys[i], marker='s', color='c')
            ax.axvline(x=xs[i], linestyle='--')
    _save('relmax.png')

    # --- relative minima: first derivative changes sign - -> + ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r', alpha=0.2)
    for i in range(1, xs.size - 1):
        if d1[i - 1] < 0 and d1[i + 1] > 0:
            ax.plot(xs[i], ys[i], marker='s', color='c')
            ax.axvline(x=xs[i], linestyle='--')
    _save('relmin.png')

    # --- function with its first derivative ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r')
    _save('deriv_graph.png')

    # --- function with first and second derivatives ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r')
    ax.plot(xs.tolist(), d2.tolist(), 'b')
    _save('deriv2_graph.png')

    # --- points of inflection: second derivative changes sign ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r', alpha=0.2)
    ax.plot(xs.tolist(), d2.tolist(), 'b', alpha=0.2)
    for i in range(1, xs.size - 1):
        if (d2[i - 1] < 0 < d2[i + 1]) or (d2[i - 1] > 0 > d2[i + 1]):
            ax.plot(xs[i], ys[i], marker='s', color='c')
            ax.axvline(x=xs[i], linestyle='--')
    _save('poi.png')

    # --- definite integral (trapezoidal rule) with shaded region ---
    ax = _new_axes()
    plt.xlim(x1, x2)
    plt.ylim(y1, y2)
    plt.autoscale(False)
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    for xc in undef:
        ax.axvline(x=xc, linestyle='--')
    x1int, x2int = int(ftcVal1), int(ftcVal2)
    sectionx = np.arange(x1int, x2int, 1e-05)
    sectiony = _sample(sectionx)
    plt.fill_between(sectionx, sectiony)
    global area
    area = 0
    for i in range(sectionx.size - 1):
        area = area + trapz(sectiony[i], sectiony[i + 1])
    print(area)
    _save('ftc.png')
<|reserved_special_token_0|>
def testFunc(inp):
print('printing user input from testFunc - ' + inp)
pass
@app.route('/', methods=['GET', 'POST'])
@app.route('/graph', methods=['GET', 'POST'])
def graph():
    """Main page: on POST, read the form values and regenerate every plot."""
    if request.method == 'POST':
        form = request.form
        functionGraph(form['Function'], form['dVal1'], form['dVal2'],
                      form['dVal3'], form['dVal4'],
                      form['ftcVal1'], form['ftcVal2'])
        print('user input = ' + str(input))
    return render_template('graph.html')
<|reserved_special_token_0|>
@app.route('/input', methods=['GET', 'POST'])
def input():
    """Serve the expression-entry form.

    NOTE(review): this view shadows the ``input`` builtin at module level.
    """
    page = 'input.html'
    return render_template(page)
<|reserved_special_token_0|>
@app.route('/der', methods=['GET', 'POST'])
def derGraph():
    """Serve the first-derivative graph page."""
    page = 'graph2.html'
    return render_template(page)
@app.route('/der2', methods=['GET', 'POST'])
def der2Graph():
    """Serve the second-derivative graph page."""
    page = 'graph3.html'
    return render_template(page)
@app.route('/relmax', methods=['GET', 'POST'])
def relmax():
    """Serve the relative-maxima page."""
    page = 'relmax.html'
    return render_template(page)
@app.route('/relmin', methods=['GET', 'POST'])
def relmin():
    """Serve the relative-minima page."""
    page = 'relmin.html'
    return render_template(page)
@app.route('/poi', methods=['GET', 'POST'])
def poi():
    """Serve the points-of-inflection page."""
    page = 'poi.html'
    return render_template(page)
@app.route('/ftc', methods=['GET', 'POST'])
def ftc():
    """Serve the integral page with the last area computed by functionGraph."""
    global area
    result_text = str(area)
    return render_template('ftc.html', result=result_text)
@app.route('/in1', methods=['GET', 'POST'])
def in1():
    """Serve the in1 page."""
    page = 'in1.html'
    return render_template(page)
@app.route('/out1', methods=['GET', 'POST'])
def out1():
    """Serve the out1 page."""
    page = 'out1.html'
    return render_template(page)
@app.after_request
def add_header(response):
    """Stamp no-cache headers so regenerated plot PNGs are always re-fetched."""
    hdrs = response.headers
    hdrs['X-UA-Compatible'] = 'IE=Edge,chrome=1'
    hdrs['Cache-Control'] = 'public, max-age=0'
    return response
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):
    """Generate the full set of analysis plots for a user-entered function.

    Saves graph.png (function with asymptotes/holes), relmax.png and
    relmin.png (relative extrema), deriv_graph.png and deriv2_graph.png
    (first/second-derivative overlays), poi.png (inflection points) and
    ftc.png (shaded definite integral) under static/images, and stores the
    trapezoidal-rule area in the module-level global ``area``.

    Parameters:
        function: expression in ``x``, evaluated with ``eval`` -- trusted
            input only; never expose this path to untrusted users.
        dVal1, dVal2: x-axis window bounds (stringly-typed ints).
        dVal3, dVal4: y-axis window bounds.
        ftcVal1, ftcVal2: integration bounds for the area computation.

    Fixes over the previous version: ``plt.clf`` was referenced without
    being called (leaking figure state), the second-derivative endpoints
    were left as uninitialized ``np.empty`` garbage (``np.delete`` return
    values were discarded), a dead trailing ``global area`` was removed,
    and seven near-identical copy-pasted plotting passes are collapsed
    into shared helpers.
    """
    print('printing user input from functionGraph - ' + function)
    print(dVal1, dVal2, dVal3, dVal4)
    y = function
    x1, x2 = int(dVal1), int(dVal2)
    y1, y2 = int(dVal3), int(dVal4)
    print('Processing...')
    out_dir = (
        '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/'
        )

    def _sample(xs):
        # Evaluate the user expression at every sample point.
        ys = np.empty(xs.size)
        for i, x in enumerate(np.nditer(xs)):
            ys[i] = eval(y)  # SECURITY: eval of user-supplied text
        return ys

    def _first_derivative(xs):
        # Pointwise first derivative via the module-level diff() helper.
        ys = np.empty(xs.size)
        for i, x in enumerate(np.nditer(xs)):
            ys[i] = diff(y, x)
        return ys

    def _second_derivative(d1):
        # Second derivative from first-derivative samples via diff2();
        # endpoints are padded with their neighbours instead of being left
        # as uninitialized memory.
        d2 = np.empty(d1.size)
        for i in range(1, d1.size - 1):
            d2[i] = diff2(d1[i - 1], d1[i + 1])
        if d1.size > 2:
            d2[0] = d2[1]
            d2[-1] = d2[-2]
        return d2

    def _new_axes():
        # Fresh 10x10 figure with the shared grid/axis-line styling.
        plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w',
                   edgecolor='k')
        ax = plt.subplot(2, 2, 1)
        ax.grid(True, which='both')
        ax.axhline(y=0, color='k')
        ax.axvline(x=0, color='k')
        return ax

    def _save(name):
        # Clamp to the requested window, write the PNG, then clear the
        # figure (the old code said ``plt.clf`` without calling it).
        plt.xlim(left=x1, right=x2)
        plt.ylim(top=y2, bottom=y1)
        plt.savefig(out_dir + name, bbox_inches='tight')
        plt.clf()

    xs = np.arange(x1, x2, 0.01)
    ys = _sample(xs)
    d1 = _first_derivative(xs)
    d2 = _second_derivative(d1)

    # Symbolic analysis: where the denominator vanishes the function is
    # undefined (asymptote), unless the numerator vanishes too (hole).
    yParsed = parse_expr(y, evaluate=False)
    numer, denom = yParsed.as_numer_denom()
    undef = sympy.solve(denom)
    numzero = sympy.solve(numer)

    # --- base graph with asymptotes and removable discontinuities ---
    ax = _new_axes()
    plt.xlim(x1, x2)
    plt.ylim(y1, y2)
    plt.autoscale(False)
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    for xc in undef:
        if xc not in numzero:
            try:
                ax.axvline(x=xc, linestyle='--')  # vertical asymptote
            except Exception:
                pass  # complex/non-plottable root -- skip it
        else:
            x = xc + 0.01  # open circle just beside the hole
            ax.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,
                    markeredgecolor='g', markerfacecolor='None')
    _save('graph.png')

    # --- relative maxima: first derivative changes sign + -> - ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r', alpha=0.2)
    for i in range(1, xs.size - 1):
        if d1[i - 1] > 0 and d1[i + 1] < 0:
            ax.plot(xs[i], ys[i], marker='s', color='c')
            ax.axvline(x=xs[i], linestyle='--')
    _save('relmax.png')

    # --- relative minima: first derivative changes sign - -> + ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r', alpha=0.2)
    for i in range(1, xs.size - 1):
        if d1[i - 1] < 0 and d1[i + 1] > 0:
            ax.plot(xs[i], ys[i], marker='s', color='c')
            ax.axvline(x=xs[i], linestyle='--')
    _save('relmin.png')

    # --- function with its first derivative ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r')
    _save('deriv_graph.png')

    # --- function with first and second derivatives ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r')
    ax.plot(xs.tolist(), d2.tolist(), 'b')
    _save('deriv2_graph.png')

    # --- points of inflection: second derivative changes sign ---
    ax = _new_axes()
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    ax.plot(xs.tolist(), d1.tolist(), 'r', alpha=0.2)
    ax.plot(xs.tolist(), d2.tolist(), 'b', alpha=0.2)
    for i in range(1, xs.size - 1):
        if (d2[i - 1] < 0 < d2[i + 1]) or (d2[i - 1] > 0 > d2[i + 1]):
            ax.plot(xs[i], ys[i], marker='s', color='c')
            ax.axvline(x=xs[i], linestyle='--')
    _save('poi.png')

    # --- definite integral (trapezoidal rule) with shaded region ---
    ax = _new_axes()
    plt.xlim(x1, x2)
    plt.ylim(y1, y2)
    plt.autoscale(False)
    ax.plot(xs.tolist(), ys.tolist(), 'g')
    for xc in undef:
        ax.axvline(x=xc, linestyle='--')
    x1int, x2int = int(ftcVal1), int(ftcVal2)
    sectionx = np.arange(x1int, x2int, 1e-05)
    sectiony = _sample(sectionx)
    plt.fill_between(sectionx, sectiony)
    global area
    area = 0
    for i in range(sectionx.size - 1):
        area = area + trapz(sectiony[i], sectiony[i + 1])
    print(area)
    _save('ftc.png')
<|reserved_special_token_0|>
def testFunc(inp):
print('printing user input from testFunc - ' + inp)
pass
@app.route('/', methods=['GET', 'POST'])
@app.route('/graph', methods=['GET', 'POST'])
def graph():
    """Main page: on POST, read the form values and regenerate every plot."""
    if request.method == 'POST':
        form = request.form
        functionGraph(form['Function'], form['dVal1'], form['dVal2'],
                      form['dVal3'], form['dVal4'],
                      form['ftcVal1'], form['ftcVal2'])
        print('user input = ' + str(input))
    return render_template('graph.html')
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Serve the home page."""
    page = 'home.html'
    return render_template(page)
@app.route('/input', methods=['GET', 'POST'])
def input():
    """Serve the expression-entry form.

    NOTE(review): this view shadows the ``input`` builtin at module level.
    """
    page = 'input.html'
    return render_template(page)
<|reserved_special_token_0|>
@app.route('/der', methods=['GET', 'POST'])
def derGraph():
    """Serve the first-derivative graph page."""
    page = 'graph2.html'
    return render_template(page)
@app.route('/der2', methods=['GET', 'POST'])
def der2Graph():
    """Serve the second-derivative graph page."""
    page = 'graph3.html'
    return render_template(page)
@app.route('/relmax', methods=['GET', 'POST'])
def relmax():
    """Serve the relative-maxima page."""
    page = 'relmax.html'
    return render_template(page)
@app.route('/relmin', methods=['GET', 'POST'])
def relmin():
    """Serve the relative-minima page."""
    page = 'relmin.html'
    return render_template(page)
@app.route('/poi', methods=['GET', 'POST'])
def poi():
    """Serve the points-of-inflection page."""
    page = 'poi.html'
    return render_template(page)
@app.route('/ftc', methods=['GET', 'POST'])
def ftc():
    """Serve the integral page with the last area computed by functionGraph."""
    global area
    result_text = str(area)
    return render_template('ftc.html', result=result_text)
@app.route('/in1', methods=['GET', 'POST'])
def in1():
    """Serve the in1 page."""
    page = 'in1.html'
    return render_template(page)
@app.route('/out1', methods=['GET', 'POST'])
def out1():
    """Serve the out1 page."""
    page = 'out1.html'
    return render_template(page)
@app.after_request
def add_header(response):
    """Stamp no-cache headers so regenerated plot PNGs are always re-fetched."""
    hdrs = response.headers
    hdrs['X-UA-Compatible'] = 'IE=Edge,chrome=1'
    hdrs['Cache-Control'] = 'public, max-age=0'
    return response
# Entry point: serve on all interfaces, port 8080, with debug disabled.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080, debug=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Flask application object. SEND_FILE_MAX_AGE_DEFAULT=1 keeps the
# regenerated plot PNGs from being served stale out of the browser cache.
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
def functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):
print('printing user input from functionGraph - ' + function)
print(dVal1, dVal2, dVal3, dVal4)
x1 = -5
x2 = 5
print('1st input:')
y = function
def f(x):
return eval(y)
"""print("Domain Val 1:")
x1 = float(input())
print("Domain Val 2:")
x2 = float(input())
print("Range Val 1:")
y1 = float(input())
print("Range Val 2:")
y2 = float(input())
"""
x1 = int(dVal1)
x2 = int(dVal2)
y1 = int(dVal3)
y2 = int(dVal4)
print('Processing...')
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
yParsed = parse_expr(y, evaluate=False)
n, d = yParsed.as_numer_denom()
undef = sympy.solve(d)
numzero = sympy.solve(n)
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
plt.xlim(x1, x2)
plt.ylim(y1, y2)
plt.autoscale(False)
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
for x in undef:
if x not in numzero:
try:
ax1.axvline(x=x, linestyle='--')
except:
pass
else:
x = x + 0.01
ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,
markeredgecolor='g', markerfacecolor='None')
count = 0
"""for zero in numzero:
if zero in undef:
ax1.plot(zero, f(zero), marker='s', color='green')
count = count + 1"""
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'
, bbox_inches='tight')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
count = 1
limit = len(yVal2) - 1
for z in yVal2:
if count == limit:
break
if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')
ax1.axvline(x=xVal1[count], linestyle='--')
count = count + 1
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'
, bbox_inches='tight')
plt.clf()
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
count = 1
limit = len(yVal2) - 1
for z in yVal2:
if count == limit:
break
if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')
ax1.axvline(x=xVal1[count], linestyle='--')
count = count + 1
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'
, bbox_inches='tight')
plt.clf()
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'
, bbox_inches='tight')
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange3 = np.arange(x1, x2, 0.01)
yRange3 = np.empty(xRange3.size)
"""for x in np.nditer(xRange3):
yRange3[count] = diff2(y, x)
count = count + 1"""
count = 1
limit = yRange2.size - 1
for x in np.nditer(xRange3):
if count == limit:
break
yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])
count = count + 1
np.delete(xRange3, -1)
np.delete(yRange3, -1)
xVal3 = xRange3.tolist()
yVal3 = yRange3.tolist()
print('XXXXXXXXXX')
for x in xVal3:
print(x)
print('YYYYYYYYYY')
for yVal in yVal3:
print(yVal)
ax1.plot(xVal3, yVal3, 'b')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'
, bbox_inches='tight')
plt.clf
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange3 = np.arange(x1, x2, 0.01)
yRange3 = np.empty(xRange3.size)
count = 1
limit = yRange2.size - 1
for x in np.nditer(xRange3):
if count == limit:
break
yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])
count = count + 1
np.delete(xRange3, -1)
np.delete(yRange3, -1)
xVal3 = xRange3.tolist()
yVal3 = yRange3.tolist()
ax1.plot(xVal3, yVal3, 'b', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
count = 1
limit = len(yVal2) - 1
for z in yVal3:
if count == limit:
break
if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:
points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',
color='c')
ax1.axvline(x=xVal2[count], linestyle='--')
count = count + 1
count = 1
limit = len(yVal2) - 1
for z in yVal3:
if count == limit:
break
if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:
points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',
color='c')
ax1.axvline(x=xVal2[count], linestyle='--')
count = count + 1
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'
, bbox_inches='tight')
plt.clf()
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
n, d = yParsed.as_numer_denom()
undef = sympy.solve(d)
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'
)
plt.xlim(x1, x2)
plt.ylim(y1, y2)
plt.autoscale(False)
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
n, d = yParsed.as_numer_denom()
s = Symbol('s', real=True)
undef = sympy.solve(d, s)
for xc in undef:
ax1.axvline(x=xc, linestyle='--')
"""
print("Integration x1:")
x1int = float(input())
print("Integration x2:")
x2int = float(input())
"""
x1int = int(ftcVal1)
x2int = int(ftcVal2)
print('Processing...')
sectionx = np.arange(x1int, x2int, 1e-05)
sectiony = np.empty(sectionx.size)
count = 0
for x in np.nditer(sectionx):
sectiony[count] = eval(y)
count = count + 1
plt.fill_between(sectionx, sectiony)
global area
area = 0
count = 0
limit = sectionx.size - 1
for x in np.nditer(sectionx):
if count == limit:
break
trapSum = trapz(sectiony[count], sectiony[count + 1])
area = area + trapSum
count = count + 1
print(area)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig(
'/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'
, bbox_inches='tight')
global area
# Module-level leftovers from the original script version. functionGraph
# recomputes its own x1/x2/xRange1, so these appear unused — TODO confirm
# nothing else reads them before removing.
x1 = -5
x2 = 5
xRange1 = np.arange(x1, x2, 0.01)
def testFunc(inp):
print('printing user input from testFunc - ' + inp)
pass
@app.route('/', methods=['GET', 'POST'])
@app.route('/graph', methods=['GET', 'POST'])
def graph():
    """Main page. On POST, read the form fields and regenerate every plot PNG."""
    if request.method == 'POST':
        form = request.form
        # Same lookup order as before; a missing key still aborts with 400.
        field_names = ('Function', 'dVal1', 'dVal2', 'dVal3', 'dVal4',
                       'ftcVal1', 'ftcVal2')
        values = [form[name] for name in field_names]
        functionGraph(*values)
        # NOTE: `input` here is the /input view function, not the builtin.
        print('user input = ' + str(input))
    return render_template('graph.html')
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Serve the static home page."""
    return render_template('home.html')
@app.route('/input', methods=['GET', 'POST'])
def input():
    """Serve the function-input form page.

    NOTE(review): this view shadows the builtin ``input`` at module level;
    renaming would change the Flask endpoint name, so it is left as-is.
    """
    return render_template('input.html')
# (removed text-extraction artifact)
@app.route('/der', methods=['GET', 'POST'])
def derGraph():
    """Serve the first-derivative graph page."""
    return render_template('graph2.html')
@app.route('/der2', methods=['GET', 'POST'])
def der2Graph():
    """Serve the second-derivative graph page."""
    return render_template('graph3.html')
@app.route('/relmax', methods=['GET', 'POST'])
def relmax():
    """Serve the relative-maxima page."""
    return render_template('relmax.html')
@app.route('/relmin', methods=['GET', 'POST'])
def relmin():
    """Serve the relative-minima page."""
    return render_template('relmin.html')
@app.route('/poi', methods=['GET', 'POST'])
def poi():
    """Serve the points-of-inflection page."""
    return render_template('poi.html')
@app.route('/ftc', methods=['GET', 'POST'])
def ftc():
    """Serve the integral (FTC) page, showing the area computed by functionGraph."""
    global area
    return render_template('ftc.html', result=str(area))
@app.route('/in1', methods=['GET', 'POST'])
def in1():
    """Serve the in1 template page."""
    return render_template('in1.html')
@app.route('/out1', methods=['GET', 'POST'])
def out1():
    """Serve the out1 template page."""
    return render_template('out1.html')
@app.after_request
def add_header(response):
    """Disable client-side caching so freshly regenerated plot PNGs are re-fetched."""
    response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
    response.headers['Cache-Control'] = 'public, max-age=0'
    return response
if __name__ == '__main__':
    # Listen on all interfaces; debug mode off.
    app.run(host='0.0.0.0', port=8080, debug=False)
# (removed text-extraction artifact)
from flask import Flask, render_template, request
import matplotlib.pyplot as plt
import numpy as np
import sympy
from DerivTest import diff, diff2, trapz
from sympy.parsing.sympy_parser import parse_expr
from sympy import Symbol
#from ParsingClass import Parser
#from scitools.StringFunction import StringFunction
#from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
# Flask application; static files may be cached for at most 1 second so
# regenerated plot images are picked up on reload (see add_header below
# for the per-response cache headers).
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
def functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):
    """Generate all plot PNGs for a user-supplied expression in ``x``.

    function -- expression string, evaluated with eval() (SECURITY: only
        safe for trusted input; never expose this to untrusted users)
    dVal1, dVal2 -- x-axis window bounds (converted with int())
    dVal3, dVal4 -- y-axis window bounds (converted with int())
    ftcVal1, ftcVal2 -- integration bounds for the area/FTC plot

    Side effects: writes graph.png, relmax.png, relmin.png, deriv_graph.png,
    deriv2_graph.png, poi.png and ftc.png under a machine-specific absolute
    static/images path, and sets the module-level ``area`` global.
    diff/diff2/trapz come from DerivTest — presumably numerical first/second
    derivative and a trapezoid step; TODO confirm their exact contracts.
    """
    print("printing user input from functionGraph - " + function)
    print(dVal1, dVal2, dVal3, dVal4)
    #parser = Parser()
    #x=np.array(range(10))
    x1 = -5;
    x2 = 5;
    print("1st input:")
    y=function
    def f(x):
        # Evaluate the user expression at x (closure over y).
        return eval(y)
    '''print("Domain Val 1:")
    x1 = float(input())
    print("Domain Val 2:")
    x2 = float(input())
    print("Range Val 1:")
    y1 = float(input())
    print("Range Val 2:")
    y2 = float(input())
    '''
    x1=int(dVal1)
    x2=int(dVal2)
    y1=int(dVal3)
    y2=int(dVal4)
    print("Processing...")
    #############################################
    # Base graph: sample f on [x1,x2), mark asymptotes and holes
    #############################################
    xRange1 = np.arange(x1, x2, 0.01)
    yRange1 = np.empty(xRange1.size)
    count = 0
    # Split the expression into numerator/denominator to find where the
    # denominator vanishes (candidate vertical asymptotes or holes).
    yParsed = parse_expr(y, evaluate=False)
    n, d = yParsed.as_numer_denom()
    #s = Symbol('s', real = True)
    undef = sympy.solve(d)
    numzero = sympy.solve(n)
    plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
    plt.xlim(x1, x2)
    plt.ylim(y1, y2)
    plt.autoscale(False)
    for x in np.nditer(xRange1):
        yRange1[count] = eval(y)
        count = count+1
    xVal1 = xRange1.tolist()
    yVal1 = yRange1.tolist()
    ax1 = plt.subplot(2,2,1)
    ax1.plot(xVal1, yVal1, 'g')
    for x in undef:
        if x not in numzero:
            # Denominator-only zero: vertical asymptote.
            try:
                ax1.axvline(x=x, linestyle = '--')
            except:
                # NOTE(review): bare except silently skips non-real roots.
                pass
        else:
            # 0/0 point: draw an open marker just past the hole.
            x=x+0.01
            ax1.plot(x, eval(y), "o", markersize=7, markeredgewidth=1, markeredgecolor='g',markerfacecolor='None')
    count = 0
    '''for zero in numzero:
        if zero in undef:
            ax1.plot(zero, f(zero), marker='s', color='green')
            count = count + 1'''
    #ax1.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    plt.xlim(left=x1, right=x2)
    plt.ylim(top=y2, bottom=y1)
    #plt.axis([0,6,0,30])
    plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png', bbox_inches = 'tight')
    #############################################
    # Relative Extrema
    #############################################
    # Sample the first derivative; a +/- sign change marks a relative max.
    xRange2 = np.arange(x1, x2, 0.01)
    count = 0
    yRange2 = np.empty(xRange2.size)
    for x in np.nditer(xRange2):
        yRange2[count] = diff(y, x)
        count = count + 1
    xVal2 = xRange2.tolist()
    yVal2 = yRange2.tolist()
    ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
    # ax2.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    count = 1
    limit = len(yVal2) - 1
    for z in yVal2:
        if count == limit:
            break
        if (yVal2[count - 1]>0 and yVal2[count + 1]<0):
            ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')
            ax1.axvline(x=xVal1[count], linestyle='--')
        count = count + 1
    plt.xlim(left=x1, right=x2)
    plt.ylim(top=y2, bottom=y1)
    plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png', bbox_inches='tight')
    plt.clf()
    # Rebuild the figure and repeat with the opposite sign change for minima.
    xRange1 = np.arange(x1, x2, 0.01)
    yRange1 = np.empty(xRange1.size)
    count = 0
    for x in np.nditer(xRange1):
        yRange1[count] = eval(y)
        count = count + 1
    plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
    xVal1 = xRange1.tolist()
    yVal1 = yRange1.tolist()
    ax1 = plt.subplot(2, 2, 1)
    ax1.plot(xVal1, yVal1,'g')
    # ax1.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    xRange2 = np.arange(x1, x2, 0.01)
    count = 0
    yRange2 = np.empty(xRange2.size)
    for x in np.nditer(xRange2):
        yRange2[count] = diff(y, x)
        count = count + 1
    xVal2 = xRange2.tolist()
    yVal2 = yRange2.tolist()
    ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
    # ax2.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    count = 1
    limit = len(yVal2) - 1
    for z in yVal2:
        if count == limit:
            break
        if (yVal2[count - 1] < 0 and yVal2[count + 1] > 0):
            ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')
            ax1.axvline(x=xVal1[count], linestyle='--')
        count = count + 1
    plt.xlim(left=x1, right=x2)
    plt.ylim(top=y2, bottom=y1)
    plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png', bbox_inches='tight')
    plt.clf()
    #############################################
    # First Derivative
    #############################################
    xRange1 = np.arange(x1,x2, 0.01)
    yRange1 = np.empty(xRange1.size)
    count = 0
    for x in np.nditer(xRange1):
        yRange1[count] = eval(y)
        count = count+1
    plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
    xVal1 = xRange1.tolist()
    yVal1 = yRange1.tolist()
    ax1 = plt.subplot(2,2,1)
    ax1.plot(xVal1, yVal1, 'g')
    #ax1.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    xRange2 = np.arange(x1, x2, 0.01)
    count = 0
    yRange2 = np.empty(xRange2.size)
    for x in np.nditer(xRange2):
        yRange2[count] = diff(y,x)
        count = count+1
    xVal2 = xRange2.tolist()
    yVal2 = yRange2.tolist()
    ax1.plot(xVal2, yVal2, 'r')
    #ax2.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    # NOTE(review): here ``d`` is still the sympy denominator from above,
    # so this branch fires only when the expression has denominator 1 —
    # TODO confirm this is the intended meaning of the check.
    if d == 1:
        plt.xlim(left=x1, right=x2)
        plt.ylim(top=y2, bottom=y1)
    plt.xlim(left=x1, right=x2)
    plt.ylim(top=y2, bottom=y1)
    plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png', bbox_inches = 'tight')
    #############################################
    # SECOND DERIVATIVE
    #############################################
    xRange1 = np.arange(x1, x2, 0.01)
    yRange1 = np.empty(xRange1.size)
    count = 0
    for x in np.nditer(xRange1):
        yRange1[count] = eval(y)
        count = count + 1
    plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
    xVal1 = xRange1.tolist()
    yVal1 = yRange1.tolist()
    ax1 = plt.subplot(2, 2, 1)
    ax1.plot(xVal1, yVal1, 'g')
    # ax1.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    xRange2 = np.arange(x1, x2, 0.01)
    count = 0
    yRange2 = np.empty(xRange2.size)
    for x in np.nditer(xRange2):
        yRange2[count] = diff(y, x)
        count = count + 1
    xVal2 = xRange2.tolist()
    yVal2 = yRange2.tolist()
    ax1.plot(xVal2, yVal2, 'r')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    xRange3 = np.arange(x1, x2, 0.01)
    yRange3 = np.empty(xRange3.size)
    '''for x in np.nditer(xRange3):
        yRange3[count] = diff2(y, x)
        count = count + 1'''
    # Second derivative from neighbouring first-derivative samples.
    count = 1
    limit = yRange2.size-1
    for x in np.nditer(xRange3):
        if count == limit:
            break
        yRange3[count] = diff2(yRange2[count-1], yRange2[count+1])
        count = count + 1
    # NOTE(review): np.delete returns a new array; these two results are
    # discarded, so nothing is actually removed here.
    np.delete(xRange3, -1)
    np.delete(yRange3, -1)
    xVal3 = xRange3.tolist()
    yVal3 = yRange3.tolist()
    print("XXXXXXXXXX")
    for x in xVal3:
        print (x)
    print("YYYYYYYYYY")
    for yVal in yVal3:
        print (yVal)
    ax1.plot(xVal3, yVal3, 'b')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    if d == 1:
        plt.xlim(left=x1, right=x2)
        plt.ylim(top=y2, bottom=y1)
    plt.xlim(left=x1, right=x2)
    plt.ylim(top=y2, bottom=y1)
    plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png', bbox_inches='tight')
    # NOTE(review): missing parentheses — this references plt.clf without
    # calling it, so the figure is NOT cleared here.
    plt.clf
    #############################################
    #POINTS OF INFLECTION
    #############################################
    xRange1 = np.arange(x1, x2, 0.01)
    yRange1 = np.empty(xRange1.size)
    count = 0
    for x in np.nditer(xRange1):
        yRange1[count] = eval(y)
        count = count + 1
    plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
    xVal1 = xRange1.tolist()
    yVal1 = yRange1.tolist()
    ax1 = plt.subplot(2, 2, 1)
    ax1.plot(xVal1, yVal1, 'g')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    xRange2 = np.arange(x1, x2, 0.01)
    count = 0
    yRange2 = np.empty(xRange2.size)
    for x in np.nditer(xRange2):
        yRange2[count] = diff(y, x)
        count = count + 1
    xVal2 = xRange2.tolist()
    yVal2 = yRange2.tolist()
    ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    xRange3 = np.arange(x1, x2, 0.01)
    yRange3 = np.empty(xRange3.size)
    count = 1
    limit = yRange2.size - 1
    for x in np.nditer(xRange3):
        if count == limit:
            break
        yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])
        count = count + 1
    np.delete(xRange3, -1)
    np.delete(yRange3, -1)
    xVal3 = xRange3.tolist()
    yVal3 = yRange3.tolist()
    ax1.plot(xVal3, yVal3, 'b', alpha=0.2)
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    if d == 1:
        plt.xlim(left=x1, right=x2)
        plt.ylim(top=y2, bottom=y1)
    # Inflection points: sign changes (both directions) of the second derivative.
    count = 1
    limit = len(yVal2) - 1
    for z in yVal3:
        if count == limit:
            break
        if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:
            points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c')
            ax1.axvline(x=xVal2[count], linestyle='--')
        count = count + 1
    count = 1
    limit = len(yVal2) - 1
    for z in yVal3:
        if count == limit:
            break
        if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:
            points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c')
            ax1.axvline(x=xVal2[count], linestyle='--')
        count = count + 1
    if d == 1:
        plt.xlim(left=x1, right=x2)
        plt.ylim(top=y2, bottom=y1)
    plt.xlim(left=x1, right=x2)
    plt.ylim(top=y2, bottom=y1)
    plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png', bbox_inches='tight')
    plt.clf()
    #############################################
    # FTC
    #############################################
    xRange1 = np.arange(x1, x2, 0.01)
    yRange1 = np.empty(xRange1.size)
    count = 0
    n, d = yParsed.as_numer_denom()
    undef = sympy.solve(d)
    plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
    plt.xlim(x1, x2)
    plt.ylim(y1, y2)
    plt.autoscale(False)
    for x in np.nditer(xRange1):
        yRange1[count] = eval(y)
        count = count + 1
    xVal1 = xRange1.tolist()
    yVal1 = yRange1.tolist()
    ax1 = plt.subplot(2, 2, 1)
    ax1.plot(xVal1, yVal1, 'g')
    n, d = yParsed.as_numer_denom()
    s = Symbol('s', real=True)
    undef = sympy.solve(d, s)
    for xc in undef:
        ax1.axvline(x=xc, linestyle='--')
    '''
    print("Integration x1:")
    x1int = float(input())
    print("Integration x2:")
    x2int = float(input())
    '''
    x1int = int(ftcVal1)
    x2int = int(ftcVal2)
    print("Processing...")
    # Shade the integration region and accumulate the area into the
    # module-level ``area`` global, one trapz() step per sample pair.
    sectionx = np.arange(x1int, x2int, 0.00001)
    sectiony = np.empty(sectionx.size)
    count = 0
    for x in np.nditer(sectionx):
        sectiony[count] = eval(y)
        count = count+1
    plt.fill_between(sectionx, sectiony)
    global area
    area = 0
    count = 0
    limit = sectionx.size-1
    for x in np.nditer(sectionx):
        if(count == limit):
            break
        trapSum = trapz(sectiony[count], sectiony[count+1])
        area = area + trapSum
        count = count + 1
    print(area)
    # ax1.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')
    if d == 1:
        plt.xlim(left=x1, right=x2)
        plt.ylim(top=y2, bottom=y1)
    plt.xlim(left=x1, right=x2)
    plt.ylim(top=y2, bottom=y1)
    plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png', bbox_inches='tight')
    # Redundant: area is already declared global above in this scope.
    global area
# Module-level leftovers from the original script version. functionGraph
# recomputes its own x1/x2/xRange1, so these appear unused — TODO confirm
# nothing else reads them before removing.
x1 = -5;
x2 = 5;
xRange1 = np.arange(x1,x2, 0.01)
#print("1st input")
#y=input()
#yParsed = parse_expr(y, evaluate=False)
#functionGraph(y)
def testFunc(inp):
print("printing user input from testFunc - " +inp)
pass
##############################################
#works on CHROME ONLY, caching issue in Safari
##############################################
@app.route('/', methods=['GET', 'POST'])
@app.route('/graph', methods=['GET', 'POST'])
def graph():
    """Main page. On POST, read the form fields and regenerate every plot PNG."""
    if request.method == 'POST':
        form = request.form
        # Same lookup order as before; a missing key still aborts with 400.
        field_names = ('Function', 'dVal1', 'dVal2', 'dVal3', 'dVal4',
                       'ftcVal1', 'ftcVal2')
        values = [form[name] for name in field_names]
        functionGraph(*values)
        # NOTE: `input` here is the /input view function, not the builtin.
        print("user input = " + str(input))
    return render_template("graph.html")
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Serve the static home page."""
    return render_template('home.html')
@app.route('/input', methods=['GET', 'POST'])
def input():
    """Serve the function-input form page.

    NOTE(review): this view shadows the builtin ``input`` at module level;
    renaming would change the Flask endpoint name, so it is left as-is.
    """
    return render_template('input.html')
# Dead code preserved as a string literal: an earlier POST handler for /input.
'''@app.route('/input', methods=['GET', 'POST'])
def input_post():
    if request.method == 'POST':
        result = request.form['Function']
        print(result)
        return render_template("graph.html", result=result)'''
@app.route('/der', methods=['GET', 'POST'])
def derGraph():
    """Serve the first-derivative graph page."""
    return render_template('graph2.html')
@app.route('/der2', methods=['GET', 'POST'])
def der2Graph():
    """Serve the second-derivative graph page."""
    return render_template('graph3.html')
@app.route('/relmax', methods=['GET', 'POST'])
def relmax():
    """Serve the relative-maxima page."""
    return render_template('relmax.html')
@app.route('/relmin', methods=['GET', 'POST'])
def relmin():
    """Serve the relative-minima page."""
    return render_template('relmin.html')
@app.route('/poi', methods=['GET', 'POST'])
def poi():
    """Serve the points-of-inflection page."""
    return render_template('poi.html')
@app.route('/ftc', methods=['GET', 'POST'])
def ftc():
    """Serve the integral (FTC) page, showing the area computed by functionGraph."""
    global area
    return render_template('ftc.html', result = str(area))
@app.route('/in1', methods=['GET', 'POST'])
def in1():
    """Serve the in1 template page."""
    return render_template('in1.html')
@app.route('/out1', methods=['GET', 'POST'])
def out1():
    """Serve the out1 template page."""
    return render_template('out1.html')
@app.after_request
def add_header(response):
    """Disable client-side caching so freshly regenerated plot PNGs are re-fetched."""
    response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
    response.headers['Cache-Control'] = 'public, max-age=0'
    return response
if __name__ == '__main__':
    # Development entry point: bind on all interfaces so the server is
    # reachable from other machines on the LAN; debug disabled.
    app.run(host='0.0.0.0', port=8080, debug=False)
|
flexible
|
{
"blob_id": "9dc8449bcc0c6c6ffb5ced5724ca632b6578bf1b",
"index": 9170,
"step-1": "<mask token>\n\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print('printing user input from functionGraph - ' + function)\n print(dVal1, dVal2, dVal3, dVal4)\n x1 = -5\n x2 = 5\n print('1st input:')\n y = function\n\n def f(x):\n return eval(y)\n \"\"\"print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n \"\"\"\n x1 = int(dVal1)\n x2 = int(dVal2)\n y1 = int(dVal3)\n y2 = int(dVal4)\n print('Processing...')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle='--')\n except:\n pass\n else:\n x = x + 0.01\n ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,\n markeredgecolor='g', markerfacecolor='None')\n count = 0\n \"\"\"for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1\"\"\"\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'\n , bbox_inches='tight')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = 
yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n 
yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'\n , bbox_inches='tight')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n \"\"\"for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1\"\"\"\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n 
yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print('XXXXXXXXXX')\n for x in xVal3:\n print(x)\n print('YYYYYYYYYY')\n for yVal in yVal3:\n print(yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'\n , bbox_inches='tight')\n plt.clf\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n 
plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n \"\"\"\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n \"\"\"\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print('Processing...')\n sectionx = np.arange(x1int, x2int, 1e-05)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count + 1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size - 1\n for x in 
np.nditer(sectionx):\n if count == limit:\n break\n trapSum = trapz(sectiony[count], sectiony[count + 1])\n area = area + trapSum\n count = count + 1\n print(area)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'\n , bbox_inches='tight')\n\n\n<mask token>\n\n\ndef testFunc(inp):\n print('printing user input from testFunc - ' + inp)\n pass\n\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n print('user input = ' + str(input))\n return render_template('graph.html')\n\n\n<mask token>\n\n\n@app.route('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n\n<mask token>\n\n\n@app.route('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n\n@app.route('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n\n@app.route('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n\n<mask token>\n\n\n@app.route('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result=str(area))\n\n\n@app.route('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\n\n<mask token>\n\n\n@app.after_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n 
response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print('printing user input from functionGraph - ' + function)\n print(dVal1, dVal2, dVal3, dVal4)\n x1 = -5\n x2 = 5\n print('1st input:')\n y = function\n\n def f(x):\n return eval(y)\n \"\"\"print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n \"\"\"\n x1 = int(dVal1)\n x2 = int(dVal2)\n y1 = int(dVal3)\n y2 = int(dVal4)\n print('Processing...')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle='--')\n except:\n pass\n else:\n x = x + 0.01\n ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,\n markeredgecolor='g', markerfacecolor='None')\n count = 0\n \"\"\"for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1\"\"\"\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'\n , bbox_inches='tight')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = 
yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n 
yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'\n , bbox_inches='tight')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n \"\"\"for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1\"\"\"\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n 
yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print('XXXXXXXXXX')\n for x in xVal3:\n print(x)\n print('YYYYYYYYYY')\n for yVal in yVal3:\n print(yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'\n , bbox_inches='tight')\n plt.clf\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n 
plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n \"\"\"\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n \"\"\"\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print('Processing...')\n sectionx = np.arange(x1int, x2int, 1e-05)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count + 1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size - 1\n for x in 
np.nditer(sectionx):\n if count == limit:\n break\n trapSum = trapz(sectiony[count], sectiony[count + 1])\n area = area + trapSum\n count = count + 1\n print(area)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'\n , bbox_inches='tight')\n\n\n<mask token>\n\n\ndef testFunc(inp):\n print('printing user input from testFunc - ' + inp)\n pass\n\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n print('user input = ' + str(input))\n return render_template('graph.html')\n\n\n<mask token>\n\n\n@app.route('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n\n<mask token>\n\n\n@app.route('/der', methods=['GET', 'POST'])\ndef derGraph():\n return render_template('graph2.html')\n\n\n@app.route('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n\n@app.route('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n\n@app.route('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n\n@app.route('/poi', methods=['GET', 'POST'])\ndef poi():\n return render_template('poi.html')\n\n\n@app.route('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result=str(area))\n\n\n@app.route('/in1', methods=['GET', 'POST'])\ndef in1():\n return 
render_template('in1.html')\n\n\n@app.route('/out1', methods=['GET', 'POST'])\ndef out1():\n return render_template('out1.html')\n\n\n@app.after_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print('printing user input from functionGraph - ' + function)\n print(dVal1, dVal2, dVal3, dVal4)\n x1 = -5\n x2 = 5\n print('1st input:')\n y = function\n\n def f(x):\n return eval(y)\n \"\"\"print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n \"\"\"\n x1 = int(dVal1)\n x2 = int(dVal2)\n y1 = int(dVal3)\n y2 = int(dVal4)\n print('Processing...')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle='--')\n except:\n pass\n else:\n x = x + 0.01\n ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,\n markeredgecolor='g', markerfacecolor='None')\n count = 0\n \"\"\"for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1\"\"\"\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'\n , bbox_inches='tight')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = 
yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n 
yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'\n , bbox_inches='tight')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n \"\"\"for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1\"\"\"\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n 
yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print('XXXXXXXXXX')\n for x in xVal3:\n print(x)\n print('YYYYYYYYYY')\n for yVal in yVal3:\n print(yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'\n , bbox_inches='tight')\n plt.clf\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n 
plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n \"\"\"\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n \"\"\"\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print('Processing...')\n sectionx = np.arange(x1int, x2int, 1e-05)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count + 1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size - 1\n for x in 
np.nditer(sectionx):\n if count == limit:\n break\n trapSum = trapz(sectiony[count], sectiony[count + 1])\n area = area + trapSum\n count = count + 1\n print(area)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'\n , bbox_inches='tight')\n\n\nglobal area\n<mask token>\n\n\ndef testFunc(inp):\n print('printing user input from testFunc - ' + inp)\n pass\n\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n print('user input = ' + str(input))\n return render_template('graph.html')\n\n\n@app.route('/home', methods=['GET', 'POST'])\ndef home():\n return render_template('home.html')\n\n\n@app.route('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n\n<mask token>\n\n\n@app.route('/der', methods=['GET', 'POST'])\ndef derGraph():\n return render_template('graph2.html')\n\n\n@app.route('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n\n@app.route('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n\n@app.route('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n\n@app.route('/poi', methods=['GET', 'POST'])\ndef poi():\n return render_template('poi.html')\n\n\n@app.route('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return 
render_template('ftc.html', result=str(area))\n\n\n@app.route('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\n\n@app.route('/out1', methods=['GET', 'POST'])\ndef out1():\n return render_template('out1.html')\n\n\n@app.after_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=False)\n",
"step-4": "<mask token>\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1\n\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print('printing user input from functionGraph - ' + function)\n print(dVal1, dVal2, dVal3, dVal4)\n x1 = -5\n x2 = 5\n print('1st input:')\n y = function\n\n def f(x):\n return eval(y)\n \"\"\"print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n \"\"\"\n x1 = int(dVal1)\n x2 = int(dVal2)\n y1 = int(dVal3)\n y2 = int(dVal4)\n print('Processing...')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle='--')\n except:\n pass\n else:\n x = x + 0.01\n ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,\n markeredgecolor='g', markerfacecolor='None')\n count = 0\n \"\"\"for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1\"\"\"\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'\n , bbox_inches='tight')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, 
x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = 
np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'\n , bbox_inches='tight')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n \"\"\"for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1\"\"\"\n count = 1\n limit = yRange2.size - 1\n for 
x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print('XXXXXXXXXX')\n for x in xVal3:\n print(x)\n print('YYYYYYYYYY')\n for yVal in yVal3:\n print(yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'\n , bbox_inches='tight')\n plt.clf\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, 
color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n \"\"\"\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n \"\"\"\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print('Processing...')\n sectionx = np.arange(x1int, x2int, 1e-05)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count + 1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count 
= 0\n limit = sectionx.size - 1\n for x in np.nditer(sectionx):\n if count == limit:\n break\n trapSum = trapz(sectiony[count], sectiony[count + 1])\n area = area + trapSum\n count = count + 1\n print(area)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'\n , bbox_inches='tight')\n\n\nglobal area\nx1 = -5\nx2 = 5\nxRange1 = np.arange(x1, x2, 0.01)\n\n\ndef testFunc(inp):\n print('printing user input from testFunc - ' + inp)\n pass\n\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n print('user input = ' + str(input))\n return render_template('graph.html')\n\n\n@app.route('/home', methods=['GET', 'POST'])\ndef home():\n return render_template('home.html')\n\n\n@app.route('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n\n<mask token>\n\n\n@app.route('/der', methods=['GET', 'POST'])\ndef derGraph():\n return render_template('graph2.html')\n\n\n@app.route('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n\n@app.route('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n\n@app.route('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n\n@app.route('/poi', methods=['GET', 'POST'])\ndef poi():\n return render_template('poi.html')\n\n\n@app.route('/ftc', 
methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result=str(area))\n\n\n@app.route('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\n\n@app.route('/out1', methods=['GET', 'POST'])\ndef out1():\n return render_template('out1.html')\n\n\n@app.after_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=False)\n",
"step-5": "from flask import Flask, render_template, request\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sympy\nfrom DerivTest import diff, diff2, trapz\nfrom sympy.parsing.sympy_parser import parse_expr\nfrom sympy import Symbol\n#from ParsingClass import Parser\n#from scitools.StringFunction import StringFunction\n#from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField\n\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print(\"printing user input from functionGraph - \" + function)\n print(dVal1, dVal2, dVal3, dVal4)\n #parser = Parser()\n #x=np.array(range(10))\n x1 = -5;\n x2 = 5;\n print(\"1st input:\")\n y=function\n def f(x):\n return eval(y)\n '''print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n '''\n\n x1=int(dVal1)\n x2=int(dVal2)\n y1=int(dVal3)\n y2=int(dVal4)\n\n print(\"Processing...\")\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n #s = Symbol('s', real = True)\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count+1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2,2,1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle = '--')\n except:\n pass\n else:\n x=x+0.01\n ax1.plot(x, eval(y), \"o\", markersize=7, markeredgewidth=1, markeredgecolor='g',markerfacecolor='None')\n count = 0\n '''for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', 
color='green')\n count = count + 1'''\n #ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n #plt.axis([0,6,0,30])\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png', bbox_inches = 'tight')\n\n #############################################\n # Relative Extrema\n #############################################\n\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n # ax2.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if (yVal2[count - 1]>0 and yVal2[count + 1]<0):\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png', bbox_inches='tight')\n plt.clf()\n\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1,'g')\n # ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = 
yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n # ax2.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if (yVal2[count - 1] < 0 and yVal2[count + 1] > 0):\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png', bbox_inches='tight')\n plt.clf()\n\n\n #############################################\n # First Derivative\n #############################################\n\n xRange1 = np.arange(x1,x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count+1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2,2,1)\n ax1.plot(xVal1, yVal1, 'g')\n #ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y,x)\n count = count+1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n #ax2.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png', bbox_inches = 'tight')\n\n #############################################\n # SECOND DERIVATIVE\n #############################################\n xRange1 = np.arange(x1, 
x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n # ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n '''for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1'''\n count = 1\n limit = yRange2.size-1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count-1], yRange2[count+1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print(\"XXXXXXXXXX\")\n for x in xVal3:\n print (x)\n print(\"YYYYYYYYYY\")\n for yVal in yVal3:\n print (yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png', bbox_inches='tight')\n plt.clf\n #############################################\n #POINTS OF INFLECTION\n #############################################\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n 
count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png', 
bbox_inches='tight')\n plt.clf()\n\n #############################################\n # FTC\n #############################################\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n '''\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n '''\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print(\"Processing...\")\n sectionx = np.arange(x1int, x2int, 0.00001)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count+1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size-1\n for x in np.nditer(sectionx):\n if(count == limit):\n break\n trapSum = trapz(sectiony[count], sectiony[count+1])\n area = area + trapSum\n count = count + 1\n print(area)\n # ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png', bbox_inches='tight')\n\nglobal area\n\nx1 = -5;\nx2 = 5;\nxRange1 = np.arange(x1,x2, 0.01)\n#print(\"1st input\")\n#y=input()\n#yParsed = parse_expr(y, evaluate=False)\n#functionGraph(y)\n\ndef testFunc(inp):\n print(\"printing user input from 
testFunc - \" +inp)\n pass\n\n##############################################\n#works on CHROME ONLY, caching issue in Safari\n##############################################\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n\n print(\"user input = \" +str(input))\n\n\n #testFunc(input)\n return render_template(\"graph.html\")\n #return render_template(\"graph.html\", result=input)\n\n\n@app.route('/home', methods=['GET', 'POST'])\ndef home():\n return render_template('home.html')\n\n@app.route('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n'''@app.route('/input', methods=['GET', 'POST'])\ndef input_post():\n if request.method == 'POST':\n result = request.form['Function']\n print(result)\n return render_template(\"graph.html\", result=result)'''\n\n@app.route('/der', methods=['GET', 'POST'])\ndef derGraph():\n return render_template('graph2.html')\n\n@app.route('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n@app.route('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n@app.route('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n@app.route('/poi', methods=['GET', 'POST'])\ndef poi():\n return render_template('poi.html')\n\n@app.route('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result = str(area))\n\n@app.route('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\n@app.route('/out1', methods=['GET', 'POST'])\ndef out1():\n 
return render_template('out1.html')\n\n@app.after_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=False)\n\n\n",
"step-ids": [
10,
13,
15,
16,
18
]
}
|
[
10,
13,
15,
16,
18
] |
'''
3、 编写一个函数,输入n为偶数时,调用函数求1/2+1/4+...+1/n,当输入n为奇数时,调用函数1/1+1/3+...+1/n
'''
def f(n):
    """Print (and return) a partial reciprocal sum up to 1/n.

    For even n: 1/2 + 1/4 + ... + 1/n.
    For odd  n: 1/1 + 1/3 + ... + 1/n.

    Args:
        n: Upper bound of the series (its parity selects the series).

    Returns:
        The computed sum (also printed, matching the original behavior).
    """
    # Both series step by 2; only the starting term differs by parity.
    start = 2 if n % 2 == 0 else 1
    # Avoid shadowing the builtin `sum`; one pass covers both branches.
    total = sum(1 / x for x in range(start, n + 1, 2))
    print(total)
    return total
|
normal
|
{
"blob_id": "69cf28d32e6543271a0855d61a76808b03c06891",
"index": 4805,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(n):\n if n % 2 == 0:\n sum = 0\n for x in range(2, n + 1, 2):\n sum += 1 / x\n print(sum)\n if n % 2 != 0:\n sum = 0\n for x in range(1, n + 1, 2):\n sum += 1 / x\n print(sum)\n",
"step-3": "'''\n3、\t编写一个函数,输入n为偶数时,调用函数求1/2+1/4+...+1/n,当输入n为奇数时,调用函数1/1+1/3+...+1/n\n'''\n\ndef f(n):\n if n%2==0:\n sum=0\n for x in range(2,n+1,2):\n sum+=1/x\n print(sum)\n if n%2!=0:\n sum=0\n for x in range(1,n+1,2):\n sum+=1/x\n print(sum)\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Question link: https://www.hackerrank.com/challenges/30-scope/problem
# Code section:
def computeDifference(self):
# Add your code here
self.maximumDifference = -111111
for i in range(0,len(self.__elements)-1):
for j in range(i+1, len(self.__elements)):
diff = abs(self.__elements[i]-self.__elements[j])
self.maximumDifference = max(diff, self.maximumDifference)
|
normal
|
{
"blob_id": "eb90912d09fca52a43b28ec4c988e3658ddfc219",
"index": 605,
"step-1": "# Question link: https://www.hackerrank.com/challenges/30-scope/problem\n# Code section:\n\n def computeDifference(self):\n # Add your code here\n self.maximumDifference = -111111\n for i in range(0,len(self.__elements)-1):\n for j in range(i+1, len(self.__elements)):\n diff = abs(self.__elements[i]-self.__elements[j])\n self.maximumDifference = max(diff, self.maximumDifference)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def minvalue(weight, Day):
maximum = 0
res = 0
for x in range(0, len(weight)):
if weight[x] > maximum:
maximum = weight[x]
res += weight[x]
Capitivity = max(res // Day, maximum)
while True:
sum = 0
day = 1
for t in range(0, len(weight)):
if weight[t] + sum <= Capitivity:
sum += weight[t]
else:
sum = weight[t]
day += 1
if day <= Day:
return Capitivity
else:
Capitivity += 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def minvalue(weight, Day):
maximum = 0
res = 0
for x in range(0, len(weight)):
if weight[x] > maximum:
maximum = weight[x]
res += weight[x]
Capitivity = max(res // Day, maximum)
while True:
sum = 0
day = 1
for t in range(0, len(weight)):
if weight[t] + sum <= Capitivity:
sum += weight[t]
else:
sum = weight[t]
day += 1
if day <= Day:
return Capitivity
else:
Capitivity += 1
<|reserved_special_token_0|>
store.append(list(map(int, a.split(','))))
<|reserved_special_token_0|>
print(minvalue(weight, Day))
<|reserved_special_token_1|>
def minvalue(weight, Day):
maximum = 0
res = 0
for x in range(0, len(weight)):
if weight[x] > maximum:
maximum = weight[x]
res += weight[x]
Capitivity = max(res // Day, maximum)
while True:
sum = 0
day = 1
for t in range(0, len(weight)):
if weight[t] + sum <= Capitivity:
sum += weight[t]
else:
sum = weight[t]
day += 1
if day <= Day:
return Capitivity
else:
Capitivity += 1
a = input()
a = a[1:len(a) - 1]
store = []
store.append(list(map(int, a.split(','))))
weight = store[0]
Day = int(input())
print(minvalue(weight, Day))
<|reserved_special_token_1|>
def minvalue(weight,Day):
maximum = 0
res = 0
for x in range(0, len(weight)):
if weight[x] > maximum:
maximum = weight[x]
res += weight[x]
Capitivity = max(res // Day, maximum)
while True:
sum=0
day=1
for t in range(0, len(weight)):
if weight[t]+sum<=Capitivity:
sum+=weight[t]
else:
sum=weight[t]
day+=1
if day<=Day:
return Capitivity
else:
Capitivity+=1
a=input()
a=a[1:len(a)-1]
store=[]
store.append(list(map(int, a.split(","))))
weight=store[0]
Day=int(input())
print(minvalue(weight,Day))
|
flexible
|
{
"blob_id": "a0ffb793650b0e911dd9bcbec0b7ba76f7829c12",
"index": 1539,
"step-1": "<mask token>\n",
"step-2": "def minvalue(weight, Day):\n maximum = 0\n res = 0\n for x in range(0, len(weight)):\n if weight[x] > maximum:\n maximum = weight[x]\n res += weight[x]\n Capitivity = max(res // Day, maximum)\n while True:\n sum = 0\n day = 1\n for t in range(0, len(weight)):\n if weight[t] + sum <= Capitivity:\n sum += weight[t]\n else:\n sum = weight[t]\n day += 1\n if day <= Day:\n return Capitivity\n else:\n Capitivity += 1\n\n\n<mask token>\n",
"step-3": "def minvalue(weight, Day):\n maximum = 0\n res = 0\n for x in range(0, len(weight)):\n if weight[x] > maximum:\n maximum = weight[x]\n res += weight[x]\n Capitivity = max(res // Day, maximum)\n while True:\n sum = 0\n day = 1\n for t in range(0, len(weight)):\n if weight[t] + sum <= Capitivity:\n sum += weight[t]\n else:\n sum = weight[t]\n day += 1\n if day <= Day:\n return Capitivity\n else:\n Capitivity += 1\n\n\n<mask token>\nstore.append(list(map(int, a.split(','))))\n<mask token>\nprint(minvalue(weight, Day))\n",
"step-4": "def minvalue(weight, Day):\n maximum = 0\n res = 0\n for x in range(0, len(weight)):\n if weight[x] > maximum:\n maximum = weight[x]\n res += weight[x]\n Capitivity = max(res // Day, maximum)\n while True:\n sum = 0\n day = 1\n for t in range(0, len(weight)):\n if weight[t] + sum <= Capitivity:\n sum += weight[t]\n else:\n sum = weight[t]\n day += 1\n if day <= Day:\n return Capitivity\n else:\n Capitivity += 1\n\n\na = input()\na = a[1:len(a) - 1]\nstore = []\nstore.append(list(map(int, a.split(','))))\nweight = store[0]\nDay = int(input())\nprint(minvalue(weight, Day))\n",
"step-5": "def minvalue(weight,Day):\n maximum = 0\n res = 0\n for x in range(0, len(weight)):\n if weight[x] > maximum:\n maximum = weight[x]\n res += weight[x]\n Capitivity = max(res // Day, maximum)\n while True:\n sum=0\n day=1\n for t in range(0, len(weight)):\n if weight[t]+sum<=Capitivity:\n sum+=weight[t]\n else:\n sum=weight[t]\n day+=1\n if day<=Day:\n return Capitivity\n else:\n Capitivity+=1\na=input()\na=a[1:len(a)-1]\nstore=[]\nstore.append(list(map(int, a.split(\",\"))))\nweight=store[0]\nDay=int(input())\nprint(minvalue(weight,Day))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import pandas
from sklearn import tree
import pydotplus
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.image as pltimg
df = pandas.read_csv("show.csv")
d = {'UK': 0, 'USA': 1, 'N': 2}
df['Nationality'] = df['Nationality'].map(d)
d = {'YES': 1, 'NO': 0}
df['Go'] = df['Go'].map(d)
######
features = ['Age', 'Experience', 'Rank', 'Nationality']
X = df[features]
y = df['Go']
#####
dtree = DecisionTreeClassifier()
dtree = dtree.fit(X, y)
data = tree.export_graphviz(dtree, out_file=None, feature_names=features)
graph = pydotplus.graph_from_dot_data(data)
graph.write_png('mydecisiontree.png')
img=pltimg.imread('mydecisiontree.png')
imgplot = plt.imshow(img)
plt.show()
print(X)
print(y)
|
normal
|
{
"blob_id": "c9cf65eeec49eba004312491cdd2321200fa6a61",
"index": 469,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngraph.write_png('mydecisiontree.png')\n<mask token>\nplt.show()\nprint(X)\nprint(y)\n",
"step-3": "<mask token>\ndf = pandas.read_csv('show.csv')\nd = {'UK': 0, 'USA': 1, 'N': 2}\ndf['Nationality'] = df['Nationality'].map(d)\nd = {'YES': 1, 'NO': 0}\ndf['Go'] = df['Go'].map(d)\nfeatures = ['Age', 'Experience', 'Rank', 'Nationality']\nX = df[features]\ny = df['Go']\ndtree = DecisionTreeClassifier()\ndtree = dtree.fit(X, y)\ndata = tree.export_graphviz(dtree, out_file=None, feature_names=features)\ngraph = pydotplus.graph_from_dot_data(data)\ngraph.write_png('mydecisiontree.png')\nimg = pltimg.imread('mydecisiontree.png')\nimgplot = plt.imshow(img)\nplt.show()\nprint(X)\nprint(y)\n",
"step-4": "import cv2\nimport pandas\nfrom sklearn import tree\nimport pydotplus\nfrom sklearn.tree import DecisionTreeClassifier\nimport matplotlib.pyplot as plt\nimport matplotlib.image as pltimg\ndf = pandas.read_csv('show.csv')\nd = {'UK': 0, 'USA': 1, 'N': 2}\ndf['Nationality'] = df['Nationality'].map(d)\nd = {'YES': 1, 'NO': 0}\ndf['Go'] = df['Go'].map(d)\nfeatures = ['Age', 'Experience', 'Rank', 'Nationality']\nX = df[features]\ny = df['Go']\ndtree = DecisionTreeClassifier()\ndtree = dtree.fit(X, y)\ndata = tree.export_graphviz(dtree, out_file=None, feature_names=features)\ngraph = pydotplus.graph_from_dot_data(data)\ngraph.write_png('mydecisiontree.png')\nimg = pltimg.imread('mydecisiontree.png')\nimgplot = plt.imshow(img)\nplt.show()\nprint(X)\nprint(y)\n",
"step-5": "import cv2\nimport pandas\nfrom sklearn import tree\nimport pydotplus\nfrom sklearn.tree import DecisionTreeClassifier\nimport matplotlib.pyplot as plt\nimport matplotlib.image as pltimg\n\ndf = pandas.read_csv(\"show.csv\")\nd = {'UK': 0, 'USA': 1, 'N': 2}\ndf['Nationality'] = df['Nationality'].map(d)\nd = {'YES': 1, 'NO': 0}\ndf['Go'] = df['Go'].map(d)\n\n######\nfeatures = ['Age', 'Experience', 'Rank', 'Nationality']\nX = df[features]\ny = df['Go']\n#####\ndtree = DecisionTreeClassifier()\ndtree = dtree.fit(X, y)\ndata = tree.export_graphviz(dtree, out_file=None, feature_names=features)\ngraph = pydotplus.graph_from_dot_data(data)\ngraph.write_png('mydecisiontree.png')\n\nimg=pltimg.imread('mydecisiontree.png')\nimgplot = plt.imshow(img)\nplt.show()\nprint(X)\nprint(y)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for a in A[::-1]:
idx = bisect.bisect_right(dp, a)
dp[idx] = a
<|reserved_special_token_0|>
for n in dp:
if n != float('inf'):
ans += 1
print(ans)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
input = sys.stdin.readline
N = int(input())
A = [int(input()) for _ in range(N)]
dp = [float('inf')] * (N + 1)
for a in A[::-1]:
idx = bisect.bisect_right(dp, a)
dp[idx] = a
ans = 0
for n in dp:
if n != float('inf'):
ans += 1
print(ans)
<|reserved_special_token_1|>
import bisect
import sys
input = sys.stdin.readline
N = int(input())
A = [int(input()) for _ in range(N)]
dp = [float('inf')] * (N + 1)
for a in A[::-1]:
idx = bisect.bisect_right(dp, a)
dp[idx] = a
ans = 0
for n in dp:
if n != float('inf'):
ans += 1
print(ans)
|
flexible
|
{
"blob_id": "dfe79d2f4bf4abc1d04035cf4556237a53c01122",
"index": 6913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor a in A[::-1]:\n idx = bisect.bisect_right(dp, a)\n dp[idx] = a\n<mask token>\nfor n in dp:\n if n != float('inf'):\n ans += 1\nprint(ans)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\nA = [int(input()) for _ in range(N)]\ndp = [float('inf')] * (N + 1)\nfor a in A[::-1]:\n idx = bisect.bisect_right(dp, a)\n dp[idx] = a\nans = 0\nfor n in dp:\n if n != float('inf'):\n ans += 1\nprint(ans)\n",
"step-4": "import bisect\nimport sys\ninput = sys.stdin.readline\nN = int(input())\nA = [int(input()) for _ in range(N)]\ndp = [float('inf')] * (N + 1)\nfor a in A[::-1]:\n idx = bisect.bisect_right(dp, a)\n dp[idx] = a\nans = 0\nfor n in dp:\n if n != float('inf'):\n ans += 1\nprint(ans)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Problem 24
A permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:
012 021 102 120 201 210
What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?
'''
from itertools import permutations
p=permutations(range(10))
n=1000000
for i in range(n-1):
p.next()
print ''.join([str(i) for i in p.next()])
|
normal
|
{
"blob_id": "f2ac9904aaa4c12ef2954b88c37ffd0c97aadf5a",
"index": 9398,
"step-1": "'''\nProblem 24\n\n\nA permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:\n\n012 021 102 120 201 210\n\nWhat is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?\n'''\n\nfrom itertools import permutations\np=permutations(range(10))\nn=1000000\nfor i in range(n-1):\n p.next()\nprint ''.join([str(i) for i in p.next()])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import time
import re
import json
from os.path import join, getsize
from aiohttp import web
from utils import helper
TBL_HEAD = '''
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col">Directory</th>
<th scope="col">Size</th>
</tr>
</thead>
<tbody>
'''
TBL_FOOTER = '''
</tbody>
</table>
'''
def stats_count_info(request):
root_path = request.app['PATH-DB']
cpt = 0
d = dict()
dirs_data = dict()
for root, dirs, files in os.walk(root_path, topdown=False):
cpt += len(files)
size = sum(getsize(join(root, name)) for name in files)
subdir_size = sum(dirs_data[join(root,d)] for d in dirs)
size = dirs_data[root] = size + subdir_size
if root.find('.meta') != -1:
# we ignore (internal) meta directories
continue
d[root] = size
ret = ''
ret += "<h2>Files Count</h2>Number of files: {}<br /><br />".format(cpt)
ret += "<h2>Disk Consumption</h2>"
ret += "Database disk consumption overall: {} MB<br /><br />".format(d[root_path] // (1024*1024))
ret += "<h4>Resouce Usage Listed by Objects</h4><br />"
ret += TBL_HEAD
for k in sorted(d, key=d.get, reverse=True):
ret += '<tr>'
ret += "<td>{}</td><td>{}</td>".format(k, d[k])
ret += TBL_FOOTER
return ret
def generate_disk_info_page(request):
page = request.app['BLOB-HEADER']
page += stats_count_info(request)
page += request.app['BLOB-FOOTER']
return web.Response(body=page, content_type='text/html')
def handle(request):
return generate_disk_info_page(request)
|
normal
|
{
"blob_id": "7c9b51ae7cde9c3a00888dac6df710b93af6dd7f",
"index": 4836,
"step-1": "<mask token>\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\n<mask token>\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-2": "<mask token>\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-3": "<mask token>\nTBL_HEAD = \"\"\"\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n\"\"\"\nTBL_FOOTER = \"\"\"\n </tbody>\n</table>\n\"\"\"\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-4": "import os\nimport time\nimport re\nimport json\nfrom os.path import join, getsize\nfrom aiohttp import web\nfrom utils import helper\nTBL_HEAD = \"\"\"\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n\"\"\"\nTBL_FOOTER = \"\"\"\n </tbody>\n</table>\n\"\"\"\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-5": "import os\nimport time\nimport re\nimport json\nfrom os.path import join, getsize\n\nfrom aiohttp import web\n\nfrom utils import helper\n\nTBL_HEAD = '''\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n'''\n\nTBL_FOOTER = '''\n </tbody>\n</table>\n'''\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root,d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n # we ignore (internal) meta directories\n continue\n d[root] = size\n\n ret = ''\n ret += \"<h2>Files Count</h2>Number of files: {}<br /><br />\".format(cpt)\n ret += \"<h2>Disk Consumption</h2>\"\n ret += \"Database disk consumption overall: {} MB<br /><br />\".format(d[root_path] // (1024*1024))\n ret += \"<h4>Resouce Usage Listed by Objects</h4><br />\"\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += \"<td>{}</td><td>{}</td>\".format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def introduction():
like_to_play = int(input(
'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '
))
if like_to_play == 1:
easy_or_hard = input('Easy (1) or hard (2)? ')
easy_or_hard = int(easy_or_hard)
if easy_or_hard == 1:
EasyMode.play_game_easy()
elif easy_or_hard == 2:
HardMode.play_game_hard()
else:
print('Invalid option!')
else:
print('Goodbye!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def introduction():
like_to_play = int(input(
'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '
))
if like_to_play == 1:
easy_or_hard = input('Easy (1) or hard (2)? ')
easy_or_hard = int(easy_or_hard)
if easy_or_hard == 1:
EasyMode.play_game_easy()
elif easy_or_hard == 2:
HardMode.play_game_hard()
else:
print('Invalid option!')
else:
print('Goodbye!')
introduction()
<|reserved_special_token_1|>
import random
import HardMode
import EasyMode
def introduction():
like_to_play = int(input(
'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '
))
if like_to_play == 1:
easy_or_hard = input('Easy (1) or hard (2)? ')
easy_or_hard = int(easy_or_hard)
if easy_or_hard == 1:
EasyMode.play_game_easy()
elif easy_or_hard == 2:
HardMode.play_game_hard()
else:
print('Invalid option!')
else:
print('Goodbye!')
introduction()
<|reserved_special_token_1|>
import random
import HardMode
import EasyMode
#Intro function, gets user input of game start, instructions, and game mode
def introduction():
like_to_play = int(input ("Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) "))
#like_to_play = int(like_to_play)
#need to set y/n variables instead of numeric: flow control
if(like_to_play == 1):
easy_or_hard = input("Easy (1) or hard (2)? ")
easy_or_hard = int(easy_or_hard)
if easy_or_hard == 1:
EasyMode.play_game_easy()
elif easy_or_hard == 2:
HardMode.play_game_hard()
else:
print("Invalid option!")
else:
print("Goodbye!")
introduction()
|
flexible
|
{
"blob_id": "31246a2e022f3c5b0ce68bb06422307439cbd9b6",
"index": 4272,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef introduction():\n like_to_play = int(input(\n 'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '\n ))\n if like_to_play == 1:\n easy_or_hard = input('Easy (1) or hard (2)? ')\n easy_or_hard = int(easy_or_hard)\n if easy_or_hard == 1:\n EasyMode.play_game_easy()\n elif easy_or_hard == 2:\n HardMode.play_game_hard()\n else:\n print('Invalid option!')\n else:\n print('Goodbye!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef introduction():\n like_to_play = int(input(\n 'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '\n ))\n if like_to_play == 1:\n easy_or_hard = input('Easy (1) or hard (2)? ')\n easy_or_hard = int(easy_or_hard)\n if easy_or_hard == 1:\n EasyMode.play_game_easy()\n elif easy_or_hard == 2:\n HardMode.play_game_hard()\n else:\n print('Invalid option!')\n else:\n print('Goodbye!')\n\n\nintroduction()\n",
"step-4": "import random\nimport HardMode\nimport EasyMode\n\n\ndef introduction():\n like_to_play = int(input(\n 'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '\n ))\n if like_to_play == 1:\n easy_or_hard = input('Easy (1) or hard (2)? ')\n easy_or_hard = int(easy_or_hard)\n if easy_or_hard == 1:\n EasyMode.play_game_easy()\n elif easy_or_hard == 2:\n HardMode.play_game_hard()\n else:\n print('Invalid option!')\n else:\n print('Goodbye!')\n\n\nintroduction()\n",
"step-5": "import random\nimport HardMode\nimport EasyMode\n\n#Intro function, gets user input of game start, instructions, and game mode\ndef introduction():\n like_to_play = int(input (\"Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) \"))\n #like_to_play = int(like_to_play)\n #need to set y/n variables instead of numeric: flow control\n \n if(like_to_play == 1):\n easy_or_hard = input(\"Easy (1) or hard (2)? \")\n easy_or_hard = int(easy_or_hard)\n\n if easy_or_hard == 1:\n EasyMode.play_game_easy()\n elif easy_or_hard == 2:\n HardMode.play_game_hard()\n else:\n print(\"Invalid option!\")\n\n else:\n print(\"Goodbye!\")\n\nintroduction()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .dispatch import dispatch_expts
|
normal
|
{
"blob_id": "394ebfe25bbf8eaf427509f28a82a98b9b481b63",
"index": 4957,
"step-1": "<mask token>\n",
"step-2": "from .dispatch import dispatch_expts\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def test_nested_query_with_datetime():
inner_q = assist.build_query(select='time, value', from_='system_load',
where='L2=\'cpuload\' and "name" != \'Idle\'', groupby=('host', 'L3'))
outer_q = assist.build_query(select='time, value', from_=inner_q, where
=
f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}'
)
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_warning():
inner_q = assist.build_query(select='time, value', from_='system_load',
where=
f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}and L2=\'cpuload\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
with pytest.warns(RuntimeWarning):
outer_q = assist.build_query(select='time, value', from_=inner_q)
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_time_grouping():
q = assist.build_query(select='time, MAX(value)', from_='system_load',
where=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('time(10m)', 'host', 'L3'))
df = assist.run_query(q, cache=False)
assert not df.empty
<|reserved_special_token_0|>
def test_cached_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
def _run_query(q):
df = assist.run_query(q, cache=True)
return df
_run_query(q)
df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)
assert not df.empty
def test_nocached_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
@unittest.mock.patch('assist.parse.client', new=None)
def _run_query(q):
df = assist.run_query(q, cache=False)
return df
with pytest.raises(AttributeError):
_run_query(q)
def test_cached_query_mv():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
def _run_query(q):
df = assist.run_multivariate_query(q, cache=True)
return df
_run_query(q)
df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)
assert not df.empty
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_nested_query():
inner_q = assist.build_query(select='time, value, host, L3', from_=
'system_load', where='L2=\'cpuload\' and "name" != \'Idle\'')
outer_q = assist.build_query(select='time, value', from_=inner_q, where
="time > '2021-06-16 00:00:00' and time < '2021-06-17 00:00:00'",
groupby=('host', 'L3'))
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_nested_query_with_datetime():
inner_q = assist.build_query(select='time, value', from_='system_load',
where='L2=\'cpuload\' and "name" != \'Idle\'', groupby=('host', 'L3'))
outer_q = assist.build_query(select='time, value', from_=inner_q, where
=
f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}'
)
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_warning():
inner_q = assist.build_query(select='time, value', from_='system_load',
where=
f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}and L2=\'cpuload\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
with pytest.warns(RuntimeWarning):
outer_q = assist.build_query(select='time, value', from_=inner_q)
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_time_grouping():
q = assist.build_query(select='time, MAX(value)', from_='system_load',
where=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('time(10m)', 'host', 'L3'))
df = assist.run_query(q, cache=False)
assert not df.empty
<|reserved_special_token_0|>
def test_cached_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
def _run_query(q):
df = assist.run_query(q, cache=True)
return df
_run_query(q)
df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)
assert not df.empty
def test_nocached_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
@unittest.mock.patch('assist.parse.client', new=None)
def _run_query(q):
df = assist.run_query(q, cache=False)
return df
with pytest.raises(AttributeError):
_run_query(q)
def test_cached_query_mv():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
def _run_query(q):
df = assist.run_multivariate_query(q, cache=True)
return df
_run_query(q)
df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)
assert not df.empty
def test_nocached_query_mv():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
@unittest.mock.patch('assist.parse.client', new=list())
def _run_query(q):
df = assist.run_multivariate_query(q, cache=False)
return df
with pytest.raises(AttributeError):
_run_query(q)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_simple_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
df = assist.run_query(q, cache=False)
assert not df.empty
def test_nested_query():
inner_q = assist.build_query(select='time, value, host, L3', from_=
'system_load', where='L2=\'cpuload\' and "name" != \'Idle\'')
outer_q = assist.build_query(select='time, value', from_=inner_q, where
="time > '2021-06-16 00:00:00' and time < '2021-06-17 00:00:00'",
groupby=('host', 'L3'))
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_nested_query_with_datetime():
inner_q = assist.build_query(select='time, value', from_='system_load',
where='L2=\'cpuload\' and "name" != \'Idle\'', groupby=('host', 'L3'))
outer_q = assist.build_query(select='time, value', from_=inner_q, where
=
f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}'
)
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_warning():
inner_q = assist.build_query(select='time, value', from_='system_load',
where=
f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}and L2=\'cpuload\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
with pytest.warns(RuntimeWarning):
outer_q = assist.build_query(select='time, value', from_=inner_q)
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_time_grouping():
q = assist.build_query(select='time, MAX(value)', from_='system_load',
where=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('time(10m)', 'host', 'L3'))
df = assist.run_query(q, cache=False)
assert not df.empty
<|reserved_special_token_0|>
def test_cached_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
def _run_query(q):
df = assist.run_query(q, cache=True)
return df
_run_query(q)
df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)
assert not df.empty
def test_nocached_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
@unittest.mock.patch('assist.parse.client', new=None)
def _run_query(q):
df = assist.run_query(q, cache=False)
return df
with pytest.raises(AttributeError):
_run_query(q)
def test_cached_query_mv():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
def _run_query(q):
df = assist.run_multivariate_query(q, cache=True)
return df
_run_query(q)
df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)
assert not df.empty
def test_nocached_query_mv():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
@unittest.mock.patch('assist.parse.client', new=list())
def _run_query(q):
df = assist.run_multivariate_query(q, cache=False)
return df
with pytest.raises(AttributeError):
_run_query(q)
<|reserved_special_token_1|>
import unittest.mock
import assist
import pytest
def test_simple_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
df = assist.run_query(q, cache=False)
assert not df.empty
def test_nested_query():
inner_q = assist.build_query(select='time, value, host, L3', from_=
'system_load', where='L2=\'cpuload\' and "name" != \'Idle\'')
outer_q = assist.build_query(select='time, value', from_=inner_q, where
="time > '2021-06-16 00:00:00' and time < '2021-06-17 00:00:00'",
groupby=('host', 'L3'))
df = assist.run_query(outer_q, cache=False)
assert not df.empty
def test_nested_query_with_datetime():
    """Outer WHERE clause built from assist.Datetime objects selects the window.

    Builds a grouped inner query, then wraps it with a time-window filter
    whose bounds come from ``assist.Datetime`` and checks rows come back.
    """
    inner_q = assist.build_query(select='time, value', from_='system_load',
        where='L2=\'cpuload\' and "name" != \'Idle\'', groupby=('host', 'L3'))
    # Fix: the two adjacent f-strings previously concatenated without a
    # space, producing "...00:00:00and time < ..." -- an invalid WHERE
    # clause (assumes assist.Datetime renders without a trailing space;
    # TODO confirm against its __str__).
    outer_q = assist.build_query(
        select='time, value',
        from_=inner_q,
        where=f'time > {assist.Datetime(year=2021, month=6, day=16)} '
              f'and time < {assist.Datetime(year=2021, month=6, day=17)}',
    )
    df = assist.run_query(outer_q, cache=False)
    assert not df.empty
def test_warning():
    """Wrapping an already-grouped query without re-grouping emits RuntimeWarning."""
    # Fix: the adjacent f-strings previously joined without spaces,
    # producing "...}and time..." and "...'Idle'"-style run-together
    # tokens in the WHERE clause (assumes assist.Datetime renders without
    # a trailing space; TODO confirm).
    inner_q = assist.build_query(
        select='time, value',
        from_='system_load',
        where=f'time > {assist.Datetime(year=2021, month=6, day=16)} '
              f'and time < {assist.Datetime(year=2021, month=6, day=17)} '
              'and L2=\'cpuload\' and "name" != \'Idle\'',
        groupby=('host', 'L3'))
    with pytest.warns(RuntimeWarning):
        outer_q = assist.build_query(select='time, value', from_=inner_q)
    df = assist.run_query(outer_q, cache=False)
    assert not df.empty
def test_time_grouping():
q = assist.build_query(select='time, MAX(value)', from_='system_load',
where=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('time(10m)', 'host', 'L3'))
df = assist.run_query(q, cache=False)
assert not df.empty
def test_fill_values():
q = assist.build_query(select='time, MEAN(value)', from_='system_load',
where=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('time(10m)', 'fill(0)', 'host', 'L3'))
df = assist.run_query(q, cache=False)
assert not df.empty
def test_cached_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
def _run_query(q):
df = assist.run_query(q, cache=True)
return df
_run_query(q)
df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)
assert not df.empty
def test_nocached_query():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
@unittest.mock.patch('assist.parse.client', new=None)
def _run_query(q):
df = assist.run_query(q, cache=False)
return df
with pytest.raises(AttributeError):
_run_query(q)
def test_cached_query_mv():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
def _run_query(q):
df = assist.run_multivariate_query(q, cache=True)
return df
_run_query(q)
df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)
assert not df.empty
def test_nocached_query_mv():
q = assist.build_query(select='time, value', from_='system_load', where
=
'L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\''
, groupby=('host', 'L3'))
@unittest.mock.patch('assist.parse.client', new=list())
def _run_query(q):
df = assist.run_multivariate_query(q, cache=False)
return df
with pytest.raises(AttributeError):
_run_query(q)
<|reserved_special_token_1|>
import unittest.mock
import assist
import pytest
def test_simple_query():
    """A single-level query against system_load returns a non-empty frame."""
    condition = (
        "L2='cpuload' "
        "and time > '2021-06-16 00:00:00' "
        "and time < '2021-06-17 00:00:00' "
        "and \"name\" != 'Idle'"
    )
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where=condition,
        groupby=('host', 'L3'),
    )
    frame = assist.run_query(query, cache=False)
    assert not frame.empty
def test_nested_query():
    """A query built from another query runs as a sub-select and returns rows."""
    subquery = assist.build_query(
        select='time, value, host, L3',
        from_='system_load',
        where="L2='cpuload' and \"name\" != 'Idle'",
    )
    wrapper = assist.build_query(
        select='time, value',
        from_=subquery,
        where="time > '2021-06-16 00:00:00' and time < '2021-06-17 00:00:00'",
        groupby=('host', 'L3'),
    )
    frame = assist.run_query(wrapper, cache=False)
    assert not frame.empty
def test_nested_query_with_datetime():
    """Outer WHERE clause built from assist.Datetime objects selects the window.

    Builds a grouped inner query, then wraps it with a time-window filter
    whose bounds come from ``assist.Datetime`` and checks rows come back.
    """
    inner_q = assist.build_query(select='time, value', from_='system_load',
                                 where='L2=\'cpuload\' and "name" != \'Idle\'',
                                 groupby=('host', 'L3'))
    # Fix: the two adjacent f-strings previously concatenated without a
    # space, producing "...00:00:00and time < ..." -- an invalid WHERE
    # clause (assumes assist.Datetime renders without a trailing space;
    # TODO confirm against its __str__).
    outer_q = assist.build_query(
        select='time, value',
        from_=inner_q,
        where=f'time > {assist.Datetime(year=2021, month=6, day=16)} '
              f'and time < {assist.Datetime(year=2021, month=6, day=17)}',
    )
    df = assist.run_query(outer_q, cache=False)
    assert not df.empty
def test_warning():
    """Wrapping an already-grouped query without re-grouping emits RuntimeWarning."""
    # Fix: the adjacent f-strings previously joined without spaces,
    # producing "...}and time..." run-together tokens in the WHERE clause
    # (assumes assist.Datetime renders without a trailing space; TODO confirm).
    inner_q = assist.build_query(
        select='time, value',
        from_='system_load',
        where=f'time > {assist.Datetime(year=2021, month=6, day=16)} '
              f'and time < {assist.Datetime(year=2021, month=6, day=17)} '
              'and L2=\'cpuload\' and "name" != \'Idle\'',
        groupby=('host', 'L3'))
    with pytest.warns(RuntimeWarning):
        outer_q = assist.build_query(select='time, value', from_=inner_q)
    df = assist.run_query(outer_q, cache=False)
    assert not df.empty
def test_time_grouping():
    """GROUP BY time(10m) with a MAX aggregate still yields a non-empty frame."""
    condition = (
        "L2='cpuload' "
        "and time > '2021-06-16 00:00:00' "
        "and time < '2021-06-17 00:00:00' "
        "and \"name\" != 'Idle'"
    )
    query = assist.build_query(
        select='time, MAX(value)',
        from_='system_load',
        where=condition,
        groupby=('time(10m)', 'host', 'L3'),
    )
    assert not assist.run_query(query, cache=False).empty
def test_fill_values():
    """fill(0) in the GROUP BY clause fills gaps so the frame is non-empty."""
    condition = (
        "L2='cpuload' "
        "and time > '2021-06-16 00:00:00' "
        "and time < '2021-06-17 00:00:00' "
        "and \"name\" != 'Idle'"
    )
    query = assist.build_query(
        select='time, MEAN(value)',
        from_='system_load',
        where=condition,
        groupby=('time(10m)', 'fill(0)', 'host', 'L3'),
    )
    assert not assist.run_query(query, cache=False).empty
def test_cached_query():
    """A cached query is still answered after the InfluxDB client disappears."""
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where="L2='cpuload' and time > '2021-06-16 00:00:00' "
              "and time < '2021-06-17 00:00:00' and \"name\" != 'Idle'",
        groupby=('host', 'L3'),
    )
    # Warm the cache while the real client is still in place.
    assist.run_query(query, cache=True)
    # Knock out the InfluxDB client; the cached result must still be served.
    with unittest.mock.patch('assist.parse.client', new=None):
        frame = assist.run_query(query, cache=True)
    assert not frame.empty
def test_nocached_query():
    """Without caching, a dead client makes run_query raise AttributeError."""
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where="L2='cpuload' and time > '2021-06-16 00:00:00' "
              "and time < '2021-06-17 00:00:00' and \"name\" != 'Idle'",
        groupby=('host', 'L3'),
    )
    # Replace the InfluxDB client with None; the uncached path must fail.
    with unittest.mock.patch('assist.parse.client', new=None):
        with pytest.raises(AttributeError):
            assist.run_query(query, cache=False)
def test_cached_query_mv():
    """A cached multivariate query survives losing the InfluxDB client."""
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where="L2='cpuload' and time > '2021-06-16 00:00:00' "
              "and time < '2021-06-17 00:00:00' and \"name\" != 'Idle'",
        groupby=('host', 'L3'),
    )
    # Warm the cache while the real client is still in place.
    assist.run_multivariate_query(query, cache=True)
    # Knock out the InfluxDB client; the cached result must still be served.
    with unittest.mock.patch('assist.parse.client', new=None):
        frame = assist.run_multivariate_query(query, cache=True)
    assert not frame.empty
def test_nocached_query_mv():
    """Without caching, the multivariate path also requires a live client."""
    q = assist.build_query(
        select='time, value',
        from_='system_load',
        where="L2='cpuload' and time > '2021-06-16 00:00:00' "
              "and time < '2021-06-17 00:00:00' and \"name\" != 'Idle'",
        groupby=('host', 'L3'),
    )

    # Consistency fix: patch with None like test_nocached_query does.
    # The previous new=list() also lacked the client API (so the expected
    # AttributeError still fired) but obscured the intent of the test.
    @unittest.mock.patch('assist.parse.client', new=None)
    def _run_query(q):
        return assist.run_multivariate_query(q, cache=False)

    # Invalidate the InfluxDB client, it should fail
    with pytest.raises(AttributeError):
        _run_query(q)
|
flexible
|
{
"blob_id": "8aa9ba145b6c7347a7a926d50dca35383ddd52a3",
"index": 9217,
"step-1": "<mask token>\n\n\ndef test_nested_query_with_datetime():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'', groupby=('host', 'L3'))\n outer_q = assist.build_query(select='time, value', from_=inner_q, where\n =\n f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}'\n )\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_warning():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where=\n f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}and L2=\\'cpuload\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n with pytest.warns(RuntimeWarning):\n outer_q = assist.build_query(select='time, value', from_=inner_q)\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_time_grouping():\n q = assist.build_query(select='time, MAX(value)', from_='system_load',\n where=\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('time(10m)', 'host', 'L3'))\n df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\n<mask token>\n\n\ndef test_cached_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_query(q, cache=True)\n return df\n _run_query(q)\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n assert not df.empty\n\n\ndef test_nocached_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , 
groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=None)\n def _run_query(q):\n df = assist.run_query(q, cache=False)\n return df\n with pytest.raises(AttributeError):\n _run_query(q)\n\n\ndef test_cached_query_mv():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=True)\n return df\n _run_query(q)\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n assert not df.empty\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_nested_query():\n inner_q = assist.build_query(select='time, value, host, L3', from_=\n 'system_load', where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'')\n outer_q = assist.build_query(select='time, value', from_=inner_q, where\n =\"time > '2021-06-16 00:00:00' and time < '2021-06-17 00:00:00'\",\n groupby=('host', 'L3'))\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_nested_query_with_datetime():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'', groupby=('host', 'L3'))\n outer_q = assist.build_query(select='time, value', from_=inner_q, where\n =\n f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}'\n )\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_warning():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where=\n f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}and L2=\\'cpuload\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n with pytest.warns(RuntimeWarning):\n outer_q = assist.build_query(select='time, value', from_=inner_q)\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_time_grouping():\n q = assist.build_query(select='time, MAX(value)', from_='system_load',\n where=\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('time(10m)', 'host', 'L3'))\n df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\n<mask token>\n\n\ndef test_cached_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n def 
_run_query(q):\n df = assist.run_query(q, cache=True)\n return df\n _run_query(q)\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n assert not df.empty\n\n\ndef test_nocached_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=None)\n def _run_query(q):\n df = assist.run_query(q, cache=False)\n return df\n with pytest.raises(AttributeError):\n _run_query(q)\n\n\ndef test_cached_query_mv():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=True)\n return df\n _run_query(q)\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n assert not df.empty\n\n\ndef test_nocached_query_mv():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=list())\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=False)\n return df\n with pytest.raises(AttributeError):\n _run_query(q)\n",
"step-3": "<mask token>\n\n\ndef test_simple_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\ndef test_nested_query():\n inner_q = assist.build_query(select='time, value, host, L3', from_=\n 'system_load', where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'')\n outer_q = assist.build_query(select='time, value', from_=inner_q, where\n =\"time > '2021-06-16 00:00:00' and time < '2021-06-17 00:00:00'\",\n groupby=('host', 'L3'))\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_nested_query_with_datetime():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'', groupby=('host', 'L3'))\n outer_q = assist.build_query(select='time, value', from_=inner_q, where\n =\n f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}'\n )\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_warning():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where=\n f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}and L2=\\'cpuload\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n with pytest.warns(RuntimeWarning):\n outer_q = assist.build_query(select='time, value', from_=inner_q)\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_time_grouping():\n q = assist.build_query(select='time, MAX(value)', from_='system_load',\n where=\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('time(10m)', 'host', 'L3'))\n df = assist.run_query(q, 
cache=False)\n assert not df.empty\n\n\n<mask token>\n\n\ndef test_cached_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_query(q, cache=True)\n return df\n _run_query(q)\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n assert not df.empty\n\n\ndef test_nocached_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=None)\n def _run_query(q):\n df = assist.run_query(q, cache=False)\n return df\n with pytest.raises(AttributeError):\n _run_query(q)\n\n\ndef test_cached_query_mv():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=True)\n return df\n _run_query(q)\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n assert not df.empty\n\n\ndef test_nocached_query_mv():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=list())\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=False)\n return df\n with pytest.raises(AttributeError):\n _run_query(q)\n",
"step-4": "import unittest.mock\nimport assist\nimport pytest\n\n\ndef test_simple_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\ndef test_nested_query():\n inner_q = assist.build_query(select='time, value, host, L3', from_=\n 'system_load', where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'')\n outer_q = assist.build_query(select='time, value', from_=inner_q, where\n =\"time > '2021-06-16 00:00:00' and time < '2021-06-17 00:00:00'\",\n groupby=('host', 'L3'))\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_nested_query_with_datetime():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'', groupby=('host', 'L3'))\n outer_q = assist.build_query(select='time, value', from_=inner_q, where\n =\n f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}'\n )\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_warning():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where=\n f'time > {assist.Datetime(year=2021, month=6, day=16)}and time < {assist.Datetime(year=2021, month=6, day=17)}and L2=\\'cpuload\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n with pytest.warns(RuntimeWarning):\n outer_q = assist.build_query(select='time, value', from_=inner_q)\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_time_grouping():\n q = assist.build_query(select='time, MAX(value)', from_='system_load',\n where=\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('time(10m)', 'host', 'L3'))\n 
df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\ndef test_fill_values():\n q = assist.build_query(select='time, MEAN(value)', from_='system_load',\n where=\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('time(10m)', 'fill(0)', 'host', 'L3'))\n df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\ndef test_cached_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_query(q, cache=True)\n return df\n _run_query(q)\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n assert not df.empty\n\n\ndef test_nocached_query():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=None)\n def _run_query(q):\n df = assist.run_query(q, cache=False)\n return df\n with pytest.raises(AttributeError):\n _run_query(q)\n\n\ndef test_cached_query_mv():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=True)\n return df\n _run_query(q)\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n assert not df.empty\n\n\ndef test_nocached_query_mv():\n q = assist.build_query(select='time, value', from_='system_load', where\n =\n 'L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\''\n , 
groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=list())\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=False)\n return df\n with pytest.raises(AttributeError):\n _run_query(q)\n",
"step-5": "import unittest.mock\n\nimport assist\nimport pytest\n\n\ndef test_simple_query():\n q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\'',\n groupby=('host', 'L3'))\n\n df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\ndef test_nested_query():\n inner_q = assist.build_query(select='time, value, host, L3', from_='system_load',\n where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'',\n )\n outer_q = assist.build_query(select='time, value', from_=inner_q,\n where='time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\'',\n groupby=('host', 'L3'))\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_nested_query_with_datetime():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and \"name\" != \\'Idle\\'',\n groupby=('host', 'L3'))\n outer_q = assist.build_query(select='time, value', from_=inner_q,\n where=f'time > {assist.Datetime(year=2021, month=6, day=16)}'\n f'and time < {assist.Datetime(year=2021, month=6, day=17)}',\n )\n\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_warning():\n inner_q = assist.build_query(select='time, value', from_='system_load',\n where=f'time > {assist.Datetime(year=2021, month=6, day=16)}'\n f'and time < {assist.Datetime(year=2021, month=6, day=17)}'\n 'and L2=\\'cpuload\\' and \"name\" != \\'Idle\\'',\n groupby=('host', 'L3'))\n with pytest.warns(RuntimeWarning):\n outer_q = assist.build_query(select='time, value', from_=inner_q, )\n\n df = assist.run_query(outer_q, cache=False)\n assert not df.empty\n\n\ndef test_time_grouping():\n q = assist.build_query(select='time, MAX(value)', from_='system_load',\n where='L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\'',\n 
groupby=('time(10m)', 'host', 'L3'))\n\n df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\ndef test_fill_values():\n q = assist.build_query(select='time, MEAN(value)', from_='system_load',\n where='L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\'',\n groupby=('time(10m)', 'fill(0)', 'host', 'L3'))\n\n df = assist.run_query(q, cache=False)\n assert not df.empty\n\n\ndef test_cached_query():\n q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\'',\n groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_query(q, cache=True)\n return df\n\n _run_query(q)\n # Invalidate the InfluxDB client, it should still work\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n\n assert not df.empty\n\ndef test_nocached_query():\n q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\'',\n groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=None)\n def _run_query(q):\n df = assist.run_query(q, cache=False)\n return df\n\n # Invalidate the InfluxDB client, it should fail\n with pytest.raises(AttributeError):\n _run_query(q)\n\n\ndef test_cached_query_mv():\n q = assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\'',\n groupby=('host', 'L3'))\n\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=True)\n return df\n\n _run_query(q)\n # Invalidate the InfluxDB client, it should still work\n df = unittest.mock.patch('assist.parse.client', new=None)(_run_query)(q)\n\n assert not df.empty\n\ndef test_nocached_query_mv():\n q = 
assist.build_query(select='time, value', from_='system_load',\n where='L2=\\'cpuload\\' and time > \\'2021-06-16 00:00:00\\' and time < \\'2021-06-17 00:00:00\\' and \"name\" != \\'Idle\\'',\n groupby=('host', 'L3'))\n\n @unittest.mock.patch('assist.parse.client', new=list())\n def _run_query(q):\n df = assist.run_multivariate_query(q, cache=False)\n return df\n\n # Invalidate the InfluxDB client, it should fail\n with pytest.raises(AttributeError):\n _run_query(q)",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .ros_publisher import *
|
flexible
|
{
"blob_id": "6e7cca4f766ca89d2e2f82a73f22742b0e8f92a8",
"index": 5870,
"step-1": "<mask token>\n",
"step-2": "from .ros_publisher import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# Print each student's name and the total of their numeric marks.
# Each line of marks.txt looks like: "name,mark1,mark2,...".
# Fix: use a context manager so the file is closed even if a line
# raises; strip each field before isdigit() so values padded with
# whitespace are still counted.
with open("marks.txt", "rt") as f:
    for line in f:
        line = line.strip()
        if not line:  # Blank line
            continue
        name, *marks = line.split(",")
        if not marks:  # Name with no marks at all
            continue
        # Keep only purely numeric fields and sum them.
        total = sum(int(m) for m in (m.strip() for m in marks) if m.isdigit())
        print(f"{name:15} {total:4}")
|
normal
|
{
"blob_id": "00587de133ee68415f31649f147fbff7e9bf65d5",
"index": 3337,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n name, *marks = line.split(',')\n if len(marks) == 0:\n continue\n marks = filter(str.isdigit, marks)\n total = sum(map(int, marks))\n print(f'{name:15} {total:4}')\nf.close()\n",
"step-3": "f = open('marks.txt', 'rt')\nfor line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n name, *marks = line.split(',')\n if len(marks) == 0:\n continue\n marks = filter(str.isdigit, marks)\n total = sum(map(int, marks))\n print(f'{name:15} {total:4}')\nf.close()\n",
"step-4": "# Print name and marks\nf = open(\"marks.txt\", \"rt\")\nfor line in f:\n line = line.strip()\n if len(line) == 0: # Blank line\n continue\n\n name, *marks = line.split(\",\")\n if len(marks) == 0:\n continue\n\n marks = filter(str.isdigit, marks) # Take only numbers\n total = sum(map(int, marks)) # Convert str to it and sum it\n print(f\"{name:15} {total:4}\")\n\nf.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from tests import unittest
from kepler.descriptors import *
class DescriptorsTestCase(unittest.TestCase):
    """Behavior of the Enum descriptor's optional mapper argument."""

    def testEnumDefaultsToNoopMapper(self):
        """Without a mapper, assigned values are stored unchanged."""
        class Record(object):
            cat = Enum(name='cat', enums=['Lucy Cat', 'Hot Pocket'])

        record = Record()
        record.cat = 'Lucy Cat'
        self.assertEqual('Lucy Cat', record.cat)

    def testEnumAppliesProvidedMapper(self):
        """A supplied mapper transforms values before they are stored."""
        class Record(object):
            cat = Enum(
                name='cat',
                enums=['LUCY CAT', 'HOT POCKET'],
                mapper=lambda x: x.upper(),
            )

        record = Record()
        record.cat = 'Hot Pocket'
        self.assertEqual('HOT POCKET', record.cat)
|
normal
|
{
"blob_id": "3eb40dfe68573b93c544a2279ac5c8728ae9601f",
"index": 7485,
"step-1": "<mask token>\n\n\nclass DescriptorsTestCase(unittest.TestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DescriptorsTestCase(unittest.TestCase):\n\n def testEnumDefaultsToNoopMapper(self):\n\n\n class Record(object):\n cat = Enum(name='cat', enums=['Lucy Cat', 'Hot Pocket'])\n r = Record()\n r.cat = 'Lucy Cat'\n self.assertEqual(r.cat, 'Lucy Cat')\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DescriptorsTestCase(unittest.TestCase):\n\n def testEnumDefaultsToNoopMapper(self):\n\n\n class Record(object):\n cat = Enum(name='cat', enums=['Lucy Cat', 'Hot Pocket'])\n r = Record()\n r.cat = 'Lucy Cat'\n self.assertEqual(r.cat, 'Lucy Cat')\n\n def testEnumAppliesProvidedMapper(self):\n\n\n class Record(object):\n cat = Enum(name='cat', enums=['LUCY CAT', 'HOT POCKET'], mapper\n =lambda x: x.upper())\n r = Record()\n r.cat = 'Hot Pocket'\n self.assertEqual(r.cat, 'HOT POCKET')\n",
"step-4": "from __future__ import absolute_import\nfrom tests import unittest\nfrom kepler.descriptors import *\n\n\nclass DescriptorsTestCase(unittest.TestCase):\n\n def testEnumDefaultsToNoopMapper(self):\n\n\n class Record(object):\n cat = Enum(name='cat', enums=['Lucy Cat', 'Hot Pocket'])\n r = Record()\n r.cat = 'Lucy Cat'\n self.assertEqual(r.cat, 'Lucy Cat')\n\n def testEnumAppliesProvidedMapper(self):\n\n\n class Record(object):\n cat = Enum(name='cat', enums=['LUCY CAT', 'HOT POCKET'], mapper\n =lambda x: x.upper())\n r = Record()\n r.cat = 'Hot Pocket'\n self.assertEqual(r.cat, 'HOT POCKET')\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom tests import unittest\nfrom kepler.descriptors import *\n\nclass DescriptorsTestCase(unittest.TestCase):\n def testEnumDefaultsToNoopMapper(self):\n class Record(object):\n cat = Enum(name='cat', enums=['Lucy Cat', 'Hot Pocket'])\n\n r = Record()\n r.cat = 'Lucy Cat'\n self.assertEqual(r.cat, 'Lucy Cat')\n\n def testEnumAppliesProvidedMapper(self):\n class Record(object):\n cat = Enum(name='cat', enums=['LUCY CAT', 'HOT POCKET'],\n mapper=lambda x: x.upper())\n\n r = Record()\n r.cat = 'Hot Pocket'\n self.assertEqual(r.cat, 'HOT POCKET')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class FFTPricing:
def __init__(self, option: Option, riskFreeRate, volatility,
samplePoints, bandwidth, dampingFactor, underlyingModel='GBM'):
self.__option = option
self.__r = riskFreeRate
self.__sigma = volatility
self.__N = samplePoints
self.__B = bandwidth
self.__alpha = dampingFactor
self.__model = underlyingModel
<|reserved_special_token_0|>
def __fourierTransform(self, omega):
alpha = self.__alpha
r = self.__r
T = self.__option.timeToExpiry
q_hat = self.__charactersticFunc(omega)
num = np.exp(-r * T) * q_hat
den = (alpha - 1.0j * omega) * (alpha - 1.0j * omega + 1)
return num / den
def optionPrice(self):
if not self.__option.expiryType == 'European':
print('Not a European Option')
return 0.0
K = self.__option.strikePrice
N = self.__N
B = self.__B
alpha = self.__alpha
h = B / (N - 1)
omega = np.arange(0, N) * h
dk = 2 * np.pi / (h * N)
k = np.log(20) + np.arange(0, N) * dk
dw = np.zeros(N)
dw[0] = h / 2
dw[1:] = h
V = np.zeros(N)
for n in range(N):
nu_hat = self.__fourierTransform(omega)
inner_sum = np.sum(np.exp(1.0j * omega * k[n]) * nu_hat * dw)
V[n] = (np.exp(-alpha * k[n]) / np.pi * inner_sum).real
val = interp1d(k, V)
return float('{0:.2f}'.format(val(np.log(K))))
def __repr__(self):
return 'FFTPricing({}, {}, {}, {}, {}, {})'.format(self.__option,
self.__r, self.__sigma, self.__N, self.__B, self.__alpha)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FFTPricing:
def __init__(self, option: Option, riskFreeRate, volatility,
samplePoints, bandwidth, dampingFactor, underlyingModel='GBM'):
self.__option = option
self.__r = riskFreeRate
self.__sigma = volatility
self.__N = samplePoints
self.__B = bandwidth
self.__alpha = dampingFactor
self.__model = underlyingModel
def __charactersticFunc(self, omega):
S0 = self.__option.underlyingPrice
r = self.__r
T = self.__option.timeToExpiry
sigma = self.__sigma
alpha = self.__alpha
if self.__model == 'GBM':
x0 = np.log(S0)
mu = x0 + (r - sigma ** 2 / 2) * T
sig = sigma ** 2 * T / 2
omega_prime = omega + 1.0j * (alpha + 1)
return np.exp(-1.0j * mu * omega_prime - sig * omega_prime ** 2)
elif self.__model == 'VG':
pass
def __fourierTransform(self, omega):
alpha = self.__alpha
r = self.__r
T = self.__option.timeToExpiry
q_hat = self.__charactersticFunc(omega)
num = np.exp(-r * T) * q_hat
den = (alpha - 1.0j * omega) * (alpha - 1.0j * omega + 1)
return num / den
def optionPrice(self):
if not self.__option.expiryType == 'European':
print('Not a European Option')
return 0.0
K = self.__option.strikePrice
N = self.__N
B = self.__B
alpha = self.__alpha
h = B / (N - 1)
omega = np.arange(0, N) * h
dk = 2 * np.pi / (h * N)
k = np.log(20) + np.arange(0, N) * dk
dw = np.zeros(N)
dw[0] = h / 2
dw[1:] = h
V = np.zeros(N)
for n in range(N):
nu_hat = self.__fourierTransform(omega)
inner_sum = np.sum(np.exp(1.0j * omega * k[n]) * nu_hat * dw)
V[n] = (np.exp(-alpha * k[n]) / np.pi * inner_sum).real
val = interp1d(k, V)
return float('{0:.2f}'.format(val(np.log(K))))
def __repr__(self):
return 'FFTPricing({}, {}, {}, {}, {}, {})'.format(self.__option,
self.__r, self.__sigma, self.__N, self.__B, self.__alpha)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FFTPricing:
def __init__(self, option: Option, riskFreeRate, volatility,
samplePoints, bandwidth, dampingFactor, underlyingModel='GBM'):
self.__option = option
self.__r = riskFreeRate
self.__sigma = volatility
self.__N = samplePoints
self.__B = bandwidth
self.__alpha = dampingFactor
self.__model = underlyingModel
def __charactersticFunc(self, omega):
S0 = self.__option.underlyingPrice
r = self.__r
T = self.__option.timeToExpiry
sigma = self.__sigma
alpha = self.__alpha
if self.__model == 'GBM':
x0 = np.log(S0)
mu = x0 + (r - sigma ** 2 / 2) * T
sig = sigma ** 2 * T / 2
omega_prime = omega + 1.0j * (alpha + 1)
return np.exp(-1.0j * mu * omega_prime - sig * omega_prime ** 2)
elif self.__model == 'VG':
pass
def __fourierTransform(self, omega):
alpha = self.__alpha
r = self.__r
T = self.__option.timeToExpiry
q_hat = self.__charactersticFunc(omega)
num = np.exp(-r * T) * q_hat
den = (alpha - 1.0j * omega) * (alpha - 1.0j * omega + 1)
return num / den
def optionPrice(self):
if not self.__option.expiryType == 'European':
print('Not a European Option')
return 0.0
K = self.__option.strikePrice
N = self.__N
B = self.__B
alpha = self.__alpha
h = B / (N - 1)
omega = np.arange(0, N) * h
dk = 2 * np.pi / (h * N)
k = np.log(20) + np.arange(0, N) * dk
dw = np.zeros(N)
dw[0] = h / 2
dw[1:] = h
V = np.zeros(N)
for n in range(N):
nu_hat = self.__fourierTransform(omega)
inner_sum = np.sum(np.exp(1.0j * omega * k[n]) * nu_hat * dw)
V[n] = (np.exp(-alpha * k[n]) / np.pi * inner_sum).real
val = interp1d(k, V)
return float('{0:.2f}'.format(val(np.log(K))))
def __repr__(self):
return 'FFTPricing({}, {}, {}, {}, {}, {})'.format(self.__option,
self.__r, self.__sigma, self.__N, self.__B, self.__alpha)
if __name__ == '__main__':
from option import European
S0 = 100
K = 110
r = 0.1
T = 1
volatility = 0.25
N = 2 ** 10
B = 50
alpha = 10.0
print(
'------------------------------------------------------------------' +
'----------------------------')
option = European(S0, K, T, 'Call')
fftPricing = FFTPricing(option, r, volatility, N, B, alpha)
print(fftPricing)
print('FFT price for Call:', fftPricing.optionPrice())
print(
'------------------------------------------------------------------' +
'----------------------------')
option = European(S0, K, T, 'Put')
fftPricing = FFTPricing(option, r, volatility, N, B, -alpha)
print(fftPricing)
print('FFT price for Put:', fftPricing.optionPrice())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from scipy.interpolate import interp1d
from option import Option
class FFTPricing:
def __init__(self, option: Option, riskFreeRate, volatility,
samplePoints, bandwidth, dampingFactor, underlyingModel='GBM'):
self.__option = option
self.__r = riskFreeRate
self.__sigma = volatility
self.__N = samplePoints
self.__B = bandwidth
self.__alpha = dampingFactor
self.__model = underlyingModel
def __charactersticFunc(self, omega):
S0 = self.__option.underlyingPrice
r = self.__r
T = self.__option.timeToExpiry
sigma = self.__sigma
alpha = self.__alpha
if self.__model == 'GBM':
x0 = np.log(S0)
mu = x0 + (r - sigma ** 2 / 2) * T
sig = sigma ** 2 * T / 2
omega_prime = omega + 1.0j * (alpha + 1)
return np.exp(-1.0j * mu * omega_prime - sig * omega_prime ** 2)
elif self.__model == 'VG':
pass
def __fourierTransform(self, omega):
alpha = self.__alpha
r = self.__r
T = self.__option.timeToExpiry
q_hat = self.__charactersticFunc(omega)
num = np.exp(-r * T) * q_hat
den = (alpha - 1.0j * omega) * (alpha - 1.0j * omega + 1)
return num / den
def optionPrice(self):
if not self.__option.expiryType == 'European':
print('Not a European Option')
return 0.0
K = self.__option.strikePrice
N = self.__N
B = self.__B
alpha = self.__alpha
h = B / (N - 1)
omega = np.arange(0, N) * h
dk = 2 * np.pi / (h * N)
k = np.log(20) + np.arange(0, N) * dk
dw = np.zeros(N)
dw[0] = h / 2
dw[1:] = h
V = np.zeros(N)
for n in range(N):
nu_hat = self.__fourierTransform(omega)
inner_sum = np.sum(np.exp(1.0j * omega * k[n]) * nu_hat * dw)
V[n] = (np.exp(-alpha * k[n]) / np.pi * inner_sum).real
val = interp1d(k, V)
return float('{0:.2f}'.format(val(np.log(K))))
def __repr__(self):
return 'FFTPricing({}, {}, {}, {}, {}, {})'.format(self.__option,
self.__r, self.__sigma, self.__N, self.__B, self.__alpha)
if __name__ == '__main__':
from option import European
S0 = 100
K = 110
r = 0.1
T = 1
volatility = 0.25
N = 2 ** 10
B = 50
alpha = 10.0
print(
'------------------------------------------------------------------' +
'----------------------------')
option = European(S0, K, T, 'Call')
fftPricing = FFTPricing(option, r, volatility, N, B, alpha)
print(fftPricing)
print('FFT price for Call:', fftPricing.optionPrice())
print(
'------------------------------------------------------------------' +
'----------------------------')
option = European(S0, K, T, 'Put')
fftPricing = FFTPricing(option, r, volatility, N, B, -alpha)
print(fftPricing)
print('FFT price for Put:', fftPricing.optionPrice())
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 01:32:26 2019
@author: himanshu
"""
import numpy as np
from scipy.interpolate import interp1d
from option import Option
class FFTPricing:
def __init__(self,
option : Option,
riskFreeRate,
volatility,
samplePoints,
bandwidth,
dampingFactor,
underlyingModel = 'GBM'):
self.__option = option
self.__r = riskFreeRate
self.__sigma = volatility
self.__N = samplePoints
self.__B = bandwidth
self.__alpha = dampingFactor
self.__model = underlyingModel
# Computes the characterstic function of a GBM.
def __charactersticFunc(self, omega):
S0 = self.__option.underlyingPrice
r = self.__r
T = self.__option.timeToExpiry
sigma = self.__sigma
alpha = self.__alpha
if self.__model == 'GBM':
x0 = np.log(S0)
mu = x0 + ((r - (sigma**2)/2)*(T))
sig = (sigma**2)*(T)/2
omega_prime = omega + 1j*(alpha+1)
return np.exp(-1j*mu*omega_prime - sig*(omega_prime**2))
elif self.__model == 'VG':
pass
# Computes the Fourier Transform of a GBM.
def __fourierTransform(self, omega):
alpha = self.__alpha
r = self.__r
T = self.__option.timeToExpiry
q_hat = self.__charactersticFunc(omega)
num = np.exp(-r*(T))*q_hat
den = (alpha - 1j*omega)*(alpha - (1j*omega) + 1)
return num/den
def optionPrice(self):
if not self.__option.expiryType == 'European':
print('Not a European Option')
return 0.0
K = self.__option.strikePrice
N = self.__N
B = self.__B
alpha = self.__alpha
h = B/(N-1)
omega = np.arange(0,N)*h
dk = 2*np.pi/(h*N)
k = np.log(20) + np.arange(0,N)*dk
dw = np.zeros(N)
dw[0] = h/2
dw[1:] = h
# FFT Algorithm
V = np.zeros(N)
for n in range(N):
nu_hat = self.__fourierTransform(omega)
inner_sum = np.sum(np.exp(1j*omega*k[n])*nu_hat*dw)
V[n] = ((np.exp(-alpha*k[n])/np.pi)*inner_sum).real
val = interp1d(k, V)
return float('{0:.2f}'.format(val(np.log(K))))
def __repr__(self):
return "FFTPricing({}, {}, {}, {}, {}, {})"\
.format(self.__option,
self.__r,
self.__sigma,
self.__N,
self.__B,
self.__alpha)
if __name__ == "__main__":
from option import European
S0 = 100
K = 110
r = 0.10
T = 1
volatility = 0.25
N = 2**10
B = 50
alpha = 10.0
print('------------------------------------------------------------------'
+'----------------------------')
option = European(S0, K, T, 'Call')
fftPricing = FFTPricing(option, r, volatility, N, B, alpha)
print(fftPricing)
print('FFT price for Call:', fftPricing.optionPrice())
print('------------------------------------------------------------------'
+'----------------------------')
option = European(S0, K, T, 'Put')
fftPricing = FFTPricing(option, r, volatility, N, B, -alpha)
print(fftPricing)
print('FFT price for Put:', fftPricing.optionPrice())
|
flexible
|
{
"blob_id": "25987c15c28e3939f9f531dbc1d4bd9bf622b5a9",
"index": 5691,
"step-1": "<mask token>\n\n\nclass FFTPricing:\n\n def __init__(self, option: Option, riskFreeRate, volatility,\n samplePoints, bandwidth, dampingFactor, underlyingModel='GBM'):\n self.__option = option\n self.__r = riskFreeRate\n self.__sigma = volatility\n self.__N = samplePoints\n self.__B = bandwidth\n self.__alpha = dampingFactor\n self.__model = underlyingModel\n <mask token>\n\n def __fourierTransform(self, omega):\n alpha = self.__alpha\n r = self.__r\n T = self.__option.timeToExpiry\n q_hat = self.__charactersticFunc(omega)\n num = np.exp(-r * T) * q_hat\n den = (alpha - 1.0j * omega) * (alpha - 1.0j * omega + 1)\n return num / den\n\n def optionPrice(self):\n if not self.__option.expiryType == 'European':\n print('Not a European Option')\n return 0.0\n K = self.__option.strikePrice\n N = self.__N\n B = self.__B\n alpha = self.__alpha\n h = B / (N - 1)\n omega = np.arange(0, N) * h\n dk = 2 * np.pi / (h * N)\n k = np.log(20) + np.arange(0, N) * dk\n dw = np.zeros(N)\n dw[0] = h / 2\n dw[1:] = h\n V = np.zeros(N)\n for n in range(N):\n nu_hat = self.__fourierTransform(omega)\n inner_sum = np.sum(np.exp(1.0j * omega * k[n]) * nu_hat * dw)\n V[n] = (np.exp(-alpha * k[n]) / np.pi * inner_sum).real\n val = interp1d(k, V)\n return float('{0:.2f}'.format(val(np.log(K))))\n\n def __repr__(self):\n return 'FFTPricing({}, {}, {}, {}, {}, {})'.format(self.__option,\n self.__r, self.__sigma, self.__N, self.__B, self.__alpha)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FFTPricing:\n\n def __init__(self, option: Option, riskFreeRate, volatility,\n samplePoints, bandwidth, dampingFactor, underlyingModel='GBM'):\n self.__option = option\n self.__r = riskFreeRate\n self.__sigma = volatility\n self.__N = samplePoints\n self.__B = bandwidth\n self.__alpha = dampingFactor\n self.__model = underlyingModel\n\n def __charactersticFunc(self, omega):\n S0 = self.__option.underlyingPrice\n r = self.__r\n T = self.__option.timeToExpiry\n sigma = self.__sigma\n alpha = self.__alpha\n if self.__model == 'GBM':\n x0 = np.log(S0)\n mu = x0 + (r - sigma ** 2 / 2) * T\n sig = sigma ** 2 * T / 2\n omega_prime = omega + 1.0j * (alpha + 1)\n return np.exp(-1.0j * mu * omega_prime - sig * omega_prime ** 2)\n elif self.__model == 'VG':\n pass\n\n def __fourierTransform(self, omega):\n alpha = self.__alpha\n r = self.__r\n T = self.__option.timeToExpiry\n q_hat = self.__charactersticFunc(omega)\n num = np.exp(-r * T) * q_hat\n den = (alpha - 1.0j * omega) * (alpha - 1.0j * omega + 1)\n return num / den\n\n def optionPrice(self):\n if not self.__option.expiryType == 'European':\n print('Not a European Option')\n return 0.0\n K = self.__option.strikePrice\n N = self.__N\n B = self.__B\n alpha = self.__alpha\n h = B / (N - 1)\n omega = np.arange(0, N) * h\n dk = 2 * np.pi / (h * N)\n k = np.log(20) + np.arange(0, N) * dk\n dw = np.zeros(N)\n dw[0] = h / 2\n dw[1:] = h\n V = np.zeros(N)\n for n in range(N):\n nu_hat = self.__fourierTransform(omega)\n inner_sum = np.sum(np.exp(1.0j * omega * k[n]) * nu_hat * dw)\n V[n] = (np.exp(-alpha * k[n]) / np.pi * inner_sum).real\n val = interp1d(k, V)\n return float('{0:.2f}'.format(val(np.log(K))))\n\n def __repr__(self):\n return 'FFTPricing({}, {}, {}, {}, {}, {})'.format(self.__option,\n self.__r, self.__sigma, self.__N, self.__B, self.__alpha)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FFTPricing:\n\n def __init__(self, option: Option, riskFreeRate, volatility,\n samplePoints, bandwidth, dampingFactor, underlyingModel='GBM'):\n self.__option = option\n self.__r = riskFreeRate\n self.__sigma = volatility\n self.__N = samplePoints\n self.__B = bandwidth\n self.__alpha = dampingFactor\n self.__model = underlyingModel\n\n def __charactersticFunc(self, omega):\n S0 = self.__option.underlyingPrice\n r = self.__r\n T = self.__option.timeToExpiry\n sigma = self.__sigma\n alpha = self.__alpha\n if self.__model == 'GBM':\n x0 = np.log(S0)\n mu = x0 + (r - sigma ** 2 / 2) * T\n sig = sigma ** 2 * T / 2\n omega_prime = omega + 1.0j * (alpha + 1)\n return np.exp(-1.0j * mu * omega_prime - sig * omega_prime ** 2)\n elif self.__model == 'VG':\n pass\n\n def __fourierTransform(self, omega):\n alpha = self.__alpha\n r = self.__r\n T = self.__option.timeToExpiry\n q_hat = self.__charactersticFunc(omega)\n num = np.exp(-r * T) * q_hat\n den = (alpha - 1.0j * omega) * (alpha - 1.0j * omega + 1)\n return num / den\n\n def optionPrice(self):\n if not self.__option.expiryType == 'European':\n print('Not a European Option')\n return 0.0\n K = self.__option.strikePrice\n N = self.__N\n B = self.__B\n alpha = self.__alpha\n h = B / (N - 1)\n omega = np.arange(0, N) * h\n dk = 2 * np.pi / (h * N)\n k = np.log(20) + np.arange(0, N) * dk\n dw = np.zeros(N)\n dw[0] = h / 2\n dw[1:] = h\n V = np.zeros(N)\n for n in range(N):\n nu_hat = self.__fourierTransform(omega)\n inner_sum = np.sum(np.exp(1.0j * omega * k[n]) * nu_hat * dw)\n V[n] = (np.exp(-alpha * k[n]) / np.pi * inner_sum).real\n val = interp1d(k, V)\n return float('{0:.2f}'.format(val(np.log(K))))\n\n def __repr__(self):\n return 'FFTPricing({}, {}, {}, {}, {}, {})'.format(self.__option,\n self.__r, self.__sigma, self.__N, self.__B, self.__alpha)\n\n\nif __name__ == '__main__':\n from option import European\n S0 = 100\n K = 110\n r = 0.1\n T = 1\n volatility = 0.25\n N = 2 ** 10\n B 
= 50\n alpha = 10.0\n print(\n '------------------------------------------------------------------' +\n '----------------------------')\n option = European(S0, K, T, 'Call')\n fftPricing = FFTPricing(option, r, volatility, N, B, alpha)\n print(fftPricing)\n print('FFT price for Call:', fftPricing.optionPrice())\n print(\n '------------------------------------------------------------------' +\n '----------------------------')\n option = European(S0, K, T, 'Put')\n fftPricing = FFTPricing(option, r, volatility, N, B, -alpha)\n print(fftPricing)\n print('FFT price for Put:', fftPricing.optionPrice())\n",
"step-4": "<mask token>\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom option import Option\n\n\nclass FFTPricing:\n\n def __init__(self, option: Option, riskFreeRate, volatility,\n samplePoints, bandwidth, dampingFactor, underlyingModel='GBM'):\n self.__option = option\n self.__r = riskFreeRate\n self.__sigma = volatility\n self.__N = samplePoints\n self.__B = bandwidth\n self.__alpha = dampingFactor\n self.__model = underlyingModel\n\n def __charactersticFunc(self, omega):\n S0 = self.__option.underlyingPrice\n r = self.__r\n T = self.__option.timeToExpiry\n sigma = self.__sigma\n alpha = self.__alpha\n if self.__model == 'GBM':\n x0 = np.log(S0)\n mu = x0 + (r - sigma ** 2 / 2) * T\n sig = sigma ** 2 * T / 2\n omega_prime = omega + 1.0j * (alpha + 1)\n return np.exp(-1.0j * mu * omega_prime - sig * omega_prime ** 2)\n elif self.__model == 'VG':\n pass\n\n def __fourierTransform(self, omega):\n alpha = self.__alpha\n r = self.__r\n T = self.__option.timeToExpiry\n q_hat = self.__charactersticFunc(omega)\n num = np.exp(-r * T) * q_hat\n den = (alpha - 1.0j * omega) * (alpha - 1.0j * omega + 1)\n return num / den\n\n def optionPrice(self):\n if not self.__option.expiryType == 'European':\n print('Not a European Option')\n return 0.0\n K = self.__option.strikePrice\n N = self.__N\n B = self.__B\n alpha = self.__alpha\n h = B / (N - 1)\n omega = np.arange(0, N) * h\n dk = 2 * np.pi / (h * N)\n k = np.log(20) + np.arange(0, N) * dk\n dw = np.zeros(N)\n dw[0] = h / 2\n dw[1:] = h\n V = np.zeros(N)\n for n in range(N):\n nu_hat = self.__fourierTransform(omega)\n inner_sum = np.sum(np.exp(1.0j * omega * k[n]) * nu_hat * dw)\n V[n] = (np.exp(-alpha * k[n]) / np.pi * inner_sum).real\n val = interp1d(k, V)\n return float('{0:.2f}'.format(val(np.log(K))))\n\n def __repr__(self):\n return 'FFTPricing({}, {}, {}, {}, {}, {})'.format(self.__option,\n self.__r, self.__sigma, self.__N, self.__B, self.__alpha)\n\n\nif __name__ == '__main__':\n from option import 
European\n S0 = 100\n K = 110\n r = 0.1\n T = 1\n volatility = 0.25\n N = 2 ** 10\n B = 50\n alpha = 10.0\n print(\n '------------------------------------------------------------------' +\n '----------------------------')\n option = European(S0, K, T, 'Call')\n fftPricing = FFTPricing(option, r, volatility, N, B, alpha)\n print(fftPricing)\n print('FFT price for Call:', fftPricing.optionPrice())\n print(\n '------------------------------------------------------------------' +\n '----------------------------')\n option = European(S0, K, T, 'Put')\n fftPricing = FFTPricing(option, r, volatility, N, B, -alpha)\n print(fftPricing)\n print('FFT price for Put:', fftPricing.optionPrice())\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 14 01:32:26 2019\n\n@author: himanshu\n\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom option import Option\n\nclass FFTPricing:\n \n def __init__(self,\n option : Option,\n riskFreeRate,\n volatility,\n samplePoints,\n bandwidth,\n dampingFactor,\n underlyingModel = 'GBM'):\n \n self.__option = option\n self.__r = riskFreeRate\n self.__sigma = volatility\n self.__N = samplePoints\n self.__B = bandwidth\n self.__alpha = dampingFactor\n self.__model = underlyingModel\n \n \n # Computes the characterstic function of a GBM.\n def __charactersticFunc(self, omega):\n S0 = self.__option.underlyingPrice\n r = self.__r\n T = self.__option.timeToExpiry\n sigma = self.__sigma\n alpha = self.__alpha\n \n if self.__model == 'GBM':\n x0 = np.log(S0)\n mu = x0 + ((r - (sigma**2)/2)*(T))\n sig = (sigma**2)*(T)/2\n omega_prime = omega + 1j*(alpha+1)\n return np.exp(-1j*mu*omega_prime - sig*(omega_prime**2))\n elif self.__model == 'VG':\n pass\n \n # Computes the Fourier Transform of a GBM.\n def __fourierTransform(self, omega):\n alpha = self.__alpha\n r = self.__r\n T = self.__option.timeToExpiry\n \n q_hat = self.__charactersticFunc(omega)\n num = np.exp(-r*(T))*q_hat\n den = (alpha - 1j*omega)*(alpha - (1j*omega) + 1)\n return num/den\n \n def optionPrice(self):\n if not self.__option.expiryType == 'European':\n print('Not a European Option')\n return 0.0\n \n K = self.__option.strikePrice\n \n N = self.__N\n B = self.__B\n alpha = self.__alpha\n \n h = B/(N-1)\n omega = np.arange(0,N)*h\n \n dk = 2*np.pi/(h*N)\n k = np.log(20) + np.arange(0,N)*dk\n \n dw = np.zeros(N)\n dw[0] = h/2\n dw[1:] = h\n \n # FFT Algorithm\n V = np.zeros(N)\n for n in range(N):\n nu_hat = self.__fourierTransform(omega)\n inner_sum = np.sum(np.exp(1j*omega*k[n])*nu_hat*dw)\n V[n] = ((np.exp(-alpha*k[n])/np.pi)*inner_sum).real\n \n val = interp1d(k, V)\n return 
float('{0:.2f}'.format(val(np.log(K))))\n \n def __repr__(self):\n \n return \"FFTPricing({}, {}, {}, {}, {}, {})\"\\\n .format(self.__option,\n self.__r,\n self.__sigma,\n self.__N,\n self.__B,\n self.__alpha)\n \nif __name__ == \"__main__\":\n from option import European\n S0 = 100\n K = 110\n r = 0.10\n T = 1\n volatility = 0.25\n \n N = 2**10\n B = 50\n alpha = 10.0\n \n print('------------------------------------------------------------------'\n +'----------------------------')\n option = European(S0, K, T, 'Call')\n fftPricing = FFTPricing(option, r, volatility, N, B, alpha)\n print(fftPricing)\n print('FFT price for Call:', fftPricing.optionPrice())\n \n print('------------------------------------------------------------------'\n +'----------------------------')\n option = European(S0, K, T, 'Put')\n fftPricing = FFTPricing(option, r, volatility, N, B, -alpha)\n print(fftPricing)\n print('FFT price for Put:', fftPricing.optionPrice())\n ",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class MemberTests(CustomAPITestCase):
def setUp(self):
"""
Make a user for authenticating and
testing community actions
"""
owner = self.user_model.objects.create(password=make_password(
'user1'), email='user1@test.com', first_name='1', last_name=
'User', is_active=True)
moderator = self.user_model.objects.create(password=make_password(
'user2'), email='user2@test.com', first_name='2', last_name=
'User', is_active=True)
member = self.user_model.objects.create(password=make_password(
'user3'), email='user3@test.com', first_name='3', last_name=
'User', is_active=True)
other = self.user_model.objects.create(password=make_password(
'user4'), email='user4@test.com', first_name='4', last_name=
'User', is_active=True)
Profile.objects.create(user=owner)
Profile.objects.create(user=moderator)
Profile.objects.create(user=member)
Profile.objects.create(user=other)
lcom1 = LocalCommunity.objects.create(name='lcom1', description=
'descl1', city='Paris', country='FR', gps_x=0, gps_y=0)
lcom2 = LocalCommunity.objects.create(name='lcom2', description=
'descl2', city='Paris', country='FR', gps_x=0, gps_y=0,
auto_accept_member=True)
lcom3 = LocalCommunity.objects.create(name='lcom3', description=
'descl3', city='Paris', country='FR', gps_x=0, gps_y=0)
lcom4 = LocalCommunity.objects.create(name='lcom4', description=
'descl4', city='Paris', country='FR', gps_x=0, gps_y=0,
auto_accept_member=True)
lcom5 = LocalCommunity.objects.create(name='lcom5', description=
'descl5', city='Paris', country='FR', gps_x=0, gps_y=0)
tcom1 = TransportCommunity.objects.create(name='tcom1', description
='desct1', departure='dep1', arrival='arr1', auto_accept_member
=True)
tcom2 = TransportCommunity.objects.create(name='tcom2', description
='desct2', departure='dep2', arrival='arr2')
tcom3 = TransportCommunity.objects.create(name='tcom3', description
='desct3', departure='dep3', arrival='arr3')
tcom4 = TransportCommunity.objects.create(name='tcom4', description
='desct4', departure='dep4', arrival='arr4')
tcom5 = TransportCommunity.objects.create(name='tcom5', description
='desct5', departure='dep4', arrival='arr5')
own_mbr = Member.objects.create(user=owner, community=lcom1, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom2, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom3, role=
'0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom3,
role='1', status='0')
spl_mbr = Member.objects.create(user=member, community=lcom3, role=
'2', status='0')
own_mbr = Member.objects.create(user=owner, community=lcom4, role=
'0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom4,
role='1', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom4, role=
'2', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom5, role=
'0', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom5, role=
'2', status='2')
own_mbr = Member.objects.create(user=owner, community=tcom1, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom2, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom3, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom4, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom5, role=
'0', status='1')
def test_setup(self):
self.assertEqual(4, self.user_model.objects.all().count())
self.assertEqual(10, Community.objects.all().count())
self.assertEqual(15, Member.objects.all().count())
<|reserved_special_token_0|>
def test_join_community_not_auto_accept(self):
"""
Ensure an authenticated user can join a community
"""
url = '/api/v1/communities/1/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=1)
self.assertEqual(community, member.community)
self.assertEqual('2', member.role)
self.assertEqual('0', member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')
self.assertTrue('demande à faire' in mail.outbox[0].body)
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(16, Member.objects.all().count())
def test_join_community_auto_accept(self):
"""
Ensure an authenticated user can join a community
"""
url = '/api/v1/communities/2/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=2)
self.assertEqual(community, member.community)
self.assertEqual('2', member.role)
self.assertEqual('1', member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')
self.assertTrue('fait désormais' in mail.outbox[0].body)
def test_leave_community(self):
"""
Ensure a member can leave a community
"""
url = '/api/v1/communities/3/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)
self.assertEqual(14, Member.objects.all().count())
def test_leave_community_banned(self):
"""
Ensure a banned member cannot leave a community
"""
url = '/api/v1/communities/5/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)
self.assertEqual(15, Member.objects.all().count())
<|reserved_special_token_0|>
def test_list_my_memberships_member(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual(5, data['results'][2]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('2', data['results'][2]['status'])
self.assertEqual('2', data['results'][0]['role'])
self.assertEqual('2', data['results'][1]['role'])
self.assertEqual('2', data['results'][2]['role'])
def test_list_my_memberships_moderator(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(2, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('1', data['results'][0]['role'])
self.assertEqual('1', data['results'][1]['role'])
<|reserved_special_token_0|>
def test_list_members_without_auth(self):
"""
Ensure non authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_without_member_rights(self):
"""
Ensure a non-member authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_list_members_with_mod_rights(self):
"""
Ensure a moderator can list community members
"""
url = '/api/v1/communities/4/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(6, data['results'][0]['id'])
self.assertEqual(1, data['results'][0]['user']['id'])
self.assertEqual('0', data['results'][0]['role'])
self.assertEqual('1', data['results'][0]['status'])
self.assertEqual(7, data['results'][1]['id'])
self.assertEqual(2, data['results'][1]['user']['id'])
self.assertEqual('1', data['results'][1]['role'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual(8, data['results'][2]['id'])
self.assertEqual(3, data['results'][2]['user']['id'])
self.assertEqual('2', data['results'][2]['role'])
self.assertEqual('1', data['results'][2]['status'])
def test_list_members_with_owner_rights(self):
"""
Ensure an owner can list community members
"""
url = '/api/v1/communities/4/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
def test_accept_member_without_auth(self):
"""
Ensure a non authenticated user can not accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
<|reserved_special_token_0|>
def test_accept_member_with_owner(self):
"""
Ensure an owner can accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(5, data['id'])
self.assertEqual('1', data['status'])
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[Smartribe] Membership accepted')
<|reserved_special_token_0|>
def test_accept_member_with_owner_not_found(self):
"""
Ensure member exists
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 19}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
<|reserved_special_token_0|>
def test_accept_member_with_moderator(self):
"""
Ensure an moderator can accept members
"""
mod = Member.objects.get(id=4)
mod.status = '1'
mod.save()
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user2'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(5, data['id'])
self.assertEqual('1', data['status'])
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[Smartribe] Membership accepted')
def test_ban_member_without_auth(self):
"""
"""
url = '/api/v1/communities/4/ban_member/'
data = {'id': 8}
response = self.client.post(url, data, format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_ban_member_with_non_member(self):
"""
"""
url = '/api/v1/communities/4/ban_member/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user3'), format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_ban_member_with_owner(self):
"""
"""
url = '/api/v1/communities/4/ban_member/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(8, data['id'])
self.assertEqual('2', data['status'])
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[Smartribe] Membership cancelled')
<|reserved_special_token_0|>
def test_promote_user_without_auth(self):
"""
"""
url = '/api/v1/communities/4/promote_moderator/'
data = {'id': 8}
response = self.client.post(url, data, format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
<|reserved_special_token_0|>
def test_promote_user_with_moderator(self):
"""
"""
url = '/api/v1/communities/4/promote_moderator/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user2'), format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_promote_user_with_owner(self):
"""
"""
url = '/api/v1/communities/4/promote_moderator/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(8, data['id'])
self.assertEqual('1', data['role'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MemberTests(CustomAPITestCase):
def setUp(self):
"""
Make a user for authenticating and
testing community actions
"""
owner = self.user_model.objects.create(password=make_password(
'user1'), email='user1@test.com', first_name='1', last_name=
'User', is_active=True)
moderator = self.user_model.objects.create(password=make_password(
'user2'), email='user2@test.com', first_name='2', last_name=
'User', is_active=True)
member = self.user_model.objects.create(password=make_password(
'user3'), email='user3@test.com', first_name='3', last_name=
'User', is_active=True)
other = self.user_model.objects.create(password=make_password(
'user4'), email='user4@test.com', first_name='4', last_name=
'User', is_active=True)
Profile.objects.create(user=owner)
Profile.objects.create(user=moderator)
Profile.objects.create(user=member)
Profile.objects.create(user=other)
lcom1 = LocalCommunity.objects.create(name='lcom1', description=
'descl1', city='Paris', country='FR', gps_x=0, gps_y=0)
lcom2 = LocalCommunity.objects.create(name='lcom2', description=
'descl2', city='Paris', country='FR', gps_x=0, gps_y=0,
auto_accept_member=True)
lcom3 = LocalCommunity.objects.create(name='lcom3', description=
'descl3', city='Paris', country='FR', gps_x=0, gps_y=0)
lcom4 = LocalCommunity.objects.create(name='lcom4', description=
'descl4', city='Paris', country='FR', gps_x=0, gps_y=0,
auto_accept_member=True)
lcom5 = LocalCommunity.objects.create(name='lcom5', description=
'descl5', city='Paris', country='FR', gps_x=0, gps_y=0)
tcom1 = TransportCommunity.objects.create(name='tcom1', description
='desct1', departure='dep1', arrival='arr1', auto_accept_member
=True)
tcom2 = TransportCommunity.objects.create(name='tcom2', description
='desct2', departure='dep2', arrival='arr2')
tcom3 = TransportCommunity.objects.create(name='tcom3', description
='desct3', departure='dep3', arrival='arr3')
tcom4 = TransportCommunity.objects.create(name='tcom4', description
='desct4', departure='dep4', arrival='arr4')
tcom5 = TransportCommunity.objects.create(name='tcom5', description
='desct5', departure='dep4', arrival='arr5')
own_mbr = Member.objects.create(user=owner, community=lcom1, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom2, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom3, role=
'0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom3,
role='1', status='0')
spl_mbr = Member.objects.create(user=member, community=lcom3, role=
'2', status='0')
own_mbr = Member.objects.create(user=owner, community=lcom4, role=
'0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom4,
role='1', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom4, role=
'2', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom5, role=
'0', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom5, role=
'2', status='2')
own_mbr = Member.objects.create(user=owner, community=tcom1, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom2, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom3, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom4, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom5, role=
'0', status='1')
def test_setup(self):
self.assertEqual(4, self.user_model.objects.all().count())
self.assertEqual(10, Community.objects.all().count())
self.assertEqual(15, Member.objects.all().count())
def test_join_wrong_community(self):
"""
Ensure an authenticated user cannot join a community that does not exists
"""
url = '/api/v1/communities/15/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(15, Member.objects.all().count())
def test_join_community_not_auto_accept(self):
"""
Ensure an authenticated user can join a community
"""
url = '/api/v1/communities/1/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=1)
self.assertEqual(community, member.community)
self.assertEqual('2', member.role)
self.assertEqual('0', member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')
self.assertTrue('demande à faire' in mail.outbox[0].body)
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(16, Member.objects.all().count())
def test_join_community_auto_accept(self):
"""
Ensure an authenticated user can join a community
"""
url = '/api/v1/communities/2/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=2)
self.assertEqual(community, member.community)
self.assertEqual('2', member.role)
self.assertEqual('1', member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')
self.assertTrue('fait désormais' in mail.outbox[0].body)
def test_leave_community(self):
"""
Ensure a member can leave a community
"""
url = '/api/v1/communities/3/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)
self.assertEqual(14, Member.objects.all().count())
def test_leave_community_banned(self):
"""
Ensure a banned member cannot leave a community
"""
url = '/api/v1/communities/5/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)
self.assertEqual(15, Member.objects.all().count())
def test_list_my_memberships_without_auth(self):
"""
Ensure an unauthenticated user cannot list memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_my_memberships_member(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual(5, data['results'][2]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('2', data['results'][2]['status'])
self.assertEqual('2', data['results'][0]['role'])
self.assertEqual('2', data['results'][1]['role'])
self.assertEqual('2', data['results'][2]['role'])
def test_list_my_memberships_moderator(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(2, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('1', data['results'][0]['role'])
self.assertEqual('1', data['results'][1]['role'])
def test_list_my_memberships_owner(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(10, data['count'])
def test_list_members_without_auth(self):
"""
Ensure non authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_without_member_rights(self):
"""
Ensure a non-member authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
<|reserved_special_token_0|>
def test_list_members_with_mod_rights_not_accepted(self):
"""
Ensure a moderator can list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_with_mod_rights(self):
"""
Ensure a moderator can list community members
"""
url = '/api/v1/communities/4/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(6, data['results'][0]['id'])
self.assertEqual(1, data['results'][0]['user']['id'])
self.assertEqual('0', data['results'][0]['role'])
self.assertEqual('1', data['results'][0]['status'])
self.assertEqual(7, data['results'][1]['id'])
self.assertEqual(2, data['results'][1]['user']['id'])
self.assertEqual('1', data['results'][1]['role'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual(8, data['results'][2]['id'])
self.assertEqual(3, data['results'][2]['user']['id'])
self.assertEqual('2', data['results'][2]['role'])
self.assertEqual('1', data['results'][2]['status'])
def test_list_members_with_owner_rights(self):
"""
Ensure an owner can list community members
"""
url = '/api/v1/communities/4/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
def test_accept_member_without_auth(self):
"""
Ensure a non authenticated user can not accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
<|reserved_special_token_0|>
def test_accept_member_with_owner(self):
"""
Ensure an owner can accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(5, data['id'])
self.assertEqual('1', data['status'])
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[Smartribe] Membership accepted')
<|reserved_special_token_0|>
def test_accept_member_with_owner_not_found(self):
"""
Ensure member exists
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 19}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
<|reserved_special_token_0|>
def test_accept_member_with_moderator(self):
"""
Ensure an moderator can accept members
"""
mod = Member.objects.get(id=4)
mod.status = '1'
mod.save()
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user2'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(5, data['id'])
self.assertEqual('1', data['status'])
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[Smartribe] Membership accepted')
def test_ban_member_without_auth(self):
"""
"""
url = '/api/v1/communities/4/ban_member/'
data = {'id': 8}
response = self.client.post(url, data, format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_ban_member_with_non_member(self):
"""
"""
url = '/api/v1/communities/4/ban_member/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user3'), format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_ban_moderator_with_member(self):
"""
"""
url = '/api/v1/communities/4/ban_member/'
data = {'id': 7}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user3'), format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_ban_member_with_owner(self):
"""
"""
url = '/api/v1/communities/4/ban_member/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(8, data['id'])
self.assertEqual('2', data['status'])
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[Smartribe] Membership cancelled')
<|reserved_special_token_0|>
def test_promote_user_without_auth(self):
"""
"""
url = '/api/v1/communities/4/promote_moderator/'
data = {'id': 8}
response = self.client.post(url, data, format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_promote_user_with_user(self):
"""
"""
url = '/api/v1/communities/4/promote_moderator/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user4'), format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_promote_user_with_moderator(self):
"""
"""
url = '/api/v1/communities/4/promote_moderator/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user2'), format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_promote_user_with_owner(self):
"""
"""
url = '/api/v1/communities/4/promote_moderator/'
data = {'id': 8}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(8, data['id'])
self.assertEqual('1', data['role'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MemberTests(CustomAPITestCase):
def setUp(self):
"""
Make a user for authenticating and
testing community actions
"""
owner = self.user_model.objects.create(password=make_password(
'user1'), email='user1@test.com', first_name='1', last_name=
'User', is_active=True)
moderator = self.user_model.objects.create(password=make_password(
'user2'), email='user2@test.com', first_name='2', last_name=
'User', is_active=True)
member = self.user_model.objects.create(password=make_password(
'user3'), email='user3@test.com', first_name='3', last_name=
'User', is_active=True)
other = self.user_model.objects.create(password=make_password(
'user4'), email='user4@test.com', first_name='4', last_name=
'User', is_active=True)
Profile.objects.create(user=owner)
Profile.objects.create(user=moderator)
Profile.objects.create(user=member)
Profile.objects.create(user=other)
lcom1 = LocalCommunity.objects.create(name='lcom1', description=
'descl1', city='Paris', country='FR', gps_x=0, gps_y=0)
lcom2 = LocalCommunity.objects.create(name='lcom2', description=
'descl2', city='Paris', country='FR', gps_x=0, gps_y=0,
auto_accept_member=True)
lcom3 = LocalCommunity.objects.create(name='lcom3', description=
'descl3', city='Paris', country='FR', gps_x=0, gps_y=0)
lcom4 = LocalCommunity.objects.create(name='lcom4', description=
'descl4', city='Paris', country='FR', gps_x=0, gps_y=0,
auto_accept_member=True)
lcom5 = LocalCommunity.objects.create(name='lcom5', description=
'descl5', city='Paris', country='FR', gps_x=0, gps_y=0)
tcom1 = TransportCommunity.objects.create(name='tcom1', description
='desct1', departure='dep1', arrival='arr1', auto_accept_member
=True)
tcom2 = TransportCommunity.objects.create(name='tcom2', description
='desct2', departure='dep2', arrival='arr2')
tcom3 = TransportCommunity.objects.create(name='tcom3', description
='desct3', departure='dep3', arrival='arr3')
tcom4 = TransportCommunity.objects.create(name='tcom4', description
='desct4', departure='dep4', arrival='arr4')
tcom5 = TransportCommunity.objects.create(name='tcom5', description
='desct5', departure='dep4', arrival='arr5')
own_mbr = Member.objects.create(user=owner, community=lcom1, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom2, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom3, role=
'0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom3,
role='1', status='0')
spl_mbr = Member.objects.create(user=member, community=lcom3, role=
'2', status='0')
own_mbr = Member.objects.create(user=owner, community=lcom4, role=
'0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom4,
role='1', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom4, role=
'2', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom5, role=
'0', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom5, role=
'2', status='2')
own_mbr = Member.objects.create(user=owner, community=tcom1, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom2, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom3, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom4, role=
'0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom5, role=
'0', status='1')
def test_setup(self):
self.assertEqual(4, self.user_model.objects.all().count())
self.assertEqual(10, Community.objects.all().count())
self.assertEqual(15, Member.objects.all().count())
def test_join_wrong_community(self):
"""
Ensure an authenticated user cannot join a community that does not exists
"""
url = '/api/v1/communities/15/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(15, Member.objects.all().count())
def test_join_community_not_auto_accept(self):
"""
Ensure an authenticated user can join a community
"""
url = '/api/v1/communities/1/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=1)
self.assertEqual(community, member.community)
self.assertEqual('2', member.role)
self.assertEqual('0', member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')
self.assertTrue('demande à faire' in mail.outbox[0].body)
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(16, Member.objects.all().count())
def test_join_community_auto_accept(self):
"""
Ensure an authenticated user can join a community
"""
url = '/api/v1/communities/2/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=2)
self.assertEqual(community, member.community)
self.assertEqual('2', member.role)
self.assertEqual('1', member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')
self.assertTrue('fait désormais' in mail.outbox[0].body)
def test_leave_community(self):
"""
Ensure a member can leave a community
"""
url = '/api/v1/communities/3/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)
self.assertEqual(14, Member.objects.all().count())
def test_leave_community_banned(self):
"""
Ensure a banned member cannot leave a community
"""
url = '/api/v1/communities/5/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)
self.assertEqual(15, Member.objects.all().count())
def test_list_my_memberships_without_auth(self):
"""
Ensure an unauthenticated user cannot list memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_my_memberships_member(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual(5, data['results'][2]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('2', data['results'][2]['status'])
self.assertEqual('2', data['results'][0]['role'])
self.assertEqual('2', data['results'][1]['role'])
self.assertEqual('2', data['results'][2]['role'])
def test_list_my_memberships_moderator(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(2, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('1', data['results'][0]['role'])
self.assertEqual('1', data['results'][1]['role'])
def test_list_my_memberships_owner(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(10, data['count'])
def test_list_members_without_auth(self):
"""
Ensure non authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_without_member_rights(self):
"""
Ensure a non-member authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_without_mod_rights(self):
"""
Ensure a simple user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_with_mod_rights_not_accepted(self):
"""
Ensure a moderator can list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_with_mod_rights(self):
"""
Ensure a moderator can list community members
"""
url = '/api/v1/communities/4/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(6, data['results'][0]['id'])
self.assertEqual(1, data['results'][0]['user']['id'])
self.assertEqual('0', data['results'][0]['role'])
self.assertEqual('1', data['results'][0]['status'])
self.assertEqual(7, data['results'][1]['id'])
self.assertEqual(2, data['results'][1]['user']['id'])
self.assertEqual('1', data['results'][1]['role'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual(8, data['results'][2]['id'])
self.assertEqual(3, data['results'][2]['user']['id'])
self.assertEqual('2', data['results'][2]['role'])
self.assertEqual('1', data['results'][2]['status'])
def test_list_members_with_owner_rights(self):
"""
Ensure an owner can list community members
"""
url = '/api/v1/communities/4/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
def test_accept_member_without_auth(self):
"""
Ensure a non authenticated user can not accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_accept_member_with_simple_member(self):
"""
Ensure a simple member cannot accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user4'), format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_accept_member_with_owner(self):
"""
Ensure an owner can accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(5, data['id'])
self.assertEqual('1', data['status'])
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[Smartribe] Membership accepted')
<|reserved_special_token_0|>
def test_accept_member_with_owner_not_found(self):
"""
Ensure member exists
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 19}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user1'), format='json')
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_accept_member_with_not_accepted_moderator(self):
"""
Ensure an non accepted moderator cannot accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user2'), format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_accept_member_with_moderator(self):
"""
Ensure an moderator can accept members
"""
mod = Member.objects.get(id=4)
mod.status = '1'
mod.save()
url = '/api/v1/communities/3/accept_member/'
data = {'id': 5}
response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
('user2'), format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(5, data['id'])
self.assertEqual('1', data['status'])
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[Smartribe] Membership accepted')
    def test_ban_member_without_auth(self):
        """
        Ensure a non authenticated user cannot ban a member.
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {'id': 8}
        response = self.client.post(url, data, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
    def test_ban_member_with_non_member(self):
        """
        Ensure a user without moderation rights cannot ban a member.
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {'id': 8}
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
            ('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
    def test_ban_moderator_with_member(self):
        """
        Ensure a simple member cannot ban a moderator (member id 7).
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {'id': 7}
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
            ('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
    def test_ban_owner_with_member(self):
        """
        Ensure a simple member cannot ban the community owner (member id 6).
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {'id': 6}
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
            ('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
<|reserved_special_token_0|>
    def test_ban_member_with_owner(self):
        """
        Ensure an owner can ban a simple member; the membership status is
        switched to '2' and a cancellation email is sent.
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {'id': 8}
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
            ('user1'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        # status '2' presumably means "banned" -- TODO confirm against Member model
        self.assertEqual('2', data['status'])
        # mail is presumably delivered asynchronously -- give it a moment
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
            '[Smartribe] Membership cancelled')
<|reserved_special_token_0|>
    def test_promote_user_without_auth(self):
        """
        Ensure a non authenticated user cannot promote a member to moderator.
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {'id': 8}
        response = self.client.post(url, data, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
    def test_promote_user_with_user(self):
        """
        Ensure a user without moderation rights cannot promote a member.
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {'id': 8}
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
            ('user4'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
    def test_promote_user_with_moderator(self):
        """
        Ensure a moderator cannot promote a member to moderator
        (promotion is presumably owner-only -- TODO confirm in the view).
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {'id': 8}
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
            ('user2'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
    def test_promote_user_with_owner(self):
        """
        Ensure an owner can promote a member to moderator; the membership
        role is switched to '1'.
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {'id': 8}
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth
            ('user1'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        # role '1' presumably means "moderator" -- TODO confirm against Member model
        self.assertEqual('1', data['role'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MemberTests(CustomAPITestCase):
    """Integration tests for the community membership endpoints.

    Covers joining/leaving communities, listing memberships and members,
    and accepting, banning and promoting members, exercised for every
    relevant actor: anonymous, non-member, simple member, moderator, owner.

    Fixes applied: the deprecated ``assertEquals`` alias (removed in
    Python 3.12) is replaced by ``assertEqual``, and the heavily
    duplicated fixture code in ``setUp`` is data-driven.
    """

    def setUp(self):
        """Create four users, ten communities and fifteen memberships.

        Creation order matters: the tests reference users, communities and
        members by their auto-generated primary keys (users 1-4,
        communities 1-10, members 1-15).
        """
        users = []
        for index in range(1, 5):
            user = self.user_model.objects.create(
                password=make_password('user%d' % index),
                email='user%d@test.com' % index,
                first_name=str(index),
                last_name='User',
                is_active=True)
            Profile.objects.create(user=user)
            users.append(user)
        # user4 ("other") gets no membership at all.
        owner, moderator, member = users[:3]

        # Local communities get ids 1-5; only lcom2 and lcom4 auto-accept.
        local_communities = [
            LocalCommunity.objects.create(
                name='lcom%d' % index, description='descl%d' % index,
                city='Paris', country='FR', gps_x=0, gps_y=0,
                auto_accept_member=auto_accept)
            for index, auto_accept in enumerate(
                (False, True, False, True, False), start=1)]
        lcom1, lcom2, lcom3, lcom4, lcom5 = local_communities

        # Transport communities get ids 6-10; only tcom1 auto-accepts.
        # NOTE: tcom5 deliberately reuses departure 'dep4' (as in the
        # original fixtures).
        transport_specs = [('dep1', 'arr1', True), ('dep2', 'arr2', False),
                           ('dep3', 'arr3', False), ('dep4', 'arr4', False),
                           ('dep4', 'arr5', False)]
        transport_communities = [
            TransportCommunity.objects.create(
                name='tcom%d' % index, description='desct%d' % index,
                departure=departure, arrival=arrival,
                auto_accept_member=auto_accept)
            for index, (departure, arrival, auto_accept) in enumerate(
                transport_specs, start=1)]

        # Memberships get ids 1-15: (user, community, role, status).
        # Roles: '0' owner, '1' moderator, '2' simple member.
        # Statuses: '0' pending, '1' accepted, '2' banned.
        memberships = [
            (owner, lcom1, '0', '1'),
            (owner, lcom2, '0', '1'),
            (owner, lcom3, '0', '1'),
            (moderator, lcom3, '1', '0'),
            (member, lcom3, '2', '0'),
            (owner, lcom4, '0', '1'),
            (moderator, lcom4, '1', '1'),
            (member, lcom4, '2', '1'),
            (owner, lcom5, '0', '1'),
            (member, lcom5, '2', '2'),
        ] + [(owner, community, '0', '1')
             for community in transport_communities]
        for user, community, role, member_status in memberships:
            Member.objects.create(user=user, community=community,
                                  role=role, status=member_status)

    def test_setup(self):
        """Sanity-check the fixtures created in setUp."""
        self.assertEqual(4, self.user_model.objects.all().count())
        self.assertEqual(10, Community.objects.all().count())
        self.assertEqual(15, Member.objects.all().count())

    def test_join_wrong_community(self):
        """Ensure an authenticated user cannot join a community that does not exist."""
        url = '/api/v1/communities/15/join_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        # No membership must have been created.
        self.assertEqual(15, Member.objects.all().count())

    def test_join_community_not_auto_accept(self):
        """Ensure a user can join a community; without auto-accept the membership stays pending."""
        url = '/api/v1/communities/1/join_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertEqual(16, Member.objects.all().count())
        member = Member.objects.get(user=self.user_model.objects.get(id=4))
        community = Community.objects.get(id=1)
        self.assertEqual(community, member.community)
        self.assertEqual('2', member.role)    # joins as simple member
        self.assertEqual('0', member.status)  # pending: community is not auto-accept
        self.assertEqual(1, Notification.objects.count())
        time.sleep(1)  # mail is presumably sent asynchronously -- TODO confirm
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')
        self.assertTrue('demande à faire' in mail.outbox[0].body)
        # Joining twice must not create a duplicate membership.
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(16, Member.objects.all().count())

    def test_join_community_auto_accept(self):
        """Ensure joining an auto-accept community accepts the member immediately."""
        url = '/api/v1/communities/2/join_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertEqual(16, Member.objects.all().count())
        member = Member.objects.get(user=self.user_model.objects.get(id=4))
        community = Community.objects.get(id=2)
        self.assertEqual(community, member.community)
        self.assertEqual('2', member.role)    # joins as simple member
        self.assertEqual('1', member.status)  # accepted straight away
        self.assertEqual(1, Notification.objects.count())
        time.sleep(1)  # mail is presumably sent asynchronously -- TODO confirm
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')
        self.assertTrue('fait désormais' in mail.outbox[0].body)

    def test_leave_community(self):
        """Ensure a member can leave a community."""
        url = '/api/v1/communities/3/leave_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
        self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)
        self.assertEqual(14, Member.objects.all().count())

    def test_leave_community_banned(self):
        """Ensure a banned member cannot leave a community."""
        url = '/api/v1/communities/5/leave_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
        self.assertEqual(15, Member.objects.all().count())

    def test_list_my_memberships_without_auth(self):
        """Ensure an unauthenticated user cannot list memberships."""
        url = '/api/v1/communities/0/list_my_memberships/'
        response = self.client.get(url)
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_my_memberships_member(self):
        """Ensure a user can list all his memberships."""
        url = '/api/v1/communities/0/list_my_memberships/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(3, data['count'])
        # user3 belongs to communities 3 (pending), 4 (accepted) and
        # 5 (banned), always as a simple member (role '2').
        expected = [(3, '0'), (4, '1'), (5, '2')]
        for index, (community_id, member_status) in enumerate(expected):
            self.assertEqual(community_id,
                             data['results'][index]['community']['id'])
            self.assertEqual(member_status, data['results'][index]['status'])
            self.assertEqual('2', data['results'][index]['role'])

    def test_list_my_memberships_moderator(self):
        """Ensure a user can list all his memberships."""
        url = '/api/v1/communities/0/list_my_memberships/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(2, data['count'])
        # user2 moderates communities 3 (pending) and 4 (accepted).
        expected = [(3, '0'), (4, '1')]
        for index, (community_id, member_status) in enumerate(expected):
            self.assertEqual(community_id,
                             data['results'][index]['community']['id'])
            self.assertEqual(member_status, data['results'][index]['status'])
            self.assertEqual('1', data['results'][index]['role'])

    def test_list_my_memberships_owner(self):
        """Ensure a user can list all his memberships."""
        url = '/api/v1/communities/0/list_my_memberships/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        # user1 owns all ten communities.
        self.assertEqual(10, response.data['count'])

    def test_list_members_without_auth(self):
        """Ensure non authenticated user cannot list community members."""
        url = '/api/v1/communities/3/retrieve_members/'
        response = self.client.get(url)
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_members_without_member_rights(self):
        """Ensure a non-member authenticated user cannot list community members."""
        url = '/api/v1/communities/3/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_members_without_mod_rights(self):
        """Ensure a simple user cannot list community members."""
        url = '/api/v1/communities/3/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_members_with_mod_rights_not_accepted(self):
        """Ensure a not-yet-accepted moderator cannot list community members."""
        url = '/api/v1/communities/3/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_members_with_mod_rights(self):
        """Ensure a moderator can list community members."""
        url = '/api/v1/communities/4/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(3, data['count'])
        # Expected rows: (member id, user id, role, status).
        expected = [(6, 1, '0', '1'), (7, 2, '1', '1'), (8, 3, '2', '1')]
        for index, (member_id, user_id, role, member_status) in enumerate(expected):
            result = data['results'][index]
            self.assertEqual(member_id, result['id'])
            self.assertEqual(user_id, result['user']['id'])
            self.assertEqual(role, result['role'])
            self.assertEqual(member_status, result['status'])

    def test_list_members_with_owner_rights(self):
        """Ensure an owner can list community members."""
        url = '/api/v1/communities/4/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(3, response.data['count'])

    def test_accept_member_without_auth(self):
        """Ensure a non authenticated user can not accept members."""
        url = '/api/v1/communities/3/accept_member/'
        response = self.client.post(url, {'id': 5}, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_simple_member(self):
        """Ensure a simple member cannot accept members."""
        url = '/api/v1/communities/3/accept_member/'
        response = self.client.post(url, {'id': 5},
                                    HTTP_AUTHORIZATION=self.auth('user4'),
                                    format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_owner(self):
        """Ensure an owner can accept members."""
        url = '/api/v1/communities/3/accept_member/'
        response = self.client.post(url, {'id': 5},
                                    HTTP_AUTHORIZATION=self.auth('user1'),
                                    format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(5, data['id'])
        self.assertEqual('1', data['status'])
        time.sleep(1)  # mail is presumably sent asynchronously -- TODO confirm
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership accepted')

    def test_accept_member_with_owner_bad_request(self):
        """Ensure accept_member validates its request payload."""
        url = '/api/v1/communities/3/accept_member/'
        response = self.client.post(url, {'lol': 5},
                                    HTTP_AUTHORIZATION=self.auth('user1'),
                                    format='json')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_accept_member_with_owner_not_found(self):
        """Ensure accepting a member id that does not exist is rejected."""
        url = '/api/v1/communities/3/accept_member/'
        response = self.client.post(url, {'id': 19},
                                    HTTP_AUTHORIZATION=self.auth('user1'),
                                    format='json')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_accept_member_with_not_accepted_moderator(self):
        """Ensure a moderator with a pending membership cannot accept members."""
        url = '/api/v1/communities/3/accept_member/'
        response = self.client.post(url, {'id': 5},
                                    HTTP_AUTHORIZATION=self.auth('user2'),
                                    format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_moderator(self):
        """Ensure an accepted moderator can accept members."""
        # Flip the moderator's own membership to "accepted" first.
        moderator_membership = Member.objects.get(id=4)
        moderator_membership.status = '1'
        moderator_membership.save()
        url = '/api/v1/communities/3/accept_member/'
        response = self.client.post(url, {'id': 5},
                                    HTTP_AUTHORIZATION=self.auth('user2'),
                                    format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(5, data['id'])
        self.assertEqual('1', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership accepted')

    def test_ban_member_without_auth(self):
        """Ensure a non authenticated user cannot ban a member."""
        url = '/api/v1/communities/4/ban_member/'
        response = self.client.post(url, {'id': 8}, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_member_with_non_member(self):
        """Ensure a user without moderation rights cannot ban a member."""
        url = '/api/v1/communities/4/ban_member/'
        response = self.client.post(url, {'id': 8},
                                    HTTP_AUTHORIZATION=self.auth('user3'),
                                    format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_moderator_with_member(self):
        """Ensure a simple member cannot ban a moderator."""
        url = '/api/v1/communities/4/ban_member/'
        response = self.client.post(url, {'id': 7},
                                    HTTP_AUTHORIZATION=self.auth('user3'),
                                    format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_owner_with_member(self):
        """Ensure a simple member cannot ban the community owner."""
        url = '/api/v1/communities/4/ban_member/'
        response = self.client.post(url, {'id': 6},
                                    HTTP_AUTHORIZATION=self.auth('user3'),
                                    format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_member_with_moderator(self):
        """Ensure a moderator can ban a simple member."""
        url = '/api/v1/communities/4/ban_member/'
        response = self.client.post(url, {'id': 8},
                                    HTTP_AUTHORIZATION=self.auth('user2'),
                                    format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('2', data['status'])  # '2' presumably means banned
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership cancelled')

    def test_ban_member_with_owner(self):
        """Ensure an owner can ban a simple member."""
        url = '/api/v1/communities/4/ban_member/'
        response = self.client.post(url, {'id': 8},
                                    HTTP_AUTHORIZATION=self.auth('user1'),
                                    format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('2', data['status'])  # '2' presumably means banned
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership cancelled')

    def test_ban_owner_with_moderator(self):
        """Ensure a moderator cannot ban the community owner."""
        url = '/api/v1/communities/4/ban_member/'
        response = self.client.post(url, {'id': 6},
                                    HTTP_AUTHORIZATION=self.auth('user2'),
                                    format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_without_auth(self):
        """Ensure a non authenticated user cannot promote a member."""
        url = '/api/v1/communities/4/promote_moderator/'
        response = self.client.post(url, {'id': 8}, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_user(self):
        """Ensure a user without moderation rights cannot promote a member."""
        url = '/api/v1/communities/4/promote_moderator/'
        response = self.client.post(url, {'id': 8},
                                    HTTP_AUTHORIZATION=self.auth('user4'),
                                    format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_moderator(self):
        """Ensure a moderator cannot promote a member to moderator."""
        url = '/api/v1/communities/4/promote_moderator/'
        response = self.client.post(url, {'id': 8},
                                    HTTP_AUTHORIZATION=self.auth('user2'),
                                    format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_owner(self):
        """Ensure an owner can promote a member to moderator."""
        url = '/api/v1/communities/4/promote_moderator/'
        response = self.client.post(url, {'id': 8},
                                    HTTP_AUTHORIZATION=self.auth('user1'),
                                    format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('1', data['role'])  # '1' presumably means moderator
<|reserved_special_token_1|>
from django.contrib.auth.hashers import make_password
from django.core import mail
from rest_framework import status
from django.contrib.auth.models import User
import time
from api.tests.api_test_case import CustomAPITestCase
from core.models import Member, Community, LocalCommunity, TransportCommunity, Profile, Notification
class MemberTests(CustomAPITestCase):
def setUp(self):
"""
Make a user for authenticating and
testing community actions
"""
owner = self.user_model.objects.create(password=make_password('user1'), email='user1@test.com',
first_name='1', last_name='User', is_active=True)
moderator = self.user_model.objects.create(password=make_password('user2'), email='user2@test.com',
first_name='2', last_name='User', is_active=True)
member = self.user_model.objects.create(password=make_password('user3'), email='user3@test.com',
first_name='3', last_name='User', is_active=True)
other = self.user_model.objects.create(password=make_password('user4'), email='user4@test.com',
first_name='4', last_name='User', is_active=True)
Profile.objects.create(user=owner)
Profile.objects.create(user=moderator)
Profile.objects.create(user=member)
Profile.objects.create(user=other)
lcom1 = LocalCommunity.objects.create(name='lcom1', description='descl1', city='Paris', country='FR',
gps_x=0, gps_y=0)
lcom2 = LocalCommunity.objects.create(name='lcom2', description='descl2', city='Paris', country='FR',
gps_x=0, gps_y=0,
auto_accept_member=True)
lcom3 = LocalCommunity.objects.create(name='lcom3', description='descl3', city='Paris', country='FR',
gps_x=0, gps_y=0)
lcom4 = LocalCommunity.objects.create(name='lcom4', description='descl4', city='Paris', country='FR',
gps_x=0, gps_y=0,
auto_accept_member=True)
lcom5 = LocalCommunity.objects.create(name='lcom5', description='descl5', city='Paris', country='FR',
gps_x=0, gps_y=0)
tcom1 = TransportCommunity.objects.create(name='tcom1', description='desct1', departure='dep1', arrival='arr1',
auto_accept_member=True)
tcom2 = TransportCommunity.objects.create(name='tcom2', description='desct2', departure='dep2', arrival='arr2')
tcom3 = TransportCommunity.objects.create(name='tcom3', description='desct3', departure='dep3', arrival='arr3')
tcom4 = TransportCommunity.objects.create(name='tcom4', description='desct4', departure='dep4', arrival='arr4')
tcom5 = TransportCommunity.objects.create(name='tcom5', description='desct5', departure='dep4', arrival='arr5')
own_mbr = Member.objects.create(user=owner, community=lcom1, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom2, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom3, role='0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom3, role='1', status='0')
spl_mbr = Member.objects.create(user=member, community=lcom3, role='2', status='0')
own_mbr = Member.objects.create(user=owner, community=lcom4, role='0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom4, role='1', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom4, role='2', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom5, role='0', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom5, role='2', status='2')
own_mbr = Member.objects.create(user=owner, community=tcom1, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom2, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom3, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom4, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom5, role='0', status='1')
def test_setup(self):
self.assertEqual(4, self.user_model.objects.all().count())
self.assertEqual(10, Community.objects.all().count())
self.assertEqual(15, Member.objects.all().count())
def test_join_wrong_community(self):
"""
Ensure an authenticated user cannot join a community that does not exists
"""
url = '/api/v1/communities/15/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(15, Member.objects.all().count())
def test_join_community_not_auto_accept(self):
"""
Ensure an authenticated user can join a community
"""
url = '/api/v1/communities/1/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=1)
self.assertEqual(community, member.community)
self.assertEqual("2", member.role)
self.assertEqual("0", member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[SmarTribe] Nouveau membre')
self.assertTrue('demande à faire' in mail.outbox[0].body)
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(16, Member.objects.all().count())
def test_join_community_auto_accept(self):
"""
Ensure an authenticated user can join a community
"""
url = '/api/v1/communities/2/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=2)
self.assertEqual(community, member.community)
self.assertEqual("2", member.role)
self.assertEqual("1", member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[SmarTribe] Nouveau membre')
self.assertTrue('fait désormais' in mail.outbox[0].body)
def test_leave_community(self):
"""
Ensure a member can leave a community
"""
url = '/api/v1/communities/3/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)
self.assertEqual(14, Member.objects.all().count())
def test_leave_community_banned(self):
"""
Ensure a banned member cannot leave a community
"""
url = '/api/v1/communities/5/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)
self.assertEqual(15, Member.objects.all().count())
def test_list_my_memberships_without_auth(self):
"""
Ensure an unauthenticated user cannot list memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_my_memberships_member(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual(5, data['results'][2]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('2', data['results'][2]['status'])
self.assertEqual('2', data['results'][0]['role'])
self.assertEqual('2', data['results'][1]['role'])
self.assertEqual('2', data['results'][2]['role'])
def test_list_my_memberships_moderator(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(2, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('1', data['results'][0]['role'])
self.assertEqual('1', data['results'][1]['role'])
def test_list_my_memberships_owner(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(10, data['count'])
def test_list_members_without_auth(self):
"""
Ensure non authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_without_member_rights(self):
"""
Ensure a non-member authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_without_mod_rights(self):
"""
Ensure a simple user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_with_mod_rights_not_accepted(self):
"""
Ensure a moderator can list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
# Test before acceptation
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_with_mod_rights(self):
    """
    Ensure an accepted moderator can list community members.
    """
    url = '/api/v1/communities/4/retrieve_members/'
    resp = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    data = resp.data
    self.assertEqual(3, data['count'])
    # Expected rows: (membership id, user id, role, status)
    expected = [
        (6, 1, '0', '1'),
        (7, 2, '1', '1'),
        (8, 3, '2', '1'),
    ]
    for row, (member_id, user_id, role, member_status) in zip(data['results'], expected):
        self.assertEqual(member_id, row['id'])
        self.assertEqual(user_id, row['user']['id'])
        self.assertEqual(role, row['role'])
        self.assertEqual(member_status, row['status'])
def test_list_members_with_owner_rights(self):
    """
    Ensure the community owner can list community members.
    """
    url = '/api/v1/communities/4/retrieve_members/'
    resp = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    self.assertEqual(3, resp.data['count'])
def test_accept_member_without_auth(self):
    """
    Ensure an anonymous request cannot accept a membership.
    """
    url = '/api/v1/communities/3/accept_member/'
    resp = self.client.post(url, {'id': 5}, format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_accept_member_with_simple_member(self):
    """
    Ensure a plain member cannot accept another membership request.
    """
    url = '/api/v1/communities/3/accept_member/'
    resp = self.client.post(url, {'id': 5},
                            HTTP_AUTHORIZATION=self.auth('user4'),
                            format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_accept_member_with_owner(self):
    """
    Ensure the community owner can accept a pending membership and that
    a notification email is sent to the new member.
    """
    url = '/api/v1/communities/3/accept_member/'
    resp = self.client.post(url, {'id': 5},
                            HTTP_AUTHORIZATION=self.auth('user1'),
                            format='json')
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    self.assertEqual(5, resp.data['id'])
    self.assertEqual('1', resp.data['status'])
    # the notification email is sent asynchronously — give it a moment
    time.sleep(1)
    self.assertEqual(1, len(mail.outbox))
    self.assertEqual(mail.outbox[0].subject,
                     '[Smartribe] Membership accepted')
def test_accept_member_with_owner_bad_request(self):
    """
    Ensure a malformed accept_member payload (missing 'id') is rejected.
    """
    url = '/api/v1/communities/3/accept_member/'
    resp = self.client.post(url, {'lol': 5},
                            HTTP_AUTHORIZATION=self.auth('user1'),
                            format='json')
    self.assertEqual(status.HTTP_400_BAD_REQUEST, resp.status_code)
def test_accept_member_with_owner_not_found(self):
    """
    Ensure accepting a non-existent membership id is rejected.
    """
    url = '/api/v1/communities/3/accept_member/'
    # membership 19 does not exist in the fixtures
    resp = self.client.post(url, {'id': 19},
                            HTTP_AUTHORIZATION=self.auth('user1'),
                            format='json')
    self.assertEqual(status.HTTP_400_BAD_REQUEST, resp.status_code)
def test_accept_member_with_not_accepted_moderator(self):
    """
    Ensure a moderator whose own membership is still pending cannot
    accept other members.
    """
    url = '/api/v1/communities/3/accept_member/'
    resp = self.client.post(url, {'id': 5},
                            HTTP_AUTHORIZATION=self.auth('user2'),
                            format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_accept_member_with_moderator(self):
    """
    Ensure an accepted moderator can accept a pending membership and that
    a notification email is sent to the new member.
    """
    # accept the moderator's own membership first so he gains his rights
    moderator = Member.objects.get(id=4)
    moderator.status = '1'
    moderator.save()
    url = '/api/v1/communities/3/accept_member/'
    resp = self.client.post(url, {'id': 5},
                            HTTP_AUTHORIZATION=self.auth('user2'),
                            format='json')
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    self.assertEqual(5, resp.data['id'])
    self.assertEqual('1', resp.data['status'])
    # the notification email is sent asynchronously — give it a moment
    time.sleep(1)
    self.assertEqual(1, len(mail.outbox))
    self.assertEqual(mail.outbox[0].subject,
                     '[Smartribe] Membership accepted')
def test_ban_member_without_auth(self):
    """
    Ensure an anonymous request cannot ban a member.
    """
    url = '/api/v1/communities/4/ban_member/'
    resp = self.client.post(url, {'id': 8}, format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_ban_member_with_non_member(self):
    """
    Ensure a user who does not belong to the community cannot ban a member.
    """
    url = '/api/v1/communities/4/ban_member/'
    resp = self.client.post(url, {'id': 8},
                            HTTP_AUTHORIZATION=self.auth('user3'),
                            format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_ban_moderator_with_member(self):
    """
    Ensure a plain member cannot ban a moderator.
    """
    url = '/api/v1/communities/4/ban_member/'
    # membership 7 is the moderator of community 4
    resp = self.client.post(url, {'id': 7},
                            HTTP_AUTHORIZATION=self.auth('user3'),
                            format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_ban_owner_with_member(self):
    """
    Ensure a plain member cannot ban the community owner.
    """
    url = '/api/v1/communities/4/ban_member/'
    # membership 6 is the owner of community 4
    resp = self.client.post(url, {'id': 6},
                            HTTP_AUTHORIZATION=self.auth('user3'),
                            format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_ban_member_with_moderator(self):
    """
    Ensure a moderator can ban a plain member and that a cancellation
    email is sent to the banned member.
    """
    url = '/api/v1/communities/4/ban_member/'
    resp = self.client.post(url, {'id': 8},
                            HTTP_AUTHORIZATION=self.auth('user2'),
                            format='json')
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    self.assertEqual(8, resp.data['id'])
    self.assertEqual('2', resp.data['status'])
    # the notification email is sent asynchronously — give it a moment
    time.sleep(1)
    self.assertEqual(1, len(mail.outbox))
    self.assertEqual(mail.outbox[0].subject,
                     '[Smartribe] Membership cancelled')
def test_ban_member_with_owner(self):
    """
    Ensure the community owner can ban a plain member and that a
    cancellation email is sent to the banned member.
    """
    url = '/api/v1/communities/4/ban_member/'
    resp = self.client.post(url, {'id': 8},
                            HTTP_AUTHORIZATION=self.auth('user1'),
                            format='json')
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    self.assertEqual(8, resp.data['id'])
    self.assertEqual('2', resp.data['status'])
    # the notification email is sent asynchronously — give it a moment
    time.sleep(1)
    self.assertEqual(1, len(mail.outbox))
    self.assertEqual(mail.outbox[0].subject,
                     '[Smartribe] Membership cancelled')
def test_ban_owner_with_moderator(self):
    """
    Ensure a moderator cannot ban the community owner.
    """
    url = '/api/v1/communities/4/ban_member/'
    # membership 6 is the owner of community 4
    resp = self.client.post(url, {'id': 6},
                            HTTP_AUTHORIZATION=self.auth('user2'),
                            format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_promote_user_without_auth(self):
    """
    Ensure an anonymous request cannot promote a member to moderator.
    """
    url = '/api/v1/communities/4/promote_moderator/'
    resp = self.client.post(url, {'id': 8}, format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_promote_user_with_user(self):
    """
    Ensure a plain member cannot promote another member to moderator.
    """
    url = '/api/v1/communities/4/promote_moderator/'
    resp = self.client.post(url, {'id': 8},
                            HTTP_AUTHORIZATION=self.auth('user4'),
                            format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_promote_user_with_moderator(self):
    """
    Ensure a moderator cannot promote a member to moderator
    (owner-only action).
    """
    url = '/api/v1/communities/4/promote_moderator/'
    resp = self.client.post(url, {'id': 8},
                            HTTP_AUTHORIZATION=self.auth('user2'),
                            format='json')
    self.assertEqual(status.HTTP_401_UNAUTHORIZED, resp.status_code)
def test_promote_user_with_owner(self):
    """
    The community owner can promote a member to moderator.

    Expects HTTP 200 and the membership role set to '1'
    (moderator, per the promote_moderator endpoint).
    """
    resp = self.client.post('/api/v1/communities/4/promote_moderator/',
                            {'id': 8},
                            HTTP_AUTHORIZATION=self.auth('user1'),
                            format='json')
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    self.assertEqual(8, resp.data['id'])
    self.assertEqual('1', resp.data['role'])
|
flexible
|
{
"blob_id": "75c00eec7eacd37ff0b37d26163c2304620bb9db",
"index": 5868,
"step-1": "<mask token>\n\n\nclass MemberTests(CustomAPITestCase):\n\n def setUp(self):\n \"\"\"\n Make a user for authenticating and\n testing community actions\n \"\"\"\n owner = self.user_model.objects.create(password=make_password(\n 'user1'), email='user1@test.com', first_name='1', last_name=\n 'User', is_active=True)\n moderator = self.user_model.objects.create(password=make_password(\n 'user2'), email='user2@test.com', first_name='2', last_name=\n 'User', is_active=True)\n member = self.user_model.objects.create(password=make_password(\n 'user3'), email='user3@test.com', first_name='3', last_name=\n 'User', is_active=True)\n other = self.user_model.objects.create(password=make_password(\n 'user4'), email='user4@test.com', first_name='4', last_name=\n 'User', is_active=True)\n Profile.objects.create(user=owner)\n Profile.objects.create(user=moderator)\n Profile.objects.create(user=member)\n Profile.objects.create(user=other)\n lcom1 = LocalCommunity.objects.create(name='lcom1', description=\n 'descl1', city='Paris', country='FR', gps_x=0, gps_y=0)\n lcom2 = LocalCommunity.objects.create(name='lcom2', description=\n 'descl2', city='Paris', country='FR', gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom3 = LocalCommunity.objects.create(name='lcom3', description=\n 'descl3', city='Paris', country='FR', gps_x=0, gps_y=0)\n lcom4 = LocalCommunity.objects.create(name='lcom4', description=\n 'descl4', city='Paris', country='FR', gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom5 = LocalCommunity.objects.create(name='lcom5', description=\n 'descl5', city='Paris', country='FR', gps_x=0, gps_y=0)\n tcom1 = TransportCommunity.objects.create(name='tcom1', description\n ='desct1', departure='dep1', arrival='arr1', auto_accept_member\n =True)\n tcom2 = TransportCommunity.objects.create(name='tcom2', description\n ='desct2', departure='dep2', arrival='arr2')\n tcom3 = TransportCommunity.objects.create(name='tcom3', description\n ='desct3', departure='dep3', 
arrival='arr3')\n tcom4 = TransportCommunity.objects.create(name='tcom4', description\n ='desct4', departure='dep4', arrival='arr4')\n tcom5 = TransportCommunity.objects.create(name='tcom5', description\n ='desct5', departure='dep4', arrival='arr5')\n own_mbr = Member.objects.create(user=owner, community=lcom1, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom2, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom3, role=\n '0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom3,\n role='1', status='0')\n spl_mbr = Member.objects.create(user=member, community=lcom3, role=\n '2', status='0')\n own_mbr = Member.objects.create(user=owner, community=lcom4, role=\n '0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom4,\n role='1', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom4, role=\n '2', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom5, role=\n '0', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom5, role=\n '2', status='2')\n own_mbr = Member.objects.create(user=owner, community=tcom1, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom2, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom3, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom4, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom5, role=\n '0', status='1')\n\n def test_setup(self):\n self.assertEqual(4, self.user_model.objects.all().count())\n self.assertEqual(10, Community.objects.all().count())\n self.assertEqual(15, Member.objects.all().count())\n <mask token>\n\n def test_join_community_not_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/1/join_community/'\n response = self.client.post(url, 
HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=1)\n self.assertEqual(community, member.community)\n self.assertEqual('2', member.role)\n self.assertEqual('0', member.status)\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')\n self.assertTrue('demande à faire' in mail.outbox[0].body)\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n\n def test_join_community_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/2/join_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=2)\n self.assertEqual(community, member.community)\n self.assertEqual('2', member.role)\n self.assertEqual('1', member.status)\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')\n self.assertTrue('fait désormais' in mail.outbox[0].body)\n\n def test_leave_community(self):\n \"\"\"\n Ensure a member can leave a community\n \"\"\"\n url = '/api/v1/communities/3/leave_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)\n self.assertEqual(14, 
Member.objects.all().count())\n\n def test_leave_community_banned(self):\n \"\"\"\n Ensure a banned member cannot leave a community\n \"\"\"\n url = '/api/v1/communities/5/leave_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)\n self.assertEqual(15, Member.objects.all().count())\n <mask token>\n\n def test_list_my_memberships_member(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual(5, data['results'][2]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('2', data['results'][2]['status'])\n self.assertEqual('2', data['results'][0]['role'])\n self.assertEqual('2', data['results'][1]['role'])\n self.assertEqual('2', data['results'][2]['role'])\n\n def test_list_my_memberships_moderator(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(2, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('1', data['results'][0]['role'])\n self.assertEqual('1', data['results'][1]['role'])\n <mask token>\n\n def 
test_list_members_without_auth(self):\n \"\"\"\n Ensure non authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_member_rights(self):\n \"\"\"\n Ensure a non-member authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n <mask token>\n <mask token>\n\n def test_list_members_with_mod_rights(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(6, data['results'][0]['id'])\n self.assertEqual(1, data['results'][0]['user']['id'])\n self.assertEqual('0', data['results'][0]['role'])\n self.assertEqual('1', data['results'][0]['status'])\n self.assertEqual(7, data['results'][1]['id'])\n self.assertEqual(2, data['results'][1]['user']['id'])\n self.assertEqual('1', data['results'][1]['role'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual(8, data['results'][2]['id'])\n self.assertEqual(3, data['results'][2]['user']['id'])\n self.assertEqual('2', data['results'][2]['role'])\n self.assertEqual('1', data['results'][2]['status'])\n\n def test_list_members_with_owner_rights(self):\n \"\"\"\n Ensure an owner can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n\n def 
test_accept_member_without_auth(self):\n \"\"\"\n Ensure a non authenticated user can not accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n <mask token>\n\n def test_accept_member_with_owner(self):\n \"\"\"\n Ensure an owner can accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n <mask token>\n\n def test_accept_member_with_owner_not_found(self):\n \"\"\"\n Ensure member exists\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 19}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n <mask token>\n\n def test_accept_member_with_moderator(self):\n \"\"\"\n Ensure an moderator can accept members\n \"\"\"\n mod = Member.objects.get(id=4)\n mod.status = '1'\n mod.save()\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_ban_member_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 
8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_non_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n <mask token>\n <mask token>\n <mask token>\n\n def test_ban_member_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n <mask token>\n\n def test_promote_user_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n <mask token>\n\n def test_promote_user_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('1', data['role'])\n",
"step-2": "<mask token>\n\n\nclass MemberTests(CustomAPITestCase):\n\n def setUp(self):\n \"\"\"\n Make a user for authenticating and\n testing community actions\n \"\"\"\n owner = self.user_model.objects.create(password=make_password(\n 'user1'), email='user1@test.com', first_name='1', last_name=\n 'User', is_active=True)\n moderator = self.user_model.objects.create(password=make_password(\n 'user2'), email='user2@test.com', first_name='2', last_name=\n 'User', is_active=True)\n member = self.user_model.objects.create(password=make_password(\n 'user3'), email='user3@test.com', first_name='3', last_name=\n 'User', is_active=True)\n other = self.user_model.objects.create(password=make_password(\n 'user4'), email='user4@test.com', first_name='4', last_name=\n 'User', is_active=True)\n Profile.objects.create(user=owner)\n Profile.objects.create(user=moderator)\n Profile.objects.create(user=member)\n Profile.objects.create(user=other)\n lcom1 = LocalCommunity.objects.create(name='lcom1', description=\n 'descl1', city='Paris', country='FR', gps_x=0, gps_y=0)\n lcom2 = LocalCommunity.objects.create(name='lcom2', description=\n 'descl2', city='Paris', country='FR', gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom3 = LocalCommunity.objects.create(name='lcom3', description=\n 'descl3', city='Paris', country='FR', gps_x=0, gps_y=0)\n lcom4 = LocalCommunity.objects.create(name='lcom4', description=\n 'descl4', city='Paris', country='FR', gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom5 = LocalCommunity.objects.create(name='lcom5', description=\n 'descl5', city='Paris', country='FR', gps_x=0, gps_y=0)\n tcom1 = TransportCommunity.objects.create(name='tcom1', description\n ='desct1', departure='dep1', arrival='arr1', auto_accept_member\n =True)\n tcom2 = TransportCommunity.objects.create(name='tcom2', description\n ='desct2', departure='dep2', arrival='arr2')\n tcom3 = TransportCommunity.objects.create(name='tcom3', description\n ='desct3', departure='dep3', 
arrival='arr3')\n tcom4 = TransportCommunity.objects.create(name='tcom4', description\n ='desct4', departure='dep4', arrival='arr4')\n tcom5 = TransportCommunity.objects.create(name='tcom5', description\n ='desct5', departure='dep4', arrival='arr5')\n own_mbr = Member.objects.create(user=owner, community=lcom1, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom2, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom3, role=\n '0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom3,\n role='1', status='0')\n spl_mbr = Member.objects.create(user=member, community=lcom3, role=\n '2', status='0')\n own_mbr = Member.objects.create(user=owner, community=lcom4, role=\n '0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom4,\n role='1', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom4, role=\n '2', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom5, role=\n '0', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom5, role=\n '2', status='2')\n own_mbr = Member.objects.create(user=owner, community=tcom1, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom2, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom3, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom4, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom5, role=\n '0', status='1')\n\n def test_setup(self):\n self.assertEqual(4, self.user_model.objects.all().count())\n self.assertEqual(10, Community.objects.all().count())\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_wrong_community(self):\n \"\"\"\n Ensure an authenticated user cannot join a community that does not exists\n \"\"\"\n url = '/api/v1/communities/15/join_community/'\n response = self.client.post(url, 
HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_community_not_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/1/join_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=1)\n self.assertEqual(community, member.community)\n self.assertEqual('2', member.role)\n self.assertEqual('0', member.status)\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')\n self.assertTrue('demande à faire' in mail.outbox[0].body)\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n\n def test_join_community_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/2/join_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=2)\n self.assertEqual(community, member.community)\n self.assertEqual('2', member.role)\n self.assertEqual('1', member.status)\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')\n self.assertTrue('fait 
désormais' in mail.outbox[0].body)\n\n def test_leave_community(self):\n \"\"\"\n Ensure a member can leave a community\n \"\"\"\n url = '/api/v1/communities/3/leave_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)\n self.assertEqual(14, Member.objects.all().count())\n\n def test_leave_community_banned(self):\n \"\"\"\n Ensure a banned member cannot leave a community\n \"\"\"\n url = '/api/v1/communities/5/leave_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)\n self.assertEqual(15, Member.objects.all().count())\n\n def test_list_my_memberships_without_auth(self):\n \"\"\"\n Ensure an unauthenticated user cannot list memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_my_memberships_member(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual(5, data['results'][2]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('2', data['results'][2]['status'])\n self.assertEqual('2', data['results'][0]['role'])\n self.assertEqual('2', data['results'][1]['role'])\n self.assertEqual('2', data['results'][2]['role'])\n\n def test_list_my_memberships_moderator(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = 
'/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(2, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('1', data['results'][0]['role'])\n self.assertEqual('1', data['results'][1]['role'])\n\n def test_list_my_memberships_owner(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(10, data['count'])\n\n def test_list_members_without_auth(self):\n \"\"\"\n Ensure non authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_member_rights(self):\n \"\"\"\n Ensure a non-member authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n <mask token>\n\n def test_list_members_with_mod_rights_not_accepted(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights(self):\n \"\"\"\n Ensure a moderator can list community members\n 
\"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(6, data['results'][0]['id'])\n self.assertEqual(1, data['results'][0]['user']['id'])\n self.assertEqual('0', data['results'][0]['role'])\n self.assertEqual('1', data['results'][0]['status'])\n self.assertEqual(7, data['results'][1]['id'])\n self.assertEqual(2, data['results'][1]['user']['id'])\n self.assertEqual('1', data['results'][1]['role'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual(8, data['results'][2]['id'])\n self.assertEqual(3, data['results'][2]['user']['id'])\n self.assertEqual('2', data['results'][2]['role'])\n self.assertEqual('1', data['results'][2]['status'])\n\n def test_list_members_with_owner_rights(self):\n \"\"\"\n Ensure an owner can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n\n def test_accept_member_without_auth(self):\n \"\"\"\n Ensure a non authenticated user can not accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n <mask token>\n\n def test_accept_member_with_owner(self):\n \"\"\"\n Ensure an owner can accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n 
time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n <mask token>\n\n def test_accept_member_with_owner_not_found(self):\n \"\"\"\n Ensure member exists\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 19}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n <mask token>\n\n def test_accept_member_with_moderator(self):\n \"\"\"\n Ensure an moderator can accept members\n \"\"\"\n mod = Member.objects.get(id=4)\n mod.status = '1'\n mod.save()\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_ban_member_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_non_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_moderator_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 7}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n <mask token>\n <mask 
token>\n\n def test_ban_member_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n <mask token>\n\n def test_promote_user_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_user(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('1', data['role'])\n",
"step-3": "<mask token>\n\n\nclass MemberTests(CustomAPITestCase):\n\n def setUp(self):\n \"\"\"\n Make a user for authenticating and\n testing community actions\n \"\"\"\n owner = self.user_model.objects.create(password=make_password(\n 'user1'), email='user1@test.com', first_name='1', last_name=\n 'User', is_active=True)\n moderator = self.user_model.objects.create(password=make_password(\n 'user2'), email='user2@test.com', first_name='2', last_name=\n 'User', is_active=True)\n member = self.user_model.objects.create(password=make_password(\n 'user3'), email='user3@test.com', first_name='3', last_name=\n 'User', is_active=True)\n other = self.user_model.objects.create(password=make_password(\n 'user4'), email='user4@test.com', first_name='4', last_name=\n 'User', is_active=True)\n Profile.objects.create(user=owner)\n Profile.objects.create(user=moderator)\n Profile.objects.create(user=member)\n Profile.objects.create(user=other)\n lcom1 = LocalCommunity.objects.create(name='lcom1', description=\n 'descl1', city='Paris', country='FR', gps_x=0, gps_y=0)\n lcom2 = LocalCommunity.objects.create(name='lcom2', description=\n 'descl2', city='Paris', country='FR', gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom3 = LocalCommunity.objects.create(name='lcom3', description=\n 'descl3', city='Paris', country='FR', gps_x=0, gps_y=0)\n lcom4 = LocalCommunity.objects.create(name='lcom4', description=\n 'descl4', city='Paris', country='FR', gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom5 = LocalCommunity.objects.create(name='lcom5', description=\n 'descl5', city='Paris', country='FR', gps_x=0, gps_y=0)\n tcom1 = TransportCommunity.objects.create(name='tcom1', description\n ='desct1', departure='dep1', arrival='arr1', auto_accept_member\n =True)\n tcom2 = TransportCommunity.objects.create(name='tcom2', description\n ='desct2', departure='dep2', arrival='arr2')\n tcom3 = TransportCommunity.objects.create(name='tcom3', description\n ='desct3', departure='dep3', 
arrival='arr3')\n tcom4 = TransportCommunity.objects.create(name='tcom4', description\n ='desct4', departure='dep4', arrival='arr4')\n tcom5 = TransportCommunity.objects.create(name='tcom5', description\n ='desct5', departure='dep4', arrival='arr5')\n own_mbr = Member.objects.create(user=owner, community=lcom1, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom2, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom3, role=\n '0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom3,\n role='1', status='0')\n spl_mbr = Member.objects.create(user=member, community=lcom3, role=\n '2', status='0')\n own_mbr = Member.objects.create(user=owner, community=lcom4, role=\n '0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom4,\n role='1', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom4, role=\n '2', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom5, role=\n '0', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom5, role=\n '2', status='2')\n own_mbr = Member.objects.create(user=owner, community=tcom1, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom2, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom3, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom4, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom5, role=\n '0', status='1')\n\n def test_setup(self):\n self.assertEqual(4, self.user_model.objects.all().count())\n self.assertEqual(10, Community.objects.all().count())\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_wrong_community(self):\n \"\"\"\n Ensure an authenticated user cannot join a community that does not exists\n \"\"\"\n url = '/api/v1/communities/15/join_community/'\n response = self.client.post(url, 
HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_community_not_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/1/join_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=1)\n self.assertEqual(community, member.community)\n self.assertEqual('2', member.role)\n self.assertEqual('0', member.status)\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')\n self.assertTrue('demande à faire' in mail.outbox[0].body)\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n\n def test_join_community_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/2/join_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=2)\n self.assertEqual(community, member.community)\n self.assertEqual('2', member.role)\n self.assertEqual('1', member.status)\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')\n self.assertTrue('fait 
désormais' in mail.outbox[0].body)\n\n def test_leave_community(self):\n \"\"\"\n Ensure a member can leave a community\n \"\"\"\n url = '/api/v1/communities/3/leave_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)\n self.assertEqual(14, Member.objects.all().count())\n\n def test_leave_community_banned(self):\n \"\"\"\n Ensure a banned member cannot leave a community\n \"\"\"\n url = '/api/v1/communities/5/leave_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)\n self.assertEqual(15, Member.objects.all().count())\n\n def test_list_my_memberships_without_auth(self):\n \"\"\"\n Ensure an unauthenticated user cannot list memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_my_memberships_member(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual(5, data['results'][2]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('2', data['results'][2]['status'])\n self.assertEqual('2', data['results'][0]['role'])\n self.assertEqual('2', data['results'][1]['role'])\n self.assertEqual('2', data['results'][2]['role'])\n\n def test_list_my_memberships_moderator(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = 
'/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(2, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('1', data['results'][0]['role'])\n self.assertEqual('1', data['results'][1]['role'])\n\n def test_list_my_memberships_owner(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(10, data['count'])\n\n def test_list_members_without_auth(self):\n \"\"\"\n Ensure non authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_member_rights(self):\n \"\"\"\n Ensure a non-member authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_mod_rights(self):\n \"\"\"\n Ensure a simple user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights_not_accepted(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url 
= '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(6, data['results'][0]['id'])\n self.assertEqual(1, data['results'][0]['user']['id'])\n self.assertEqual('0', data['results'][0]['role'])\n self.assertEqual('1', data['results'][0]['status'])\n self.assertEqual(7, data['results'][1]['id'])\n self.assertEqual(2, data['results'][1]['user']['id'])\n self.assertEqual('1', data['results'][1]['role'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual(8, data['results'][2]['id'])\n self.assertEqual(3, data['results'][2]['user']['id'])\n self.assertEqual('2', data['results'][2]['role'])\n self.assertEqual('1', data['results'][2]['status'])\n\n def test_list_members_with_owner_rights(self):\n \"\"\"\n Ensure an owner can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n\n def test_accept_member_without_auth(self):\n \"\"\"\n Ensure a non authenticated user can not accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_simple_member(self):\n \"\"\"\n Ensure a simple member cannot accept members\n \"\"\"\n url = 
'/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_owner(self):\n \"\"\"\n Ensure an owner can accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n <mask token>\n\n def test_accept_member_with_owner_not_found(self):\n \"\"\"\n Ensure member exists\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 19}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n def test_accept_member_with_not_accepted_moderator(self):\n \"\"\"\n Ensure an non accepted moderator cannot accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_moderator(self):\n \"\"\"\n Ensure an moderator can accept members\n \"\"\"\n mod = Member.objects.get(id=4)\n mod.status = '1'\n mod.save()\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n 
time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_ban_member_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_non_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_moderator_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 7}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_owner_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 6}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n <mask token>\n\n def test_ban_member_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n <mask token>\n\n def test_promote_user_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, 
data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_user(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('1', data['role'])\n",
"step-4": "<mask token>\n\n\nclass MemberTests(CustomAPITestCase):\n\n def setUp(self):\n \"\"\"\n Make a user for authenticating and\n testing community actions\n \"\"\"\n owner = self.user_model.objects.create(password=make_password(\n 'user1'), email='user1@test.com', first_name='1', last_name=\n 'User', is_active=True)\n moderator = self.user_model.objects.create(password=make_password(\n 'user2'), email='user2@test.com', first_name='2', last_name=\n 'User', is_active=True)\n member = self.user_model.objects.create(password=make_password(\n 'user3'), email='user3@test.com', first_name='3', last_name=\n 'User', is_active=True)\n other = self.user_model.objects.create(password=make_password(\n 'user4'), email='user4@test.com', first_name='4', last_name=\n 'User', is_active=True)\n Profile.objects.create(user=owner)\n Profile.objects.create(user=moderator)\n Profile.objects.create(user=member)\n Profile.objects.create(user=other)\n lcom1 = LocalCommunity.objects.create(name='lcom1', description=\n 'descl1', city='Paris', country='FR', gps_x=0, gps_y=0)\n lcom2 = LocalCommunity.objects.create(name='lcom2', description=\n 'descl2', city='Paris', country='FR', gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom3 = LocalCommunity.objects.create(name='lcom3', description=\n 'descl3', city='Paris', country='FR', gps_x=0, gps_y=0)\n lcom4 = LocalCommunity.objects.create(name='lcom4', description=\n 'descl4', city='Paris', country='FR', gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom5 = LocalCommunity.objects.create(name='lcom5', description=\n 'descl5', city='Paris', country='FR', gps_x=0, gps_y=0)\n tcom1 = TransportCommunity.objects.create(name='tcom1', description\n ='desct1', departure='dep1', arrival='arr1', auto_accept_member\n =True)\n tcom2 = TransportCommunity.objects.create(name='tcom2', description\n ='desct2', departure='dep2', arrival='arr2')\n tcom3 = TransportCommunity.objects.create(name='tcom3', description\n ='desct3', departure='dep3', 
arrival='arr3')\n tcom4 = TransportCommunity.objects.create(name='tcom4', description\n ='desct4', departure='dep4', arrival='arr4')\n tcom5 = TransportCommunity.objects.create(name='tcom5', description\n ='desct5', departure='dep4', arrival='arr5')\n own_mbr = Member.objects.create(user=owner, community=lcom1, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom2, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom3, role=\n '0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom3,\n role='1', status='0')\n spl_mbr = Member.objects.create(user=member, community=lcom3, role=\n '2', status='0')\n own_mbr = Member.objects.create(user=owner, community=lcom4, role=\n '0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom4,\n role='1', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom4, role=\n '2', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom5, role=\n '0', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom5, role=\n '2', status='2')\n own_mbr = Member.objects.create(user=owner, community=tcom1, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom2, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom3, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom4, role=\n '0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom5, role=\n '0', status='1')\n\n def test_setup(self):\n self.assertEqual(4, self.user_model.objects.all().count())\n self.assertEqual(10, Community.objects.all().count())\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_wrong_community(self):\n \"\"\"\n Ensure an authenticated user cannot join a community that does not exists\n \"\"\"\n url = '/api/v1/communities/15/join_community/'\n response = self.client.post(url, 
HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_community_not_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/1/join_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=1)\n self.assertEqual(community, member.community)\n self.assertEqual('2', member.role)\n self.assertEqual('0', member.status)\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')\n self.assertTrue('demande à faire' in mail.outbox[0].body)\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n\n def test_join_community_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/2/join_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=2)\n self.assertEqual(community, member.community)\n self.assertEqual('2', member.role)\n self.assertEqual('1', member.status)\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject, '[SmarTribe] Nouveau membre')\n self.assertTrue('fait 
désormais' in mail.outbox[0].body)\n\n def test_leave_community(self):\n \"\"\"\n Ensure a member can leave a community\n \"\"\"\n url = '/api/v1/communities/3/leave_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)\n self.assertEqual(14, Member.objects.all().count())\n\n def test_leave_community_banned(self):\n \"\"\"\n Ensure a banned member cannot leave a community\n \"\"\"\n url = '/api/v1/communities/5/leave_community/'\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)\n self.assertEqual(15, Member.objects.all().count())\n\n def test_list_my_memberships_without_auth(self):\n \"\"\"\n Ensure an unauthenticated user cannot list memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_my_memberships_member(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual(5, data['results'][2]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('2', data['results'][2]['status'])\n self.assertEqual('2', data['results'][0]['role'])\n self.assertEqual('2', data['results'][1]['role'])\n self.assertEqual('2', data['results'][2]['role'])\n\n def test_list_my_memberships_moderator(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = 
'/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(2, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('1', data['results'][0]['role'])\n self.assertEqual('1', data['results'][1]['role'])\n\n def test_list_my_memberships_owner(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(10, data['count'])\n\n def test_list_members_without_auth(self):\n \"\"\"\n Ensure non authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_member_rights(self):\n \"\"\"\n Ensure a non-member authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_mod_rights(self):\n \"\"\"\n Ensure a simple user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights_not_accepted(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url 
= '/api/v1/communities/3/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(6, data['results'][0]['id'])\n self.assertEqual(1, data['results'][0]['user']['id'])\n self.assertEqual('0', data['results'][0]['role'])\n self.assertEqual('1', data['results'][0]['status'])\n self.assertEqual(7, data['results'][1]['id'])\n self.assertEqual(2, data['results'][1]['user']['id'])\n self.assertEqual('1', data['results'][1]['role'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual(8, data['results'][2]['id'])\n self.assertEqual(3, data['results'][2]['user']['id'])\n self.assertEqual('2', data['results'][2]['role'])\n self.assertEqual('1', data['results'][2]['status'])\n\n def test_list_members_with_owner_rights(self):\n \"\"\"\n Ensure an owner can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(3, data['count'])\n\n def test_accept_member_without_auth(self):\n \"\"\"\n Ensure a non authenticated user can not accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_simple_member(self):\n \"\"\"\n Ensure a simple member cannot accept members\n \"\"\"\n url = 
'/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_owner(self):\n \"\"\"\n Ensure an owner can accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_accept_member_with_owner_bad_request(self):\n \"\"\"\n Ensure accept_member request data format\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'lol': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n def test_accept_member_with_owner_not_found(self):\n \"\"\"\n Ensure member exists\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 19}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n def test_accept_member_with_not_accepted_moderator(self):\n \"\"\"\n Ensure an non accepted moderator cannot accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_moderator(self):\n \"\"\"\n Ensure an moderator can accept members\n \"\"\"\n mod = Member.objects.get(id=4)\n mod.status = 
'1'\n mod.save()\n url = '/api/v1/communities/3/accept_member/'\n data = {'id': 5}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_ban_member_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_non_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_moderator_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 7}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_owner_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 6}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', 
data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n\n def test_ban_member_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n\n def test_ban_owner_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 6}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_user(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = 
self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('1', data['role'])\n",
"step-5": "from django.contrib.auth.hashers import make_password\nfrom django.core import mail\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\nimport time\n\nfrom api.tests.api_test_case import CustomAPITestCase\nfrom core.models import Member, Community, LocalCommunity, TransportCommunity, Profile, Notification\n\n\nclass MemberTests(CustomAPITestCase):\n\n def setUp(self):\n \"\"\"\n Make a user for authenticating and\n testing community actions\n \"\"\"\n owner = self.user_model.objects.create(password=make_password('user1'), email='user1@test.com',\n first_name='1', last_name='User', is_active=True)\n moderator = self.user_model.objects.create(password=make_password('user2'), email='user2@test.com',\n first_name='2', last_name='User', is_active=True)\n member = self.user_model.objects.create(password=make_password('user3'), email='user3@test.com',\n first_name='3', last_name='User', is_active=True)\n other = self.user_model.objects.create(password=make_password('user4'), email='user4@test.com',\n first_name='4', last_name='User', is_active=True)\n\n Profile.objects.create(user=owner)\n Profile.objects.create(user=moderator)\n Profile.objects.create(user=member)\n Profile.objects.create(user=other)\n\n lcom1 = LocalCommunity.objects.create(name='lcom1', description='descl1', city='Paris', country='FR',\n gps_x=0, gps_y=0)\n lcom2 = LocalCommunity.objects.create(name='lcom2', description='descl2', city='Paris', country='FR',\n gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom3 = LocalCommunity.objects.create(name='lcom3', description='descl3', city='Paris', country='FR',\n gps_x=0, gps_y=0)\n lcom4 = LocalCommunity.objects.create(name='lcom4', description='descl4', city='Paris', country='FR',\n gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom5 = LocalCommunity.objects.create(name='lcom5', description='descl5', city='Paris', country='FR',\n gps_x=0, gps_y=0)\n tcom1 = TransportCommunity.objects.create(name='tcom1', 
description='desct1', departure='dep1', arrival='arr1',\n auto_accept_member=True)\n tcom2 = TransportCommunity.objects.create(name='tcom2', description='desct2', departure='dep2', arrival='arr2')\n tcom3 = TransportCommunity.objects.create(name='tcom3', description='desct3', departure='dep3', arrival='arr3')\n tcom4 = TransportCommunity.objects.create(name='tcom4', description='desct4', departure='dep4', arrival='arr4')\n tcom5 = TransportCommunity.objects.create(name='tcom5', description='desct5', departure='dep4', arrival='arr5')\n\n own_mbr = Member.objects.create(user=owner, community=lcom1, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom2, role='0', status='1')\n\n own_mbr = Member.objects.create(user=owner, community=lcom3, role='0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom3, role='1', status='0')\n spl_mbr = Member.objects.create(user=member, community=lcom3, role='2', status='0')\n\n own_mbr = Member.objects.create(user=owner, community=lcom4, role='0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom4, role='1', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom4, role='2', status='1')\n\n own_mbr = Member.objects.create(user=owner, community=lcom5, role='0', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom5, role='2', status='2')\n\n own_mbr = Member.objects.create(user=owner, community=tcom1, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom2, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom3, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom4, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom5, role='0', status='1')\n\n def test_setup(self):\n self.assertEqual(4, self.user_model.objects.all().count())\n self.assertEqual(10, Community.objects.all().count())\n 
self.assertEqual(15, Member.objects.all().count())\n\n def test_join_wrong_community(self):\n \"\"\"\n Ensure an authenticated user cannot join a community that does not exists\n \"\"\"\n url = '/api/v1/communities/15/join_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_community_not_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/1/join_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=1)\n self.assertEqual(community, member.community)\n self.assertEqual(\"2\", member.role)\n self.assertEqual(\"0\", member.status)\n\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[SmarTribe] Nouveau membre')\n self.assertTrue('demande à faire' in mail.outbox[0].body)\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n self.assertEqual(16, Member.objects.all().count())\n\n def test_join_community_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/2/join_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=2)\n self.assertEqual(community, 
member.community)\n self.assertEqual(\"2\", member.role)\n self.assertEqual(\"1\", member.status)\n\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[SmarTribe] Nouveau membre')\n self.assertTrue('fait désormais' in mail.outbox[0].body)\n\n def test_leave_community(self):\n \"\"\"\n Ensure a member can leave a community\n \"\"\"\n url = '/api/v1/communities/3/leave_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)\n\n self.assertEqual(14, Member.objects.all().count())\n\n def test_leave_community_banned(self):\n \"\"\"\n Ensure a banned member cannot leave a community\n \"\"\"\n url = '/api/v1/communities/5/leave_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n self.assertEqual(15, Member.objects.all().count())\n\n def test_list_my_memberships_without_auth(self):\n \"\"\"\n Ensure an unauthenticated user cannot list memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_my_memberships_member(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual(5, data['results'][2]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n 
self.assertEqual('2', data['results'][2]['status'])\n self.assertEqual('2', data['results'][0]['role'])\n self.assertEqual('2', data['results'][1]['role'])\n self.assertEqual('2', data['results'][2]['role'])\n\n def test_list_my_memberships_moderator(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(2, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('1', data['results'][0]['role'])\n self.assertEqual('1', data['results'][1]['role'])\n\n def test_list_my_memberships_owner(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(10, data['count'])\n\n def test_list_members_without_auth(self):\n \"\"\"\n Ensure non authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_member_rights(self):\n \"\"\"\n Ensure a non-member authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_mod_rights(self):\n \"\"\"\n Ensure a simple user cannot list community 
members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights_not_accepted(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n\n # Test before acceptation\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])\n\n self.assertEqual(6, data['results'][0]['id'])\n self.assertEqual(1, data['results'][0]['user']['id'])\n self.assertEqual('0', data['results'][0]['role'])\n self.assertEqual('1', data['results'][0]['status'])\n\n self.assertEqual(7, data['results'][1]['id'])\n self.assertEqual(2, data['results'][1]['user']['id'])\n self.assertEqual('1', data['results'][1]['role'])\n self.assertEqual('1', data['results'][1]['status'])\n\n self.assertEqual(8, data['results'][2]['id'])\n self.assertEqual(3, data['results'][2]['user']['id'])\n self.assertEqual('2', data['results'][2]['role'])\n self.assertEqual('1', data['results'][2]['status'])\n\n def test_list_members_with_owner_rights(self):\n \"\"\"\n Ensure an owner can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])\n\n def test_accept_member_without_auth(self):\n \"\"\"\n Ensure a non 
authenticated user can not accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_simple_member(self):\n \"\"\"\n Ensure a simple member cannot accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_owner(self):\n \"\"\"\n Ensure an owner can accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_accept_member_with_owner_bad_request(self):\n \"\"\"\n Ensure accept_member request data format\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'lol': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n def test_accept_member_with_owner_not_found(self):\n \"\"\"\n Ensure member exists\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 19\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n def test_accept_member_with_not_accepted_moderator(self):\n \"\"\"\n Ensure an non accepted moderator cannot accept members\n 
\"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_moderator(self):\n \"\"\"\n Ensure an moderator can accept members\n \"\"\"\n mod = Member.objects.get(id=4)\n mod.status = '1'\n mod.save()\n\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_ban_member_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_non_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_moderator_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 7\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_owner_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 6\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')\n 
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n\n def test_ban_member_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n\n def test_ban_owner_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 6\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_user(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, 
response.status_code)\n\n def test_promote_user_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('1', data['role'])\n",
"step-ids": [
23,
29,
33,
36,
38
]
}
|
[
23,
29,
33,
36,
38
] |
<|reserved_special_token_0|>
class Zui:
def __init__(self):
self.pb = Pushbullet(self.api_key())
self.target = self.make_devices()
self.dayone = config.URL_SCHEME
self.clear, self.pause = self.check_platform()
def api_key(self):
if config.API_KEY:
return config.API_KEY
else:
webbrowser.open('https://www.pushbullet.com/account')
API_KEY = input('Copy and Paste Access Token: ')
self.config_setting(API_KEY)
return API_KEY
def config_setting(self, api_key):
with open('config.py', 'r') as rf:
setting = rf.readlines()
setting[0] = 'API_KEY = "{0}"\n'.format(api_key)
with open('config.py', 'w') as wf:
wf.writelines(setting)
wf.flush()
def make_devices(self):
for d in self.pb.devices:
if config.PUSH_TARGET == d.nickname:
return d
else:
new_device = self.pb.new_device(config.PUSH_TARGET)
self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,
model=config.PUSH_TARGET)
self.make_devices()
def clear_notepad(f):
functools.wraps(f)
def wraps(*args):
os.system(args[0].clear)
result = f(*args)
os.system(args[0].clear)
return result
return wraps
@clear_notepad
def push_to_dayone(self):
"""Pushbullet couldn't link then whitespace in URL.
So, it doesn't push_link, just push_note.
Unavilable DayOne URL shceme.
"""
try:
body = self.notepad()
return self.pb.push_note('', body, device=self.target)
except KeyboardInterrupt as e:
return False
<|reserved_special_token_0|>
def check_platform(self):
cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}
return cp[platform.system()][0], cp[platform.system()][1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Zui:
def __init__(self):
self.pb = Pushbullet(self.api_key())
self.target = self.make_devices()
self.dayone = config.URL_SCHEME
self.clear, self.pause = self.check_platform()
def api_key(self):
if config.API_KEY:
return config.API_KEY
else:
webbrowser.open('https://www.pushbullet.com/account')
API_KEY = input('Copy and Paste Access Token: ')
self.config_setting(API_KEY)
return API_KEY
def config_setting(self, api_key):
with open('config.py', 'r') as rf:
setting = rf.readlines()
setting[0] = 'API_KEY = "{0}"\n'.format(api_key)
with open('config.py', 'w') as wf:
wf.writelines(setting)
wf.flush()
def make_devices(self):
for d in self.pb.devices:
if config.PUSH_TARGET == d.nickname:
return d
else:
new_device = self.pb.new_device(config.PUSH_TARGET)
self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,
model=config.PUSH_TARGET)
self.make_devices()
def clear_notepad(f):
functools.wraps(f)
def wraps(*args):
os.system(args[0].clear)
result = f(*args)
os.system(args[0].clear)
return result
return wraps
@clear_notepad
def push_to_dayone(self):
"""Pushbullet couldn't link then whitespace in URL.
So, it doesn't push_link, just push_note.
Unavilable DayOne URL shceme.
"""
try:
body = self.notepad()
return self.pb.push_note('', body, device=self.target)
except KeyboardInterrupt as e:
return False
def notepad(self):
try:
print('Push: {}, Close: C-c'.format(self.pause))
lines = [line for line in sys.stdin.readlines()]
return ''.join(lines)
except KeyboardInterrupt as e:
raise e
def check_platform(self):
cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}
return cp[platform.system()][0], cp[platform.system()][1]
def main():
z = Zui()
while z.push_to_dayone():
pass
else:
print('Bye.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Zui:
def __init__(self):
self.pb = Pushbullet(self.api_key())
self.target = self.make_devices()
self.dayone = config.URL_SCHEME
self.clear, self.pause = self.check_platform()
def api_key(self):
if config.API_KEY:
return config.API_KEY
else:
webbrowser.open('https://www.pushbullet.com/account')
API_KEY = input('Copy and Paste Access Token: ')
self.config_setting(API_KEY)
return API_KEY
def config_setting(self, api_key):
with open('config.py', 'r') as rf:
setting = rf.readlines()
setting[0] = 'API_KEY = "{0}"\n'.format(api_key)
with open('config.py', 'w') as wf:
wf.writelines(setting)
wf.flush()
def make_devices(self):
for d in self.pb.devices:
if config.PUSH_TARGET == d.nickname:
return d
else:
new_device = self.pb.new_device(config.PUSH_TARGET)
self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,
model=config.PUSH_TARGET)
self.make_devices()
def clear_notepad(f):
functools.wraps(f)
def wraps(*args):
os.system(args[0].clear)
result = f(*args)
os.system(args[0].clear)
return result
return wraps
@clear_notepad
def push_to_dayone(self):
"""Pushbullet couldn't link then whitespace in URL.
So, it doesn't push_link, just push_note.
Unavilable DayOne URL shceme.
"""
try:
body = self.notepad()
return self.pb.push_note('', body, device=self.target)
except KeyboardInterrupt as e:
return False
def notepad(self):
try:
print('Push: {}, Close: C-c'.format(self.pause))
lines = [line for line in sys.stdin.readlines()]
return ''.join(lines)
except KeyboardInterrupt as e:
raise e
def check_platform(self):
cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}
return cp[platform.system()][0], cp[platform.system()][1]
def main():
z = Zui()
while z.push_to_dayone():
pass
else:
print('Bye.')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import functools
import os
import platform
import sys
import webbrowser
import config
from pushbullet import Pushbullet
class Zui:
def __init__(self):
self.pb = Pushbullet(self.api_key())
self.target = self.make_devices()
self.dayone = config.URL_SCHEME
self.clear, self.pause = self.check_platform()
def api_key(self):
if config.API_KEY:
return config.API_KEY
else:
webbrowser.open('https://www.pushbullet.com/account')
API_KEY = input('Copy and Paste Access Token: ')
self.config_setting(API_KEY)
return API_KEY
def config_setting(self, api_key):
with open('config.py', 'r') as rf:
setting = rf.readlines()
setting[0] = 'API_KEY = "{0}"\n'.format(api_key)
with open('config.py', 'w') as wf:
wf.writelines(setting)
wf.flush()
def make_devices(self):
for d in self.pb.devices:
if config.PUSH_TARGET == d.nickname:
return d
else:
new_device = self.pb.new_device(config.PUSH_TARGET)
self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,
model=config.PUSH_TARGET)
self.make_devices()
def clear_notepad(f):
functools.wraps(f)
def wraps(*args):
os.system(args[0].clear)
result = f(*args)
os.system(args[0].clear)
return result
return wraps
@clear_notepad
def push_to_dayone(self):
"""Pushbullet couldn't link then whitespace in URL.
So, it doesn't push_link, just push_note.
Unavilable DayOne URL shceme.
"""
try:
body = self.notepad()
return self.pb.push_note('', body, device=self.target)
except KeyboardInterrupt as e:
return False
def notepad(self):
try:
print('Push: {}, Close: C-c'.format(self.pause))
lines = [line for line in sys.stdin.readlines()]
return ''.join(lines)
except KeyboardInterrupt as e:
raise e
def check_platform(self):
cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}
return cp[platform.system()][0], cp[platform.system()][1]
def main():
z = Zui()
while z.push_to_dayone():
pass
else:
print('Bye.')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import functools
import os
import platform
import sys
import webbrowser
import config
from pushbullet import Pushbullet
class Zui:
def __init__(self):
self.pb = Pushbullet(self.api_key())
self.target = self.make_devices()
self.dayone = config.URL_SCHEME
self.clear, self.pause = self.check_platform()
def api_key(self):
if config.API_KEY:
return config.API_KEY
else:
webbrowser.open('https://www.pushbullet.com/account')
API_KEY = input('Copy and Paste Access Token: ')
self.config_setting(API_KEY)
return API_KEY
def config_setting(self, api_key):
with open('config.py', 'r') as rf:
setting = rf.readlines()
setting[0] = 'API_KEY = "{0}"\n'.format(api_key)
with open('config.py', 'w') as wf:
wf.writelines(setting)
wf.flush()
def make_devices(self):
for d in self.pb.devices:
if config.PUSH_TARGET == d.nickname:
return d
else:
new_device = self.pb.new_device(config.PUSH_TARGET)
# model argument was not used, only nickname
self.pb.edit_device(
new_device,
nickname=config.PUSH_TARGET,
model=config.PUSH_TARGET
)
self.make_devices()
def clear_notepad(f):
functools.wraps(f)
def wraps(*args):
os.system(args[0].clear)
result = f(*args)
os.system(args[0].clear)
return result
return wraps
@clear_notepad
def push_to_dayone(self):
'''Pushbullet couldn't link then whitespace in URL.
So, it doesn't push_link, just push_note.
Unavilable DayOne URL shceme.
'''
try:
# body = self.dayone + self.notepad()
body = self.notepad()
return self.pb.push_note('', body, device=self.target)
except KeyboardInterrupt as e:
return False
def notepad(self):
try:
print('Push: {}, Close: C-c'.format(self.pause))
lines = [line for line in sys.stdin.readlines()]
return ''.join(lines)
except KeyboardInterrupt as e:
raise e
def check_platform(self):
cp = {
'Windows': (
'CLS',
'C-z'
),
'Darwin': (
'clear',
'C-d'
),
}
return cp[platform.system()][0], cp[platform.system()][1]
def main():
    """Run the push loop until push_to_dayone signals a stop, then exit."""
    app = Zui()
    pushing = True
    while pushing:
        pushing = bool(app.push_to_dayone())
    print('Bye.')


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "66cc9ca3d8cbe9690da841e43cef217f3518122c",
"index": 7939,
"step-1": "<mask token>\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET)\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n \"\"\"Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n \"\"\"\n try:\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n <mask token>\n\n def check_platform(self):\n cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET)\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n \"\"\"Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n \"\"\"\n try:\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n\n def notepad(self):\n try:\n print('Push: {}, Close: C-c'.format(self.pause))\n lines = [line for line in sys.stdin.readlines()]\n return ''.join(lines)\n except KeyboardInterrupt as e:\n raise e\n\n def check_platform(self):\n cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\ndef main():\n z = Zui()\n while z.push_to_dayone():\n pass\n else:\n print('Bye.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET)\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n \"\"\"Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n \"\"\"\n try:\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n\n def notepad(self):\n try:\n print('Push: {}, Close: C-c'.format(self.pause))\n lines = [line for line in sys.stdin.readlines()]\n return ''.join(lines)\n except KeyboardInterrupt as e:\n raise e\n\n def check_platform(self):\n cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\ndef main():\n z = Zui()\n while z.push_to_dayone():\n pass\n else:\n print('Bye.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import functools\nimport os\nimport platform\nimport sys\nimport webbrowser\nimport config\nfrom pushbullet import Pushbullet\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n self.pb.edit_device(new_device, nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET)\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n \"\"\"Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n \"\"\"\n try:\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n\n def notepad(self):\n try:\n print('Push: {}, Close: C-c'.format(self.pause))\n lines = [line for line in sys.stdin.readlines()]\n return ''.join(lines)\n except KeyboardInterrupt as e:\n raise e\n\n def check_platform(self):\n cp = {'Windows': ('CLS', 'C-z'), 'Darwin': ('clear', 'C-d')}\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\ndef main():\n z = Zui()\n while z.push_to_dayone():\n pass\n 
else:\n print('Bye.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport functools\nimport os\nimport platform\nimport sys\nimport webbrowser\n\nimport config\nfrom pushbullet import Pushbullet\n\n\nclass Zui:\n\n def __init__(self):\n self.pb = Pushbullet(self.api_key())\n self.target = self.make_devices()\n self.dayone = config.URL_SCHEME\n self.clear, self.pause = self.check_platform()\n\n def api_key(self):\n if config.API_KEY:\n return config.API_KEY\n else:\n webbrowser.open('https://www.pushbullet.com/account')\n API_KEY = input('Copy and Paste Access Token: ')\n self.config_setting(API_KEY)\n return API_KEY\n\n def config_setting(self, api_key):\n with open('config.py', 'r') as rf:\n setting = rf.readlines()\n setting[0] = 'API_KEY = \"{0}\"\\n'.format(api_key)\n with open('config.py', 'w') as wf:\n wf.writelines(setting)\n wf.flush()\n\n def make_devices(self):\n for d in self.pb.devices:\n if config.PUSH_TARGET == d.nickname:\n return d\n else:\n new_device = self.pb.new_device(config.PUSH_TARGET)\n # model argument was not used, only nickname\n self.pb.edit_device(\n new_device,\n nickname=config.PUSH_TARGET,\n model=config.PUSH_TARGET\n )\n self.make_devices()\n\n def clear_notepad(f):\n functools.wraps(f)\n def wraps(*args):\n os.system(args[0].clear)\n result = f(*args)\n os.system(args[0].clear)\n return result\n return wraps\n\n @clear_notepad\n def push_to_dayone(self):\n '''Pushbullet couldn't link then whitespace in URL.\n So, it doesn't push_link, just push_note.\n Unavilable DayOne URL shceme.\n '''\n try:\n # body = self.dayone + self.notepad()\n body = self.notepad()\n return self.pb.push_note('', body, device=self.target)\n except KeyboardInterrupt as e:\n return False\n\n def notepad(self):\n try:\n print('Push: {}, Close: C-c'.format(self.pause))\n lines = [line for line in sys.stdin.readlines()]\n return ''.join(lines)\n except KeyboardInterrupt as e:\n raise e\n\n def check_platform(self):\n cp = {\n 'Windows': (\n 'CLS',\n 'C-z'\n ),\n 
'Darwin': (\n 'clear',\n 'C-d'\n ),\n }\n return cp[platform.system()][0], cp[platform.system()][1]\n\n\ndef main():\n z = Zui()\n while z.push_to_dayone():\n pass\n else:\n print('Bye.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
<|reserved_special_token_0|>
class Audio:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Audio:
def __init__(self):
self.sox_process = None
def kill_sox(self, timeout=1):
if self.sox_process is not None:
self.sox_process.terminate()
try:
self.sox_process.wait(timeout=timeout)
except subprocess.TimeoutExpired:
self.sox_process.kill()
self.sox_process.wait(timeout=timeout)
self.sox_process = None
def run_sox(self, scale, preset, buffer=20):
"""
Builds and returns a sox command from a preset object
"""
buffer = 17
multiplier = 100
command_effects = []
command_effects += ['pitch', str(scale * multiplier)]
if preset.volume_boost != None:
command_effects += ['vol', str(preset.volume_boost) + 'dB']
else:
command_effects += ['vol', '0']
if preset.downsample_amount != None:
command_effects += ['downsample', str(preset.downsample_amount)]
else:
command_effects += ['downsample', '1']
command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',
'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects
self.sox_process = subprocess.Popen(command)
<|reserved_special_token_0|>
def load_pa_modules(self):
self.null_sink = subprocess.check_call(
'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description="Lyrebird Output"'
.split(' '))
self.remap_sink = subprocess.check_call(
'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description="Lyrebird Virtual Input"'
.split(' '))
def get_pactl_modules(self):
"""
Parses `pactl info short` into tuples containing the module ID,
the module type and the attributes of the module. It is designed
only for named modules and as such junk data may be included in
the returned list.
Returns an array of tuples that take the form:
(module_id (str), module_type (str), attributes (attribute tuples))
The attribute tuples:
(key (str), value (str))
An example output might look like:
[
( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),
( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )
]
"""
pactl_list = subprocess.run(['pactl', 'list', 'short'],
capture_output=True, encoding='utf8')
lines = pactl_list.stdout
data = []
split_lines = lines.split('\n')
for line in split_lines:
info = line.split('\t')
if len(info) <= 2:
continue
if info[2] and len(info[2]) > 0:
key_values = list(map(lambda key_value: tuple(key_value.
split('=')), info[2].split(' ')))
data.append((info[0], info[1], key_values))
else:
data.append((info[0], info[1], []))
return data
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Audio:
    """Drives the SoX effects pipeline and the PulseAudio virtual devices
    (a null sink plus a remapped source) that Lyrebird routes audio through.
    """

    def __init__(self):
        # Handle to the running sox subprocess; None while no effect is active.
        self.sox_process = None

    def kill_sox(self, timeout=1):
        """Stop the active sox process, escalating from terminate to kill.

        Args:
            timeout: seconds to wait after each signal before escalating.

        No-op when no sox process is running.
        """
        if self.sox_process is not None:
            self.sox_process.terminate()
            try:
                self.sox_process.wait(timeout=timeout)
            except subprocess.TimeoutExpired:
                # Graceful shutdown timed out: force-kill, then reap.
                self.sox_process.kill()
                self.sox_process.wait(timeout=timeout)
            self.sox_process = None

    def run_sox(self, scale, preset, buffer=17):
        """Spawn sox piping the default source into Lyrebird-Output with
        the effect chain described by *preset*.

        Args:
            scale: pitch shift factor; multiplied by 100 to obtain sox's
                pitch argument.
            preset: object exposing ``volume_boost`` (dB value or None) and
                ``downsample_amount`` (int or None).
            buffer: sox ``--buffer`` size; the small default keeps latency
                low. Bug fix: this parameter was previously overwritten
                with a hard-coded 17 and therefore ignored.
        """
        multiplier = 100
        command_effects = ['pitch', str(scale * multiplier)]
        if preset.volume_boost is not None:
            command_effects += ['vol', str(preset.volume_boost) + 'dB']
        else:
            # Explicit 0 dB, otherwise sox reuses the last volume given.
            command_effects += ['vol', '0']
        if preset.downsample_amount is not None:
            command_effects += ['downsample', str(preset.downsample_amount)]
        else:
            # Explicit factor 1 so a previous downsample is reverted when
            # the effect is disabled.
            command_effects += ['downsample', '1']
        command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',
            'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects
        self.sox_process = subprocess.Popen(command)

    def get_sink_name(self, tuple):
        """Return the device name from a ``(key, value)`` attribute pair,
        or None when the key is not a name attribute.

        The parameter name shadows the builtin ``tuple``; it is kept to
        preserve the method's public signature for keyword callers.
        """
        if tuple[0] in ('sink_name', 'source_name'):
            return tuple[1]
        return None

    def load_pa_modules(self):
        """Create Lyrebird's PulseAudio devices: a null sink for output and
        a remapped source exposing that sink's monitor as a virtual input.

        Raises:
            subprocess.CalledProcessError: if pactl fails to load a module.
        """
        self.null_sink = subprocess.check_call(
            'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description="Lyrebird Output"'
            .split(' '))
        self.remap_sink = subprocess.check_call(
            'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description="Lyrebird Virtual Input"'
            .split(' '))

    def get_pactl_modules(self):
        """
        Parses `pactl list short` into tuples containing the module ID,
        the module type and the attributes of the module. It is designed
        only for named modules and as such junk data may be included in
        the returned list.

        Returns an array of tuples that take the form:
            (module_id (str), module_type (str), attributes (attribute tuples))

        The attribute tuples:
            (key (str), value (str))

        An example output might look like:
        [
            ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),
            ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )
        ]
        """
        pactl_list = subprocess.run(['pactl', 'list', 'short'],
            capture_output=True, encoding='utf8')
        data = []
        for line in pactl_list.stdout.split('\n'):
            info = line.split('\t')
            if len(info) <= 2:
                # Not a module line with an attribute column: skip.
                continue
            if info[2]:
                attributes = [tuple(pair.split('='))
                              for pair in info[2].split(' ')]
                data.append((info[0], info[1], attributes))
            else:
                data.append((info[0], info[1], []))
        return data

    def unload_pa_modules(self):
        """
        Unloads the Lyrebird null sink and remapped source created by
        load_pa_modules.
        """
        modules = self.get_pactl_modules()
        lyrebird_module_ids = []
        for module in modules:
            if len(module) < 3 or len(module[2]) < 1:
                continue
            module_type = module[1]
            name = self.get_sink_name(module[2][0])
            if (module_type == 'module-null-sink' and
                    name == 'Lyrebird-Output'):
                lyrebird_module_ids.append(module[0])
            elif (module_type == 'module-remap-source' and
                    name == 'Lyrebird-Input'):
                lyrebird_module_ids.append(module[0])
        for id in lyrebird_module_ids:
            subprocess.run(['pactl', 'unload-module', str(id)])
<|reserved_special_token_1|>
import subprocess
class Audio:
    """Drives the SoX effects pipeline and the PulseAudio virtual devices
    (a null sink plus a remapped source) that Lyrebird routes audio through.
    """

    def __init__(self):
        # Handle to the running sox subprocess; None while no effect is active.
        self.sox_process = None

    def kill_sox(self, timeout=1):
        """Stop the active sox process, escalating from terminate to kill.

        Args:
            timeout: seconds to wait after each signal before escalating.

        No-op when no sox process is running.
        """
        if self.sox_process is not None:
            self.sox_process.terminate()
            try:
                self.sox_process.wait(timeout=timeout)
            except subprocess.TimeoutExpired:
                # Graceful shutdown timed out: force-kill, then reap.
                self.sox_process.kill()
                self.sox_process.wait(timeout=timeout)
            self.sox_process = None

    def run_sox(self, scale, preset, buffer=17):
        """Spawn sox piping the default source into Lyrebird-Output with
        the effect chain described by *preset*.

        Args:
            scale: pitch shift factor; multiplied by 100 to obtain sox's
                pitch argument.
            preset: object exposing ``volume_boost`` (dB value or None) and
                ``downsample_amount`` (int or None).
            buffer: sox ``--buffer`` size; the small default keeps latency
                low. Bug fix: this parameter was previously overwritten
                with a hard-coded 17 and therefore ignored.
        """
        multiplier = 100
        command_effects = ['pitch', str(scale * multiplier)]
        if preset.volume_boost is not None:
            command_effects += ['vol', str(preset.volume_boost) + 'dB']
        else:
            # Explicit 0 dB, otherwise sox reuses the last volume given.
            command_effects += ['vol', '0']
        if preset.downsample_amount is not None:
            command_effects += ['downsample', str(preset.downsample_amount)]
        else:
            # Explicit factor 1 so a previous downsample is reverted when
            # the effect is disabled.
            command_effects += ['downsample', '1']
        command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',
            'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects
        self.sox_process = subprocess.Popen(command)

    def get_sink_name(self, tuple):
        """Return the device name from a ``(key, value)`` attribute pair,
        or None when the key is not a name attribute.

        The parameter name shadows the builtin ``tuple``; it is kept to
        preserve the method's public signature for keyword callers.
        """
        if tuple[0] in ('sink_name', 'source_name'):
            return tuple[1]
        return None

    def load_pa_modules(self):
        """Create Lyrebird's PulseAudio devices: a null sink for output and
        a remapped source exposing that sink's monitor as a virtual input.

        Raises:
            subprocess.CalledProcessError: if pactl fails to load a module.
        """
        self.null_sink = subprocess.check_call(
            'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description="Lyrebird Output"'
            .split(' '))
        self.remap_sink = subprocess.check_call(
            'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description="Lyrebird Virtual Input"'
            .split(' '))

    def get_pactl_modules(self):
        """
        Parses `pactl list short` into tuples containing the module ID,
        the module type and the attributes of the module. It is designed
        only for named modules and as such junk data may be included in
        the returned list.

        Returns an array of tuples that take the form:
            (module_id (str), module_type (str), attributes (attribute tuples))

        The attribute tuples:
            (key (str), value (str))

        An example output might look like:
        [
            ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),
            ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )
        ]
        """
        pactl_list = subprocess.run(['pactl', 'list', 'short'],
            capture_output=True, encoding='utf8')
        data = []
        for line in pactl_list.stdout.split('\n'):
            info = line.split('\t')
            if len(info) <= 2:
                # Not a module line with an attribute column: skip.
                continue
            if info[2]:
                attributes = [tuple(pair.split('='))
                              for pair in info[2].split(' ')]
                data.append((info[0], info[1], attributes))
            else:
                data.append((info[0], info[1], []))
        return data

    def unload_pa_modules(self):
        """
        Unloads the Lyrebird null sink and remapped source created by
        load_pa_modules.
        """
        modules = self.get_pactl_modules()
        lyrebird_module_ids = []
        for module in modules:
            if len(module) < 3 or len(module[2]) < 1:
                continue
            module_type = module[1]
            name = self.get_sink_name(module[2][0])
            if (module_type == 'module-null-sink' and
                    name == 'Lyrebird-Output'):
                lyrebird_module_ids.append(module[0])
            elif (module_type == 'module-remap-source' and
                    name == 'Lyrebird-Input'):
                lyrebird_module_ids.append(module[0])
        for id in lyrebird_module_ids:
            subprocess.run(['pactl', 'unload-module', str(id)])
<|reserved_special_token_1|>
import subprocess
class Audio:
    """Drives the SoX effects pipeline and the PulseAudio virtual devices
    (a null sink plus a remapped source) that Lyrebird routes audio through.
    """

    def __init__(self):
        # Handle to the running sox subprocess; None while no effect is active.
        self.sox_process = None

    def kill_sox(self, timeout=1):
        """Stop the active sox process, escalating from terminate to kill.

        Args:
            timeout: seconds to wait after each signal before escalating.

        No-op when no sox process is running.
        """
        if self.sox_process is not None:
            self.sox_process.terminate()
            try:
                self.sox_process.wait(timeout=timeout)
            except subprocess.TimeoutExpired:
                # Graceful shutdown timed out: force-kill, then reap.
                self.sox_process.kill()
                self.sox_process.wait(timeout=timeout)
            self.sox_process = None

    def run_sox(self, scale, preset, buffer=17):
        """Spawn sox piping the default source into Lyrebird-Output with
        the effect chain described by *preset*.

        Args:
            scale: pitch shift factor; multiplied by 100 to obtain sox's
                pitch argument.
            preset: object exposing ``volume_boost`` (dB value or None) and
                ``downsample_amount`` (int or None).
            buffer: sox ``--buffer`` size; the small default keeps latency
                low. Bug fix: this parameter was previously overwritten
                with a hard-coded 17 and therefore ignored.
        """
        multiplier = 100
        command_effects = ['pitch', str(scale * multiplier)]
        if preset.volume_boost is not None:
            command_effects += ['vol', str(preset.volume_boost) + 'dB']
        else:
            # Explicit 0 dB, otherwise sox reuses the last volume given.
            command_effects += ['vol', '0']
        if preset.downsample_amount is not None:
            command_effects += ['downsample', str(preset.downsample_amount)]
        else:
            # Explicit factor 1 so a previous downsample is reverted when
            # the effect is disabled.
            command_effects += ['downsample', '1']
        command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',
            'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects
        self.sox_process = subprocess.Popen(command)

    def get_sink_name(self, tuple):
        """Return the device name from a ``(key, value)`` attribute pair,
        or None when the key is not a name attribute.

        The parameter name shadows the builtin ``tuple``; it is kept to
        preserve the method's public signature for keyword callers.
        """
        if tuple[0] in ('sink_name', 'source_name'):
            return tuple[1]
        return None

    def load_pa_modules(self):
        """Create Lyrebird's PulseAudio devices: a null sink for output and
        a remapped source exposing that sink's monitor as a virtual input.

        Raises:
            subprocess.CalledProcessError: if pactl fails to load a module.
        """
        self.null_sink = subprocess.check_call(
            'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description="Lyrebird Output"'
            .split(' '))
        self.remap_sink = subprocess.check_call(
            'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description="Lyrebird Virtual Input"'
            .split(' '))

    def get_pactl_modules(self):
        """
        Parses `pactl list short` into tuples containing the module ID,
        the module type and the attributes of the module. It is designed
        only for named modules and as such junk data may be included in
        the returned list.

        Returns an array of tuples that take the form:
            (module_id (str), module_type (str), attributes (attribute tuples))

        The attribute tuples:
            (key (str), value (str))

        An example output might look like:
        [
            ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),
            ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )
        ]
        """
        pactl_list = subprocess.run(['pactl', 'list', 'short'],
            capture_output=True, encoding='utf8')
        data = []
        for line in pactl_list.stdout.split('\n'):
            info = line.split('\t')
            if len(info) <= 2:
                # Not a module line with an attribute column: skip.
                continue
            if info[2]:
                attributes = [tuple(pair.split('='))
                              for pair in info[2].split(' ')]
                data.append((info[0], info[1], attributes))
            else:
                data.append((info[0], info[1], []))
        return data

    def unload_pa_modules(self):
        """
        Unloads the Lyrebird null sink and remapped source created by
        load_pa_modules.
        """
        modules = self.get_pactl_modules()
        lyrebird_module_ids = []
        for module in modules:
            if len(module) < 3 or len(module[2]) < 1:
                continue
            module_type = module[1]
            name = self.get_sink_name(module[2][0])
            if (module_type == 'module-null-sink' and
                    name == 'Lyrebird-Output'):
                lyrebird_module_ids.append(module[0])
            elif (module_type == 'module-remap-source' and
                    name == 'Lyrebird-Input'):
                lyrebird_module_ids.append(module[0])
        for id in lyrebird_module_ids:
            subprocess.run(['pactl', 'unload-module', str(id)])
|
flexible
|
{
"blob_id": "d35d26cc50da9a3267edd2da706a4b6e653d22ac",
"index": 6555,
"step-1": "<mask token>\n\n\nclass Audio:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n <mask token>\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == 'sink_name':\n return tuple[1]\n elif tuple[0] == 'source_name':\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n \"\"\"\n Unloads all Lyrebird null sinks.\n \"\"\"\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue\n if len(module[2]) < 1:\n continue\n if module[1] == 'module-null-sink':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Output':\n lyrebird_module_ids.append(module[0])\n elif module[1] == 'module-remap-source':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Input':\n lyrebird_module_ids.append(module[0])\n for id in lyrebird_module_ids:\n subprocess.run(['pactl', 'unload-module', str(id)])\n",
"step-4": "import subprocess\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == 'sink_name':\n return tuple[1]\n elif tuple[0] == 'source_name':\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n \"\"\"\n Unloads all Lyrebird null sinks.\n \"\"\"\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue\n if len(module[2]) < 1:\n continue\n if module[1] == 'module-null-sink':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Output':\n lyrebird_module_ids.append(module[0])\n elif module[1] == 'module-remap-source':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Input':\n lyrebird_module_ids.append(module[0])\n for id in lyrebird_module_ids:\n subprocess.run(['pactl', 'unload-module', str(id)])\n",
"step-5": "import subprocess\n\nclass Audio:\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n # trying a lower buffer size\n def run_sox(self, scale, preset, buffer=20):\n '''\n Builds and returns a sox command from a preset object\n '''\n buffer = 17\n multiplier = 100\n command_effects = []\n\n command_effects += [\"pitch\", str(scale * multiplier)]\n\n # Volume boosting\n if preset.volume_boost != None:\n command_effects += [\"vol\", str(preset.volume_boost) + \"dB\"]\n else:\n # Fix a bug where SoX uses last given volumne\n command_effects += [\"vol\", \"0\"]\n\n # Downsampling\n if preset.downsample_amount != None:\n command_effects += [\"downsample\", str(preset.downsample_amount)]\n else:\n # Append downsample of 1 to fix a bug where the downsample isn't being reverted\n # when we disable the effect with it on.\n command_effects += [\"downsample\", \"1\"]\n\n command = [\"sox\", \"--buffer\", str(buffer), \"-q\", \"-t\", \"pulseaudio\", \"default\", \"-t\", \"pulseaudio\", \"Lyrebird-Output\"] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == \"sink_name\":\n return tuple[1]\n elif tuple[0] == \"source_name\":\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'.split(' ')\n )\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\\\n .split(' ')\n )\n\n def get_pactl_modules(self):\n '''\n Parses `pactl info short` into tuples 
containing the module ID,\n the module type and the attributes of the module. It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n '''\n pactl_list = subprocess.run([\"pactl\", \"list\", \"short\"], capture_output=True, encoding=\"utf8\")\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split(\"\\n\")\n for line in split_lines:\n info = line.split(\"\\t\")\n if len(info) <= 2:\n continue\n \n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.split(\"=\")), info[2].split(\" \")))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n '''\n Unloads all Lyrebird null sinks.\n '''\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue;\n if len(module[2]) < 1:\n continue;\n\n if module[1] == \"module-null-sink\":\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == \"Lyrebird-Output\":\n lyrebird_module_ids.append(module[0])\n elif module[1] == \"module-remap-source\":\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == \"Lyrebird-Input\":\n lyrebird_module_ids.append(module[0])\n\n for id in lyrebird_module_ids:\n subprocess.run([\"pactl\", \"unload-module\", str(id)])\n",
"step-ids": [
1,
6,
8,
9,
10
]
}
|
[
1,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
class popen:
<|reserved_special_token_0|>
def __init__(self, command):
self._command = command
self._process = None
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class popen:
<|reserved_special_token_0|>
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class popen:
"""Runs subprocess.Popen and returns the process object.
This is meant to be used as a context manager. For example:
with popen(['echo', 'hello']) as p:
# Use p here
This object ensures that any child processes spawned by the command
are killed by forcing the subprocess to use a process group. This
prevents e.g. the emulator from sticking around as a zombie process
after the test is complete.
Args:
command -- The list of command line arguments.
"""
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import signal
import sys
import subprocess
from subprocess import Popen, PIPE
class popen:
"""Runs subprocess.Popen and returns the process object.
This is meant to be used as a context manager. For example:
with popen(['echo', 'hello']) as p:
# Use p here
This object ensures that any child processes spawned by the command
are killed by forcing the subprocess to use a process group. This
prevents e.g. the emulator from sticking around as a zombie process
after the test is complete.
Args:
command -- The list of command line arguments.
"""
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
<|reserved_special_token_1|>
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module implements helpers for GN SDK e2e tests.
"""
# Note, this is run on bots, which only support python2.7.
# Be sure to only use python2.7 features in this module.
import os
import signal
import sys
import subprocess
from subprocess import Popen, PIPE
class popen:
"""Runs subprocess.Popen and returns the process object.
This is meant to be used as a context manager. For example:
with popen(['echo', 'hello']) as p:
# Use p here
This object ensures that any child processes spawned by the command
are killed by forcing the subprocess to use a process group. This
prevents e.g. the emulator from sticking around as a zombie process
after the test is complete.
Args:
command -- The list of command line arguments.
"""
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
|
flexible
|
{
"blob_id": "bbb3d27ce8f4c1943ecc7ab542346c9f41cbd30e",
"index": 1256,
"step-1": "<mask token>\n\n\nclass popen:\n <mask token>\n\n def __init__(self, command):\n self._command = command\n self._process = None\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass popen:\n <mask token>\n\n def __init__(self, command):\n self._command = command\n self._process = None\n\n def __enter__(self):\n self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,\n close_fds=True, preexec_fn=os.setsid)\n return self._process\n\n def __exit__(self, type, value, traceback):\n if self._process.poll() is None:\n os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n",
"step-3": "<mask token>\n\n\nclass popen:\n \"\"\"Runs subprocess.Popen and returns the process object.\n\n This is meant to be used as a context manager. For example:\n\n with popen(['echo', 'hello']) as p:\n # Use p here\n\n This object ensures that any child processes spawned by the command\n are killed by forcing the subprocess to use a process group. This\n prevents e.g. the emulator from sticking around as a zombie process\n after the test is complete.\n\n Args:\n command -- The list of command line arguments.\n \"\"\"\n\n def __init__(self, command):\n self._command = command\n self._process = None\n\n def __enter__(self):\n self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,\n close_fds=True, preexec_fn=os.setsid)\n return self._process\n\n def __exit__(self, type, value, traceback):\n if self._process.poll() is None:\n os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n",
"step-4": "<mask token>\nimport os\nimport signal\nimport sys\nimport subprocess\nfrom subprocess import Popen, PIPE\n\n\nclass popen:\n \"\"\"Runs subprocess.Popen and returns the process object.\n\n This is meant to be used as a context manager. For example:\n\n with popen(['echo', 'hello']) as p:\n # Use p here\n\n This object ensures that any child processes spawned by the command\n are killed by forcing the subprocess to use a process group. This\n prevents e.g. the emulator from sticking around as a zombie process\n after the test is complete.\n\n Args:\n command -- The list of command line arguments.\n \"\"\"\n\n def __init__(self, command):\n self._command = command\n self._process = None\n\n def __enter__(self):\n self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,\n close_fds=True, preexec_fn=os.setsid)\n return self._process\n\n def __exit__(self, type, value, traceback):\n if self._process.poll() is None:\n os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n",
"step-5": "# Copyright 2020 The Fuchsia Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\"\"\"This module implements helpers for GN SDK e2e tests.\n\"\"\"\n\n# Note, this is run on bots, which only support python2.7.\n# Be sure to only use python2.7 features in this module.\n\nimport os\nimport signal\nimport sys\nimport subprocess\nfrom subprocess import Popen, PIPE\n\nclass popen:\n \"\"\"Runs subprocess.Popen and returns the process object.\n\n This is meant to be used as a context manager. For example:\n\n with popen(['echo', 'hello']) as p:\n # Use p here\n\n This object ensures that any child processes spawned by the command\n are killed by forcing the subprocess to use a process group. This\n prevents e.g. the emulator from sticking around as a zombie process\n after the test is complete.\n\n Args:\n command -- The list of command line arguments.\n \"\"\"\n def __init__(self, command):\n self._command = command\n self._process = None\n\n def __enter__(self):\n self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,\n close_fds=True, preexec_fn=os.setsid)\n return self._process\n\n def __exit__(self, type, value, traceback):\n if self._process.poll() is None:\n os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
/home/khang/anaconda3/lib/python3.6/tempfile.py
|
normal
|
{
"blob_id": "399a22450d215638051a7d643fb6d391156779c5",
"index": 5855,
"step-1": "/home/khang/anaconda3/lib/python3.6/tempfile.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import math
# type defining of the variable and playing with variables.
a = 5.0
print(id(a))
a = 10
print("hello.....")
print(type(a))
print(id(a))
# locating addresses...
b = [5, 6, 7]
print(id(b))
b.append(10)
print(id(b))
# Strings...
name = input("Enter Your Name:: ") # iNPUTTING AS NAME
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
# Escape Sequence
# \'
# \"
# \\
# \n
message = 'Python "Programming"'
print(message)
message = """Python
New Line..
Programmin"""
print(message)
# string Concatenation
lastname = input("Enter Your Last Name:: ") # iNPUTTING AS NAME
print(lastname)
print(name + " " + lastname)
full = f"{name} {lastname}"
print("Another way of writing... \n" + full)
print(full.upper()) # converts into upper case.
print(full.find("ip")) # finding location of specific char. Returns index number.
print("Dipesh" in full) # returns Boolean value either true or false..
print("Patel" in full)
print(full.replace("Rafaliya", "Patel"))
# Binary representation of any number...
print(bin(a)) # binary of a = 10
print(hex(a)) # Hexadecimal of a..
x = 0b0101
print((x)) # binary num a
print(bin(x)) # binary printing of a
# complex Number...
complex = a + 5j
print(complex) # printing complex number
y = 3
# operations
q = a + y # addition
print(q)
w = a - y # substraction
print(w)
e = a * y # multiplication
print(e)
r = a / y # division
print(r)
t = a // y # division but only print integer value
print(t)
g = a ** y # to the power of
print(g)
m = a % y # remainder
print(m)
# constants variables..
PI = 3.14 # this is a var with a constant value
print(abs(PI)) # absolute value of PI
print(round(PI)) # round up value of PI
no = -8.56
print(math.floor(no)) # floor value of no
print(math.ceil(no)) # ceiling value of no
# if-elif-else loop
age = 10
if age >= 21:
print("Adult")
elif age >= 13:
print("Teenager")
else:
print("Child")
# ternary operator
print("Adult" if age >= 21 else "Teenager")
# for loops
for p in "Dipesh":
print(p)
for l in range(0, 10, 2): # range is a kind of list...
print(l)
answer = 10
guess = 1
while answer != guess: # while loop for guessing
guess = int(input("Enter your Guess:: "))
else:
pass # this is used to break the loop...
# defining a function ... Number is even or odd..
def evenodd(numb):
if numb % 2 == 0:
return "even"
else:
return "odd"
print("The Number is " + evenodd(20))
# printing the row at a time...
def rows(**ro):
print(ro)
rows(name="Dipesh", id=1)
|
normal
|
{
"blob_id": "95b75395cafc6ba9f75ecf48157421e37ced2518",
"index": 815,
"step-1": "<mask token>\n\n\ndef rows(**ro):\n print(ro)\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(id(a))\n<mask token>\nprint('hello.....')\nprint(type(a))\nprint(id(a))\n<mask token>\nprint(id(b))\nb.append(10)\nprint(id(b))\n<mask token>\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\n<mask token>\nprint(message)\n<mask token>\nprint(message)\n<mask token>\nprint(lastname)\nprint(name + ' ' + lastname)\n<mask token>\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\n<mask token>\nprint(x)\nprint(bin(x))\n<mask token>\nprint(complex)\n<mask token>\nprint(q)\n<mask token>\nprint(w)\n<mask token>\nprint(e)\n<mask token>\nprint(r)\n<mask token>\nprint(t)\n<mask token>\nprint(g)\n<mask token>\nprint(m)\n<mask token>\nprint(abs(PI))\nprint(round(PI))\n<mask token>\nprint(math.floor(no))\nprint(math.ceil(no))\n<mask token>\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\n<mask token>\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-3": "<mask token>\na = 5.0\nprint(id(a))\na = 10\nprint('hello.....')\nprint(type(a))\nprint(id(a))\nb = [5, 6, 7]\nprint(id(b))\nb.append(10)\nprint(id(b))\nname = input('Enter Your Name:: ')\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\nmessage = 'Python \"Programming\"'\nprint(message)\nmessage = \"\"\"Python \nNew Line..\nProgrammin\"\"\"\nprint(message)\nlastname = input('Enter Your Last Name:: ')\nprint(lastname)\nprint(name + ' ' + lastname)\nfull = f'{name} {lastname}'\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\nx = 5\nprint(x)\nprint(bin(x))\ncomplex = a + 5.0j\nprint(complex)\ny = 3\nq = a + y\nprint(q)\nw = a - y\nprint(w)\ne = a * y\nprint(e)\nr = a / y\nprint(r)\nt = a // y\nprint(t)\ng = a ** y\nprint(g)\nm = a % y\nprint(m)\nPI = 3.14\nprint(abs(PI))\nprint(round(PI))\nno = -8.56\nprint(math.floor(no))\nprint(math.ceil(no))\nage = 10\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\nanswer = 10\nguess = 1\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-4": "import math\na = 5.0\nprint(id(a))\na = 10\nprint('hello.....')\nprint(type(a))\nprint(id(a))\nb = [5, 6, 7]\nprint(id(b))\nb.append(10)\nprint(id(b))\nname = input('Enter Your Name:: ')\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\nmessage = 'Python \"Programming\"'\nprint(message)\nmessage = \"\"\"Python \nNew Line..\nProgrammin\"\"\"\nprint(message)\nlastname = input('Enter Your Last Name:: ')\nprint(lastname)\nprint(name + ' ' + lastname)\nfull = f'{name} {lastname}'\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\nx = 5\nprint(x)\nprint(bin(x))\ncomplex = a + 5.0j\nprint(complex)\ny = 3\nq = a + y\nprint(q)\nw = a - y\nprint(w)\ne = a * y\nprint(e)\nr = a / y\nprint(r)\nt = a // y\nprint(t)\ng = a ** y\nprint(g)\nm = a % y\nprint(m)\nPI = 3.14\nprint(abs(PI))\nprint(round(PI))\nno = -8.56\nprint(math.floor(no))\nprint(math.ceil(no))\nage = 10\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\nanswer = 10\nguess = 1\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-5": "import math\r\n\r\n# type defining of the variable and playing with variables.\r\na = 5.0\r\nprint(id(a))\r\na = 10\r\nprint(\"hello.....\")\r\nprint(type(a))\r\nprint(id(a))\r\n\r\n# locating addresses...\r\nb = [5, 6, 7]\r\nprint(id(b))\r\nb.append(10)\r\nprint(id(b))\r\n\r\n# Strings...\r\n\r\nname = input(\"Enter Your Name:: \") # iNPUTTING AS NAME\r\nprint(name)\r\nprint(len(name))\r\nprint(name[2])\r\nprint(name[0:3])\r\nprint(name[-2:])\r\n\r\n# Escape Sequence\r\n# \\'\r\n# \\\"\r\n# \\\\\r\n# \\n\r\nmessage = 'Python \"Programming\"'\r\nprint(message)\r\nmessage = \"\"\"Python \r\nNew Line..\r\nProgrammin\"\"\"\r\nprint(message)\r\n# string Concatenation\r\n\r\nlastname = input(\"Enter Your Last Name:: \") # iNPUTTING AS NAME\r\nprint(lastname)\r\nprint(name + \" \" + lastname)\r\n\r\nfull = f\"{name} {lastname}\"\r\nprint(\"Another way of writing... \\n\" + full)\r\nprint(full.upper()) # converts into upper case.\r\nprint(full.find(\"ip\")) # finding location of specific char. 
Returns index number.\r\n\r\nprint(\"Dipesh\" in full) # returns Boolean value either true or false..\r\nprint(\"Patel\" in full)\r\nprint(full.replace(\"Rafaliya\", \"Patel\"))\r\n\r\n# Binary representation of any number...\r\nprint(bin(a)) # binary of a = 10\r\nprint(hex(a)) # Hexadecimal of a..\r\n\r\nx = 0b0101\r\nprint((x)) # binary num a\r\nprint(bin(x)) # binary printing of a\r\n\r\n# complex Number...\r\ncomplex = a + 5j\r\nprint(complex) # printing complex number\r\ny = 3\r\n# operations\r\nq = a + y # addition\r\nprint(q)\r\nw = a - y # substraction\r\nprint(w)\r\ne = a * y # multiplication\r\nprint(e)\r\nr = a / y # division\r\nprint(r)\r\nt = a // y # division but only print integer value\r\nprint(t)\r\ng = a ** y # to the power of\r\nprint(g)\r\nm = a % y # remainder\r\nprint(m)\r\n\r\n# constants variables..\r\nPI = 3.14 # this is a var with a constant value\r\nprint(abs(PI)) # absolute value of PI\r\nprint(round(PI)) # round up value of PI\r\nno = -8.56\r\nprint(math.floor(no)) # floor value of no\r\nprint(math.ceil(no)) # ceiling value of no\r\n\r\n# if-elif-else loop\r\nage = 10\r\nif age >= 21:\r\n print(\"Adult\")\r\nelif age >= 13:\r\n print(\"Teenager\")\r\nelse:\r\n print(\"Child\")\r\n\r\n# ternary operator\r\nprint(\"Adult\" if age >= 21 else \"Teenager\")\r\n\r\n# for loops\r\nfor p in \"Dipesh\":\r\n print(p)\r\n\r\nfor l in range(0, 10, 2): # range is a kind of list...\r\n print(l)\r\n\r\nanswer = 10\r\nguess = 1\r\nwhile answer != guess: # while loop for guessing\r\n guess = int(input(\"Enter your Guess:: \"))\r\nelse:\r\n pass # this is used to break the loop...\r\n\r\n# defining a function ... Number is even or odd..\r\ndef evenodd(numb):\r\n if numb % 2 == 0:\r\n return \"even\"\r\n else:\r\n return \"odd\"\r\n\r\n\r\nprint(\"The Number is \" + evenodd(20))\r\n\r\n# printing the row at a time...\r\ndef rows(**ro):\r\n print(ro)\r\n\r\n\r\nrows(name=\"Dipesh\", id=1)\r\n\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
registry = load.PrimitiveRegistry({bool: dict(true=True, false=False).
__getitem__, datetime: partial(flip(datetime.strptime),
'%Y-%m-%dT%H:%M:%S%z'), str: str.strip, **{c: c for c in [int, float,
types.Journey.Status, types.Journey.Component.Status]}}
) | load.GenericRegistry({t.List: load.list_loader}
) | load.get_optional_loader | load.DataclassRegistry({types.Station: {
**valmap(xml.textgetter, {'code': 'Code', 'type': 'Type', 'country':
'Land', 'uic': 'UICCode', 'lat': 'Lat', 'lon': 'Lon', 'name':
'Namen/Middel', 'full_name': 'Namen/Lang', 'short_name': 'Namen/Kort'}),
**{'synonyms': xml.textsgetter('Synoniemen/Synoniem')}}, types.Journey:
{**valmap(xml.textgetter, {'transfer_count': 'AantalOverstappen',
'planned_duration': 'GeplandeReisTijd', 'planned_departure':
'GeplandeVertrekTijd', 'planned_arrival': 'GeplandeAankomstTijd',
'actual_duration': 'ActueleReisTijd', 'actual_departure':
'ActueleVertrekTijd', 'actual_arrival': 'ActueleAankomstTijd', 'status':
'Status'}), **{'components': xml.elemsgetter('ReisDeel'),
'notifications': xml.elemsgetter('Melding')}, **{'optimal': xml.
textgetter('Optimaal', default='false')}}, types.Departure: {**valmap(
xml.textgetter, {'ride_number': 'RitNummer', 'time': 'VertrekTijd',
'destination': 'EindBestemming', 'train_type': 'TreinSoort', 'carrier':
'Vervoerder', 'platform': 'VertrekSpoor'}), **{'platform_changed': xml.
attribgetter('VertrekSpoor', 'wijziging'), 'comments': xml.textsgetter(
'Opmerkingen/Opmerking'), 'delay': xml.textgetter(
'VertrekVertragingTekst', default=None), 'travel_tip': xml.textgetter(
'ReisTip', default=None), 'route_text': xml.textgetter('RouteTekst',
default=None)}}, types.Journey.Component: {**valmap(xml.textgetter, {
'carrier': 'Vervoerder', 'type': 'VervoerType', 'ride_number':
'RitNummer', 'status': 'Status'}), **{'details': xml.textsgetter(
'Reisdetails/Reisdetail'), 'kind': xml.attribgetter('.', 'reisSoort'),
'stops': xml.elemsgetter('ReisStop')}}, types.Journey.Component.Stop: {
'name': xml.textgetter('Naam'), 'time': compose(lambda x: x or None,
xml.textgetter('Tijd')), 'platform_changed': xml.attribgetter('Spoor',
'wijziging', default=None), 'delay': xml.textgetter('VertrekVertraging',
default=None), 'platform': xml.textgetter('Spoor', default=None)},
types.Journey.Notification: valmap(xml.textgetter, {'id': 'Id',
'serious': 'Ernstig', 'text': 'Text'})})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import typing as t
from datetime import datetime
from functools import partial
from toolz import compose, flip, valmap
from valuable import load, xml
from . import types
registry = load.PrimitiveRegistry({bool: dict(true=True, false=False).
__getitem__, datetime: partial(flip(datetime.strptime),
'%Y-%m-%dT%H:%M:%S%z'), str: str.strip, **{c: c for c in [int, float,
types.Journey.Status, types.Journey.Component.Status]}}
) | load.GenericRegistry({t.List: load.list_loader}
) | load.get_optional_loader | load.DataclassRegistry({types.Station: {
**valmap(xml.textgetter, {'code': 'Code', 'type': 'Type', 'country':
'Land', 'uic': 'UICCode', 'lat': 'Lat', 'lon': 'Lon', 'name':
'Namen/Middel', 'full_name': 'Namen/Lang', 'short_name': 'Namen/Kort'}),
**{'synonyms': xml.textsgetter('Synoniemen/Synoniem')}}, types.Journey:
{**valmap(xml.textgetter, {'transfer_count': 'AantalOverstappen',
'planned_duration': 'GeplandeReisTijd', 'planned_departure':
'GeplandeVertrekTijd', 'planned_arrival': 'GeplandeAankomstTijd',
'actual_duration': 'ActueleReisTijd', 'actual_departure':
'ActueleVertrekTijd', 'actual_arrival': 'ActueleAankomstTijd', 'status':
'Status'}), **{'components': xml.elemsgetter('ReisDeel'),
'notifications': xml.elemsgetter('Melding')}, **{'optimal': xml.
textgetter('Optimaal', default='false')}}, types.Departure: {**valmap(
xml.textgetter, {'ride_number': 'RitNummer', 'time': 'VertrekTijd',
'destination': 'EindBestemming', 'train_type': 'TreinSoort', 'carrier':
'Vervoerder', 'platform': 'VertrekSpoor'}), **{'platform_changed': xml.
attribgetter('VertrekSpoor', 'wijziging'), 'comments': xml.textsgetter(
'Opmerkingen/Opmerking'), 'delay': xml.textgetter(
'VertrekVertragingTekst', default=None), 'travel_tip': xml.textgetter(
'ReisTip', default=None), 'route_text': xml.textgetter('RouteTekst',
default=None)}}, types.Journey.Component: {**valmap(xml.textgetter, {
'carrier': 'Vervoerder', 'type': 'VervoerType', 'ride_number':
'RitNummer', 'status': 'Status'}), **{'details': xml.textsgetter(
'Reisdetails/Reisdetail'), 'kind': xml.attribgetter('.', 'reisSoort'),
'stops': xml.elemsgetter('ReisStop')}}, types.Journey.Component.Stop: {
'name': xml.textgetter('Naam'), 'time': compose(lambda x: x or None,
xml.textgetter('Tijd')), 'platform_changed': xml.attribgetter('Spoor',
'wijziging', default=None), 'delay': xml.textgetter('VertrekVertraging',
default=None), 'platform': xml.textgetter('Spoor', default=None)},
types.Journey.Notification: valmap(xml.textgetter, {'id': 'Id',
'serious': 'Ernstig', 'text': 'Text'})})
<|reserved_special_token_1|>
"""deserialization tools"""
import typing as t
from datetime import datetime
from functools import partial
from toolz import compose, flip, valmap
from valuable import load, xml
from . import types
registry = load.PrimitiveRegistry({
bool: dict(true=True, false=False).__getitem__,
datetime: partial(flip(datetime.strptime), '%Y-%m-%dT%H:%M:%S%z'),
str: str.strip,
**{
c: c for c in [
int,
float,
types.Journey.Status,
types.Journey.Component.Status
]
}
}) | load.GenericRegistry({
t.List: load.list_loader,
}) | load.get_optional_loader | load.DataclassRegistry({
types.Station: {**valmap(xml.textgetter, {
'code': 'Code',
'type': 'Type',
'country': 'Land',
'uic': 'UICCode',
'lat': 'Lat',
'lon': 'Lon',
'name': 'Namen/Middel',
'full_name': 'Namen/Lang',
'short_name': 'Namen/Kort',
}), **{
'synonyms': xml.textsgetter('Synoniemen/Synoniem'),
}},
types.Journey: {**valmap(xml.textgetter, {
'transfer_count': 'AantalOverstappen',
'planned_duration': 'GeplandeReisTijd',
'planned_departure': 'GeplandeVertrekTijd',
'planned_arrival': 'GeplandeAankomstTijd',
'actual_duration': 'ActueleReisTijd',
'actual_departure': 'ActueleVertrekTijd',
'actual_arrival': 'ActueleAankomstTijd',
'status': 'Status',
}), **{
'components': xml.elemsgetter('ReisDeel'),
'notifications': xml.elemsgetter('Melding'),
}, **{
'optimal': xml.textgetter('Optimaal', default='false')
}},
types.Departure: {**valmap(xml.textgetter, {
'ride_number': 'RitNummer',
'time': 'VertrekTijd',
'destination': 'EindBestemming',
'train_type': 'TreinSoort',
'carrier': 'Vervoerder',
'platform': 'VertrekSpoor',
}), **{
'platform_changed': xml.attribgetter('VertrekSpoor', 'wijziging'),
'comments': xml.textsgetter('Opmerkingen/Opmerking'),
'delay': xml.textgetter('VertrekVertragingTekst',
default=None),
'travel_tip': xml.textgetter('ReisTip', default=None),
'route_text': xml.textgetter('RouteTekst', default=None),
}},
types.Journey.Component: {**valmap(xml.textgetter, {
'carrier': 'Vervoerder',
'type': 'VervoerType',
'ride_number': 'RitNummer',
'status': 'Status',
}), **{
'details': xml.textsgetter('Reisdetails/Reisdetail'),
'kind': xml.attribgetter('.', 'reisSoort'),
'stops': xml.elemsgetter('ReisStop'),
}},
types.Journey.Component.Stop: {
'name': xml.textgetter('Naam'),
'time': compose(lambda x: x or None,
xml.textgetter('Tijd')),
'platform_changed': xml.attribgetter('Spoor', 'wijziging',
default=None),
'delay': xml.textgetter('VertrekVertraging', default=None),
'platform': xml.textgetter('Spoor', default=None)
},
types.Journey.Notification: valmap(xml.textgetter, {
'id': 'Id',
'serious': 'Ernstig',
'text': 'Text',
})
})
|
flexible
|
{
"blob_id": "2dcb2d8d41096f0affe569d8ddbdd190885d5f14",
"index": 4738,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nregistry = load.PrimitiveRegistry({bool: dict(true=True, false=False).\n __getitem__, datetime: partial(flip(datetime.strptime),\n '%Y-%m-%dT%H:%M:%S%z'), str: str.strip, **{c: c for c in [int, float,\n types.Journey.Status, types.Journey.Component.Status]}}\n ) | load.GenericRegistry({t.List: load.list_loader}\n ) | load.get_optional_loader | load.DataclassRegistry({types.Station: {\n **valmap(xml.textgetter, {'code': 'Code', 'type': 'Type', 'country':\n 'Land', 'uic': 'UICCode', 'lat': 'Lat', 'lon': 'Lon', 'name':\n 'Namen/Middel', 'full_name': 'Namen/Lang', 'short_name': 'Namen/Kort'}),\n **{'synonyms': xml.textsgetter('Synoniemen/Synoniem')}}, types.Journey:\n {**valmap(xml.textgetter, {'transfer_count': 'AantalOverstappen',\n 'planned_duration': 'GeplandeReisTijd', 'planned_departure':\n 'GeplandeVertrekTijd', 'planned_arrival': 'GeplandeAankomstTijd',\n 'actual_duration': 'ActueleReisTijd', 'actual_departure':\n 'ActueleVertrekTijd', 'actual_arrival': 'ActueleAankomstTijd', 'status':\n 'Status'}), **{'components': xml.elemsgetter('ReisDeel'),\n 'notifications': xml.elemsgetter('Melding')}, **{'optimal': xml.\n textgetter('Optimaal', default='false')}}, types.Departure: {**valmap(\n xml.textgetter, {'ride_number': 'RitNummer', 'time': 'VertrekTijd',\n 'destination': 'EindBestemming', 'train_type': 'TreinSoort', 'carrier':\n 'Vervoerder', 'platform': 'VertrekSpoor'}), **{'platform_changed': xml.\n attribgetter('VertrekSpoor', 'wijziging'), 'comments': xml.textsgetter(\n 'Opmerkingen/Opmerking'), 'delay': xml.textgetter(\n 'VertrekVertragingTekst', default=None), 'travel_tip': xml.textgetter(\n 'ReisTip', default=None), 'route_text': xml.textgetter('RouteTekst',\n default=None)}}, types.Journey.Component: {**valmap(xml.textgetter, {\n 'carrier': 'Vervoerder', 'type': 'VervoerType', 'ride_number':\n 'RitNummer', 'status': 'Status'}), **{'details': xml.textsgetter(\n 'Reisdetails/Reisdetail'), 'kind': xml.attribgetter('.', 'reisSoort'),\n 
'stops': xml.elemsgetter('ReisStop')}}, types.Journey.Component.Stop: {\n 'name': xml.textgetter('Naam'), 'time': compose(lambda x: x or None,\n xml.textgetter('Tijd')), 'platform_changed': xml.attribgetter('Spoor',\n 'wijziging', default=None), 'delay': xml.textgetter('VertrekVertraging',\n default=None), 'platform': xml.textgetter('Spoor', default=None)},\n types.Journey.Notification: valmap(xml.textgetter, {'id': 'Id',\n 'serious': 'Ernstig', 'text': 'Text'})})\n",
"step-3": "<mask token>\nimport typing as t\nfrom datetime import datetime\nfrom functools import partial\nfrom toolz import compose, flip, valmap\nfrom valuable import load, xml\nfrom . import types\nregistry = load.PrimitiveRegistry({bool: dict(true=True, false=False).\n __getitem__, datetime: partial(flip(datetime.strptime),\n '%Y-%m-%dT%H:%M:%S%z'), str: str.strip, **{c: c for c in [int, float,\n types.Journey.Status, types.Journey.Component.Status]}}\n ) | load.GenericRegistry({t.List: load.list_loader}\n ) | load.get_optional_loader | load.DataclassRegistry({types.Station: {\n **valmap(xml.textgetter, {'code': 'Code', 'type': 'Type', 'country':\n 'Land', 'uic': 'UICCode', 'lat': 'Lat', 'lon': 'Lon', 'name':\n 'Namen/Middel', 'full_name': 'Namen/Lang', 'short_name': 'Namen/Kort'}),\n **{'synonyms': xml.textsgetter('Synoniemen/Synoniem')}}, types.Journey:\n {**valmap(xml.textgetter, {'transfer_count': 'AantalOverstappen',\n 'planned_duration': 'GeplandeReisTijd', 'planned_departure':\n 'GeplandeVertrekTijd', 'planned_arrival': 'GeplandeAankomstTijd',\n 'actual_duration': 'ActueleReisTijd', 'actual_departure':\n 'ActueleVertrekTijd', 'actual_arrival': 'ActueleAankomstTijd', 'status':\n 'Status'}), **{'components': xml.elemsgetter('ReisDeel'),\n 'notifications': xml.elemsgetter('Melding')}, **{'optimal': xml.\n textgetter('Optimaal', default='false')}}, types.Departure: {**valmap(\n xml.textgetter, {'ride_number': 'RitNummer', 'time': 'VertrekTijd',\n 'destination': 'EindBestemming', 'train_type': 'TreinSoort', 'carrier':\n 'Vervoerder', 'platform': 'VertrekSpoor'}), **{'platform_changed': xml.\n attribgetter('VertrekSpoor', 'wijziging'), 'comments': xml.textsgetter(\n 'Opmerkingen/Opmerking'), 'delay': xml.textgetter(\n 'VertrekVertragingTekst', default=None), 'travel_tip': xml.textgetter(\n 'ReisTip', default=None), 'route_text': xml.textgetter('RouteTekst',\n default=None)}}, types.Journey.Component: {**valmap(xml.textgetter, {\n 'carrier': 'Vervoerder', 
'type': 'VervoerType', 'ride_number':\n 'RitNummer', 'status': 'Status'}), **{'details': xml.textsgetter(\n 'Reisdetails/Reisdetail'), 'kind': xml.attribgetter('.', 'reisSoort'),\n 'stops': xml.elemsgetter('ReisStop')}}, types.Journey.Component.Stop: {\n 'name': xml.textgetter('Naam'), 'time': compose(lambda x: x or None,\n xml.textgetter('Tijd')), 'platform_changed': xml.attribgetter('Spoor',\n 'wijziging', default=None), 'delay': xml.textgetter('VertrekVertraging',\n default=None), 'platform': xml.textgetter('Spoor', default=None)},\n types.Journey.Notification: valmap(xml.textgetter, {'id': 'Id',\n 'serious': 'Ernstig', 'text': 'Text'})})\n",
"step-4": "\"\"\"deserialization tools\"\"\"\nimport typing as t\nfrom datetime import datetime\nfrom functools import partial\n\nfrom toolz import compose, flip, valmap\nfrom valuable import load, xml\n\nfrom . import types\n\nregistry = load.PrimitiveRegistry({\n bool: dict(true=True, false=False).__getitem__,\n datetime: partial(flip(datetime.strptime), '%Y-%m-%dT%H:%M:%S%z'),\n str: str.strip,\n **{\n c: c for c in [\n int,\n float,\n types.Journey.Status,\n types.Journey.Component.Status\n ]\n }\n}) | load.GenericRegistry({\n t.List: load.list_loader,\n}) | load.get_optional_loader | load.DataclassRegistry({\n types.Station: {**valmap(xml.textgetter, {\n 'code': 'Code',\n 'type': 'Type',\n 'country': 'Land',\n 'uic': 'UICCode',\n 'lat': 'Lat',\n 'lon': 'Lon',\n 'name': 'Namen/Middel',\n 'full_name': 'Namen/Lang',\n 'short_name': 'Namen/Kort',\n }), **{\n 'synonyms': xml.textsgetter('Synoniemen/Synoniem'),\n }},\n types.Journey: {**valmap(xml.textgetter, {\n 'transfer_count': 'AantalOverstappen',\n 'planned_duration': 'GeplandeReisTijd',\n 'planned_departure': 'GeplandeVertrekTijd',\n 'planned_arrival': 'GeplandeAankomstTijd',\n 'actual_duration': 'ActueleReisTijd',\n 'actual_departure': 'ActueleVertrekTijd',\n 'actual_arrival': 'ActueleAankomstTijd',\n 'status': 'Status',\n }), **{\n 'components': xml.elemsgetter('ReisDeel'),\n 'notifications': xml.elemsgetter('Melding'),\n }, **{\n 'optimal': xml.textgetter('Optimaal', default='false')\n }},\n types.Departure: {**valmap(xml.textgetter, {\n 'ride_number': 'RitNummer',\n 'time': 'VertrekTijd',\n 'destination': 'EindBestemming',\n 'train_type': 'TreinSoort',\n 'carrier': 'Vervoerder',\n 'platform': 'VertrekSpoor',\n }), **{\n 'platform_changed': xml.attribgetter('VertrekSpoor', 'wijziging'),\n 'comments': xml.textsgetter('Opmerkingen/Opmerking'),\n 'delay': xml.textgetter('VertrekVertragingTekst',\n default=None),\n 'travel_tip': xml.textgetter('ReisTip', default=None),\n 'route_text': 
xml.textgetter('RouteTekst', default=None),\n }},\n types.Journey.Component: {**valmap(xml.textgetter, {\n 'carrier': 'Vervoerder',\n 'type': 'VervoerType',\n 'ride_number': 'RitNummer',\n 'status': 'Status',\n }), **{\n 'details': xml.textsgetter('Reisdetails/Reisdetail'),\n 'kind': xml.attribgetter('.', 'reisSoort'),\n 'stops': xml.elemsgetter('ReisStop'),\n }},\n types.Journey.Component.Stop: {\n 'name': xml.textgetter('Naam'),\n 'time': compose(lambda x: x or None,\n xml.textgetter('Tijd')),\n 'platform_changed': xml.attribgetter('Spoor', 'wijziging',\n default=None),\n 'delay': xml.textgetter('VertrekVertraging', default=None),\n 'platform': xml.textgetter('Spoor', default=None)\n },\n types.Journey.Notification: valmap(xml.textgetter, {\n 'id': 'Id',\n 'serious': 'Ernstig',\n 'text': 'Text',\n })\n})\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Solution:

    def minimumDeviation(self, nums: List[int]) ->int:
        """Return the minimum possible (max - min) deviation of nums.

        Each element may be halved repeatedly while even, or doubled once if odd.
        Strategy: double every odd number up front (so every value is at its
        maximum), then repeatedly halve the current maximum via a max-heap,
        tracking the smallest spread seen.  Stops when the maximum is odd,
        since it can shrink no further.
        """
        max_heap = []          # stores negated values (heapq is a min-heap)
        smallest = inf         # running minimum across all current values
        best = inf             # best (max - min) spread observed so far
        for value in nums:
            # Odd values can only grow; normalize them to their doubled form.
            if value % 2:
                value = value * 2
            heapq.heappush(max_heap, -value)
            smallest = min(smallest, value)
        while True:
            largest = -heapq.heappop(max_heap)
            best = min(best, largest - smallest)
            if largest % 2:
                # The maximum is odd: it cannot be reduced, so we are done.
                break
            half = largest // 2
            heapq.heappush(max_heap, -half)
            smallest = min(smallest, half)
        return best
|
normal
|
{
"blob_id": "975b2f3443e19f910c71f872484350aef9f09dd2",
"index": 7370,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def minimumDeviation(self, nums: List[int]) ->int:\n hq, left, right, res = [], inf, 0, inf\n for num in nums:\n if num % 2:\n num = num * 2\n heapq.heappush(hq, -num)\n left = min(left, num)\n while True:\n right = -heapq.heappop(hq)\n if right - left < res:\n res = right - left\n if right % 2 == 0:\n heapq.heappush(hq, -right // 2)\n left = min(left, right // 2)\n else:\n break\n return res\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import pygame
import textwrap
import client.Button as Btn
from client.ClickableImage import ClickableImage as ClickImg
from client.CreateDisplay import CreateDisplay
import client.LiverpoolButtons as RuleSetsButtons_LP
import client.HandAndFootButtons as RuleSetsButtons_HF
import client.HandManagement as HandManagement
from client.UICardWrapper import UICardWrapper
import client.UIConstants as UIC
from common.Card import Card
class HandView:
    """This class handles the player's cards and enables actions on them.

    Actions are primarily performed using buttons; since these need to be somewhat
    customized by game, the buttons are in ***.py (*** is Liverpool or HandAndFoot)
    and are imported as RuleSetsButtons.
    Management of displaying the hand's cards is not game specific, and methods that
    help with that are in HandManagement.py.
    Player can arrange their own hand, and prepare to play cards during other players' turns.
    """

    def __init__(self, controller, display, ruleset):
        """Initialize hand state and create the rule-set specific buttons.

        controller -- client-side controller (owns the shared game state ``_state``).
        display    -- pygame surface the hand and buttons are drawn on.
        ruleset    -- 'Liverpool' or 'HandAndFoot'; selects the button module used.
        """
        self.controller = controller
        self.display = display
        self.ruleset = ruleset
        self.Meld_Threshold = controller._state.rules.Meld_Threshold
        self.deal_size = controller._state.rules.Deal_Size
        self.help_text = controller._state.rules.help_text
        if ruleset == 'Liverpool':
            # One button per set plus one per run, per player, for this round.
            self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
            self.RuleSetsButtons = RuleSetsButtons_LP
        elif ruleset == 'HandAndFoot':
            self.RuleSetsButtons = RuleSetsButtons_HF
        self.hand_scaling = (UIC.scale, UIC.Card_Spacing)
        self.current_hand = []
        self.last_hand = []
        self.hand_info = []  # will contain UICardWrapped elements of current_hand
        self.prepared_cards = []  # will contain list of prepared cards from controller
        self.discards = []
        self.discard_confirm = False
        # num_wilds is HandAndFoot specific, only non-zero if prepare_card_btn in HandAndFootButtons.py is triggered.
        self.num_wilds = 0
        self.wild_cards = []
        self.selected_list = []
        self.round_index = 0
        self.round_advance = False
        self.num_players = 1
        # In Liverpool and other Shared_Board games: prepare cards buttons must be updated each round.
        self.need_updated_buttons = True
        self.ready_color_idx = 2
        self.not_ready_color_idx = 6
        #
        # If someone joins between rounds, then they won't know the correct meld requirement until the round begins.
        # (self.controller._state.round = -1 until play commences).
        # In HandAndFoot: correct meld requirement will be written in lower right corner once play commences.
        # In Liverpool: will see correct buttons once round commences.
        self.RuleSetsButtons.CreateButtons(self)

    def update(self, player_index=0, num_players=1, visible_scards=None):
        """This updates the view of the hand; between rounds it displays a message.

        player_index   -- this client's index among the players.
        num_players    -- number of players currently in the game.
        visible_scards -- visible shared card groups (Shared_Board games); a fresh
                          empty list is used when omitted.
        """
        # The default used to be the mutable literal `[]`, which is one shared
        # list object across all calls; None-with-guard avoids that pitfall.
        self.visible_scards = visible_scards if visible_scards is not None else []
        self.controller._state.player_index = player_index
        if self.num_players > num_players and self.controller._state.rules.Shared_Board \
                and not self.need_updated_buttons:
            # A player has left the game after the round has begun -- make adjustments so game can continue.
            self.playerLeftGame(num_players)
        self.num_players = num_players
        if self.controller._state.round == -1:
            self.mesgBetweenRounds(self.help_text)
            if self.round_advance:
                self.round_index = self.round_index + 1
                if self.round_index < len(self.Meld_Threshold):
                    self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
                    self.need_updated_buttons = True  # used for Liverpool.
                else:
                    self.help_text = ['Game has concluded. Scores for each round can be found in command window.']
                self.round_advance = False
        else:
            if not self.round_index == self.controller._state.round:
                # Need this to true up round_index if a player joins mid-game.
                skipped_rounds = self.controller._state.round - self.round_index
                for idx in range(skipped_rounds):
                    # todo: How to score latecomers should be moved to ruleset.
                    score = 0
                    self.controller.lateJoinScores(score)
                self.round_index = self.controller._state.round
            self.round_advance = True
            # Reset outline colors on ready buttons to what they need to be at the start of the "between rounds" state.
            self.ready_color_idx = 2
            self.not_ready_color_idx = 6
            self.last_hand = self.current_hand
            self.current_hand = self.controller.getHand()
            if len(self.current_hand) == 0:
                self.hand_info = []
            elif not self.last_hand == self.current_hand:
                self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
            HandManagement.ShowHolding(self, self.hand_info)  # displays hand
            self.RuleSetsButtons.ButtonDisplay(self)

    def nextEventWildsOnBoard(self):
        """This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.

        It is looking for key strokes to designate ambiguous wild cards in runs.
        The mouse is ignored until you designate all the wilds (turn phase goes back to play).
        """
        if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
            for self.event in pygame.event.get():
                if self.event.type == pygame.QUIT:
                    # The window crashed, we should handle this.
                    # NOTE(review): quit() is intended for the interactive interpreter;
                    # sys.exit() would be the conventional choice here.
                    print("pygame crash, AAAHHH")
                    pygame.quit()
                    quit()
                else:
                    # in Shared_Board games, check if there are wilds that need to be updated.
                    # All other events are ignored until play is finished.
                    HandManagement.wildsHiLoGetInput(self)

    def nextEvent(self):
        """This submits the next user input to the controller.

        In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
        unless designating values for prepared wild cards, at which time the mouse is ignored
        unless you want to clear the prepared cards.
        In games with Shared_Board = True wilds on board might change designation upon other cards being played.
        IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then
        it must be designated before play is completed.
        This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled.
        """
        if self.controller._state.rules.Shared_Board:
            self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
            if self.num_wilds > 0:
                self.nextEventWildsOnBoard()
        for self.event in pygame.event.get():
            if self.event.type == pygame.QUIT:
                # The window crashed, we should handle this.
                print("pygame crash, AAAHHH")
                pygame.quit()
                quit()
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
                wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
                wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
                self.controller.note = wild_instructions
            pos = pygame.mouse.get_pos()
            if self.event.type == pygame.MOUSEBUTTONDOWN:
                self.RuleSetsButtons.ClickedButton(self, pos)
                for element in self.hand_info:
                    # cannot select prepared cards, so not included in logic below.
                    if element.img_clickable.isOver(pos):
                        if element.status == 1:
                            element.status = 0
                            element.img_clickable.changeOutline(0)
                        elif element.status == 0:
                            element.status = 1
                            element.img_clickable.changeOutline(2)
            elif self.event.type == pygame.MOUSEMOTION:
                self.RuleSetsButtons.MouseHiLight(self, pos)
                HandManagement.MouseHiLight(self.hand_info, pos)
            elif self.event.type == pygame.KEYDOWN:
                if self.controller._state.rules.Buy_Option:
                    if self.controller.buying_opportunity:
                        if self.event.key == pygame.K_y:
                            self.controller.wantTopCard(True)
                            self.controller.note = 'You have signaled you want to buy the card.'
                        elif self.event.key == pygame.K_n:
                            self.controller.wantTopCard(False)
                            self.controller.note = 'You have signaled you do not want to buy the card.'
                if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
                    HandManagement.ManuallyAssign(self)

    def gatherSelected(self):
        """Gathers selected cards,
        in order to take action on selected cards (either discarding them or preparing them).
        """
        self.selected_list = []
        for element in self.hand_info:
            if element.status == 1:
                self.selected_list.append(element)
        return self.selected_list

    def discardConfirmation(self, confirmed, wrapped_discards):
        """Confirm a user is sure about a discard and then perform it once confirmed.

        Returns True while still waiting on the user's confirmation, False once the
        discard has been processed (or abandoned).
        """
        discards = []
        for element in wrapped_discards:
            discards.append(element.card)
        if self.discards != discards:
            # Selection changed since the last call -- require a fresh confirmation.
            confirmed = False
            self.discards = discards
        if not confirmed:
            self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
            return True  # ask for confirmation
        else:
            # confirmed is True, performing discard and removing discarded wrapped cards from hand_info.
            if self.discard_confirm:
                controller_response = self.controller.discard(self.discards)
                if controller_response:
                    for element in wrapped_discards:
                        self.hand_info.remove(element)
            return False  # now that this is done, we don't have anything waiting on confirmation

    def mesgBetweenRounds(self, message):
        """Print message where cards are usually displayed until Ready button is clicked for next round."""
        font = UIC.Medium_Text
        y_offset = (UIC.Disp_Height * (1 - (UIC.Hand_Row_Fraction * 0.8)))
        for message_string in message:
            text_surface = font.render(message_string, True, UIC.Black)
            text_rect = text_surface.get_rect()
            text_rect.center = ((UIC.Disp_Width * 0.5), y_offset)
            y_offset = y_offset + UIC.Medium_Text_Feed
            self.display.blit(text_surface, text_rect)

    def labelMedium(self, labelstr, x_offset, y_offset):
        """Render labelstr in medium bright-blue text centered at (x_offset, y_offset)."""
        font = UIC.Medium_Text
        text_surface = font.render(labelstr, True, UIC.Bright_Blue)
        text_rect = text_surface.get_rect()
        text_rect.center = (x_offset, y_offset)
        self.display.blit(text_surface, text_rect)

    def playerLeftGame(self, num_players):
        """Adjust client state after a player disconnects mid-round (Shared_Board = True games).

        Must make adjustments to (i) card group dictionaries, (ii) prepared cards
        and (iii) button locations.
        """
        self.controller.resetProcessedCards(self.visible_scards)
        self.controller.clearPreparedCards()  # so that prepared cards won't be mistakenly played on wrong group.
        self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)
        self.controller.note = "A player has left the game, all prepared cards are automatically cleared."
        # reset set/run button locations:
        if num_players > 1:
            players_sp_w = UIC.Disp_Width / num_players
        else:
            players_sp_w = UIC.Disp_Width
        for idx in range(num_players):
            for button in self.assign_cards_btns[idx]:
                button.x = 10 + (players_sp_w * idx)
|
normal
|
{
"blob_id": "1cdd315eec6792a8588dc2e6a221bc024be47078",
"index": 7885,
"step-1": "<mask token>\n\n\nclass HandView:\n <mask token>\n\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0\n ] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = UIC.scale, UIC.Card_Spacing\n self.current_hand = []\n self.last_hand = []\n self.hand_info = []\n self.prepared_cards = []\n self.discards = []\n self.discard_confirm = False\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards=[]):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if (self.num_players > num_players and self.controller._state.rules\n .Shared_Board and not self.need_updated_buttons):\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.\n Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True\n else:\n self.help_text = [\n 'Game has concluded. 
Scores for each round can be found in command window.'\n ]\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n skipped_rounds = (self.controller._state.round - self.\n round_index)\n for idx in range(skipped_rounds):\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.\n current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info)\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n else:\n HandManagement.wildsHiLoGetInput(self)\n <mask token>\n <mask token>\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = 'Please confirm - discard ' + '{0}'.format(\n self.discards)\n return True\n else:\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in 
wrapped_discards:\n self.hand_info.remove(element)\n return False\n <mask token>\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = x_offset, y_offset\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards()\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.\n hand_info)\n self.controller.note = (\n 'A player has left the game, all prepared cards are automatically cleared.'\n )\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + players_sp_w * idx\n",
"step-2": "<mask token>\n\n\nclass HandView:\n <mask token>\n\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0\n ] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = UIC.scale, UIC.Card_Spacing\n self.current_hand = []\n self.last_hand = []\n self.hand_info = []\n self.prepared_cards = []\n self.discards = []\n self.discard_confirm = False\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards=[]):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if (self.num_players > num_players and self.controller._state.rules\n .Shared_Board and not self.need_updated_buttons):\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.\n Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True\n else:\n self.help_text = [\n 'Game has concluded. 
Scores for each round can be found in command window.'\n ]\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n skipped_rounds = (self.controller._state.round - self.\n round_index)\n for idx in range(skipped_rounds):\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.\n current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info)\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n else:\n HandManagement.wildsHiLoGetInput(self)\n\n def nextEvent(self):\n \"\"\"This submits the next user input to the controller,\n\n In games with Shared_Board = False (e.g. 
HandAndFoot) key strokes don't do anything\n unless designating values for prepared wild cards, at which time the mouse is ignored\n unless you want to clear the prepared cards.\n In games with Shared_Board = True wilds on board might change designation upon other cards being played.\n IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then\n it must be designated before play is completed.\n This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled.\"\"\"\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n wild_instructions = (\n 'Use the keyboard to designate your prepared wild cards \\r\\n '\n )\n wild_instructions = (wild_instructions +\n '(use 0 for 10 and J, Q, or K for facecards).')\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = (\n 'You have signaled you want to buy the card.')\n elif self.event.key == 
pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = (\n 'You have signaled you do not want to buy the card.'\n )\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n HandManagement.ManuallyAssign(self)\n\n def gatherSelected(self):\n \"\"\" gathers selected cards\n in order to take action on selected cards (either discarding them or preparing them)\n \"\"\"\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = 'Please confirm - discard ' + '{0}'.format(\n self.discards)\n return True\n else:\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False\n <mask token>\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = x_offset, y_offset\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards()\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.\n hand_info)\n self.controller.note = (\n 'A player has left the game, all prepared cards are automatically cleared.'\n )\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in 
self.assign_cards_btns[idx]:\n button.x = 10 + players_sp_w * idx\n",
"step-3": "<mask token>\n\n\nclass HandView:\n \"\"\"This class handles player's cards and enables actions.\n\n Actions are primarily performed using buttons, since these need to somewhat customized by game\n the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.\n Management of displaying the hand's cards is not game specific, and methods that help with that\n are in HandManagement.py.\n\n Player can arrange their own hand, and prepare to play cards during other players' turns.\n \"\"\"\n\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0\n ] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = UIC.scale, UIC.Card_Spacing\n self.current_hand = []\n self.last_hand = []\n self.hand_info = []\n self.prepared_cards = []\n self.discards = []\n self.discard_confirm = False\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards=[]):\n \"\"\"This updates the view of the hand, between rounds it displays a message. 
\"\"\"\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if (self.num_players > num_players and self.controller._state.rules\n .Shared_Board and not self.need_updated_buttons):\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.\n Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True\n else:\n self.help_text = [\n 'Game has concluded. Scores for each round can be found in command window.'\n ]\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n skipped_rounds = (self.controller._state.round - self.\n round_index)\n for idx in range(skipped_rounds):\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.\n current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info)\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n 
pygame.quit()\n quit()\n else:\n HandManagement.wildsHiLoGetInput(self)\n\n def nextEvent(self):\n \"\"\"This submits the next user input to the controller,\n\n In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything\n unless designating values for prepared wild cards, at which time the mouse is ignored\n unless you want to clear the prepared cards.\n In games with Shared_Board = True wilds on board might change designation upon other cards being played.\n IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then\n it must be designated before play is completed.\n This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled.\"\"\"\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n wild_instructions = (\n 'Use the keyboard to designate your prepared wild cards \\r\\n '\n )\n wild_instructions = (wild_instructions +\n '(use 0 for 10 and J, Q, or K for facecards).')\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if 
self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = (\n 'You have signaled you want to buy the card.')\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = (\n 'You have signaled you do not want to buy the card.'\n )\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n HandManagement.ManuallyAssign(self)\n\n def gatherSelected(self):\n \"\"\" gathers selected cards\n in order to take action on selected cards (either discarding them or preparing them)\n \"\"\"\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = 'Please confirm - discard ' + '{0}'.format(\n self.discards)\n return True\n else:\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False\n\n def mesgBetweenRounds(self, message):\n \"\"\"print message where cards usually displayed until Ready button is clicked for next round.\"\"\"\n font = UIC.Medium_Text\n y_offset = UIC.Disp_Height * (1 - UIC.Hand_Row_Fraction * 0.8)\n for message_string in message:\n text_surface = font.render(message_string, True, UIC.Black)\n text_rect = text_surface.get_rect()\n text_rect.center = UIC.Disp_Width * 0.5, y_offset\n y_offset = y_offset + UIC.Medium_Text_Feed\n self.display.blit(text_surface, text_rect)\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = 
UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = x_offset, y_offset\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards()\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.\n hand_info)\n self.controller.note = (\n 'A player has left the game, all prepared cards are automatically cleared.'\n )\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + players_sp_w * idx\n",
"step-4": "import pygame\nimport textwrap\nimport client.Button as Btn\nfrom client.ClickableImage import ClickableImage as ClickImg\nfrom client.CreateDisplay import CreateDisplay\nimport client.LiverpoolButtons as RuleSetsButtons_LP\nimport client.HandAndFootButtons as RuleSetsButtons_HF\nimport client.HandManagement as HandManagement\nfrom client.UICardWrapper import UICardWrapper\nimport client.UIConstants as UIC\nfrom common.Card import Card\n\n\nclass HandView:\n \"\"\"This class handles player's cards and enables actions.\n\n Actions are primarily performed using buttons, since these need to somewhat customized by game\n the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.\n Management of displaying the hand's cards is not game specific, and methods that help with that\n are in HandManagement.py.\n\n Player can arrange their own hand, and prepare to play cards during other players' turns.\n \"\"\"\n\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0\n ] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = UIC.scale, UIC.Card_Spacing\n self.current_hand = []\n self.last_hand = []\n self.hand_info = []\n self.prepared_cards = []\n self.discards = []\n self.discard_confirm = False\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.RuleSetsButtons.CreateButtons(self)\n\n def 
update(self, player_index=0, num_players=1, visible_scards=[]):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if (self.num_players > num_players and self.controller._state.rules\n .Shared_Board and not self.need_updated_buttons):\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.\n Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True\n else:\n self.help_text = [\n 'Game has concluded. Scores for each round can be found in command window.'\n ]\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n skipped_rounds = (self.controller._state.round - self.\n round_index)\n for idx in range(skipped_rounds):\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.\n current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info)\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n if 
self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n else:\n HandManagement.wildsHiLoGetInput(self)\n\n def nextEvent(self):\n \"\"\"This submits the next user input to the controller,\n\n In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything\n unless designating values for prepared wild cards, at which time the mouse is ignored\n unless you want to clear the prepared cards.\n In games with Shared_Board = True wilds on board might change designation upon other cards being played.\n IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then\n it must be designated before play is completed.\n This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled.\"\"\"\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n wild_instructions = (\n 'Use the keyboard to designate your prepared wild cards \\r\\n '\n )\n wild_instructions = (wild_instructions +\n '(use 0 for 10 and J, Q, or K for facecards).')\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n elif self.event.type == pygame.MOUSEMOTION:\n 
self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = (\n 'You have signaled you want to buy the card.')\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = (\n 'You have signaled you do not want to buy the card.'\n )\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n HandManagement.ManuallyAssign(self)\n\n def gatherSelected(self):\n \"\"\" gathers selected cards\n in order to take action on selected cards (either discarding them or preparing them)\n \"\"\"\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = 'Please confirm - discard ' + '{0}'.format(\n self.discards)\n return True\n else:\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False\n\n def mesgBetweenRounds(self, message):\n \"\"\"print message where cards usually displayed until Ready button is clicked for next round.\"\"\"\n font = UIC.Medium_Text\n y_offset = UIC.Disp_Height * (1 - UIC.Hand_Row_Fraction * 0.8)\n for message_string in message:\n text_surface = font.render(message_string, True, UIC.Black)\n text_rect = text_surface.get_rect()\n text_rect.center = 
UIC.Disp_Width * 0.5, y_offset\n y_offset = y_offset + UIC.Medium_Text_Feed\n self.display.blit(text_surface, text_rect)\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = x_offset, y_offset\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards()\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.\n hand_info)\n self.controller.note = (\n 'A player has left the game, all prepared cards are automatically cleared.'\n )\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + players_sp_w * idx\n",
"step-5": "import pygame\nimport textwrap\nimport client.Button as Btn\nfrom client.ClickableImage import ClickableImage as ClickImg\nfrom client.CreateDisplay import CreateDisplay\nimport client.LiverpoolButtons as RuleSetsButtons_LP\nimport client.HandAndFootButtons as RuleSetsButtons_HF\nimport client.HandManagement as HandManagement\nfrom client.UICardWrapper import UICardWrapper\nimport client.UIConstants as UIC\nfrom common.Card import Card\n\n\nclass HandView:\n \"\"\"This class handles player's cards and enables actions.\n\n Actions are primarily performed using buttons, since these need to somewhat customized by game\n the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.\n Management of displaying the hand's cards is not game specific, and methods that help with that\n are in HandManagement.py.\n\n Player can arrange their own hand, and prepare to play cards during other players' turns.\n \"\"\"\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = (UIC.scale, UIC.Card_Spacing)\n self.current_hand = []\n self.last_hand = []\n self.hand_info = [] # will contain UICardWrapped elements of current_hand\n self.prepared_cards = [] # will contain list of prepared cards from controller\n self.discards = []\n self.discard_confirm = False\n # num_wilds is HandAndFoot specific, only non-zero if by prepare_card_btn in HandAndFootButtons.py is triggered.\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n 
self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n # In Liverpool and other Shared_Board games: prepare cards buttons must be updated each round\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n #\n # if someone joins between rounds, then they won't know the correct meld requirement until the round begins.\n # (self.controller._state.round = -1 until play commences).\n # In HandAndFoot: Correct meld requirement will be written in lower right corner once play commences.\n # In Liverpool: Will see correct buttons once round commences.\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards = []):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if self.num_players > num_players and self.controller._state.rules.Shared_Board \\\n and not self.need_updated_buttons:\n # A player has left the game after the round has begun -- make adjustments so game can continue.\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True # used for Liverpool.\n else:\n self.help_text = ['Game has concluded. 
Scores for each round can be found in command window.']\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n # Need this to true up round_index if a player joins mid-game.\n skipped_rounds = self.controller._state.round - self.round_index\n for idx in range(skipped_rounds):\n #todo: How to score latecomers should be moved to ruleset.\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n # reset outline colors on ready buttons to what they need to be at the start of the \"between rounds\" state.\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info) # displays hand\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n else:\n # in Shared_Board games, check if there are wilds that need to be updated.\n # All other events are ignored until play is finished.\n HandManagement.wildsHiLoGetInput(self)\n\n def nextEvent(self):\n \"\"\"This submits the next user input to the controller,\n\n In games with Shared_Board = False (e.g. 
HandAndFoot) key strokes don't do anything\n unless designating values for prepared wild cards, at which time the mouse is ignored\n unless you want to clear the prepared cards.\n In games with Shared_Board = True wilds on board might change designation upon other cards being played.\n IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then\n it must be designated before play is completed.\n This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled.\"\"\"\n\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n wild_instructions = 'Use the keyboard to designate your prepared wild cards \\r\\n '\n wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n # cannot select prepared cards, so not included in logic below.\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n 
self.controller.wantTopCard(True)\n self.controller.note = 'You have signaled you want to buy the card.'\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = 'You have signaled you do not want to buy the card.'\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n HandManagement.ManuallyAssign(self)\n\n\n def gatherSelected(self):\n \"\"\" gathers selected cards\n in order to take action on selected cards (either discarding them or preparing them)\n \"\"\"\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = \"Please confirm - discard \" + \"{0}\".format(self.discards)\n return True # ask for confirmation\n else:\n # confirmed is True, performing discard and removing discarded wrapped cards from hand_info.\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False # now that this is done, we don't have anything waiting on confirmation\n\n def mesgBetweenRounds(self, message):\n \"\"\"print message where cards usually displayed until Ready button is clicked for next round.\"\"\"\n font = UIC.Medium_Text\n y_offset = (UIC.Disp_Height * (1 - (UIC.Hand_Row_Fraction * 0.8)))\n for message_string in message:\n text_surface = font.render(message_string, True, UIC.Black)\n text_rect = text_surface.get_rect()\n text_rect.center = ((UIC.Disp_Width * 0.5), y_offset)\n y_offset = y_offset + UIC.Medium_Text_Feed\n 
self.display.blit(text_surface, text_rect)\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = (x_offset, y_offset)\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n # a player has disconnected a game with a Shared_Board = True. Must make adjustments to\n # (i) card group dictionaries, (ii) prepared cards & (iii) buttons locations.\n self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards() # so that prepared cards won't be mistakenly played on wrong group.\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)\n self.controller.note = \"A player has left the game, all prepared cards are automatically cleared.\"\n # reset set/run button locations:\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + (players_sp_w * idx)\n",
"step-ids": [
7,
9,
11,
12,
13
]
}
|
[
7,
9,
11,
12,
13
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-16 12:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0036_auto_20180516_1818'),
]
operations = [
migrations.AddField(
model_name='promotion',
name='image',
field=models.ImageField(default=1, upload_to='images/promotion', verbose_name='Image 1318x790'),
preserve_default=False,
),
]
|
normal
|
{
"blob_id": "a7add26a919a41e52ae41c6b4c4079eadaa8aa1d",
"index": 851,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main', '0036_auto_20180516_1818')]\n operations = [migrations.AddField(model_name='promotion', name='image',\n field=models.ImageField(default=1, upload_to='images/promotion',\n verbose_name='Image 1318x790'), preserve_default=False)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main', '0036_auto_20180516_1818')]\n operations = [migrations.AddField(model_name='promotion', name='image',\n field=models.ImageField(default=1, upload_to='images/promotion',\n verbose_name='Image 1318x790'), preserve_default=False)]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-16 12:24\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0036_auto_20180516_1818'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='promotion',\n name='image',\n field=models.ImageField(default=1, upload_to='images/promotion', verbose_name='Image 1318x790'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from matplotlib import pyplot as plt
dev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
dev_y = [4000, 45000, 50000, 55000, 60000,
56000, 62316, 64928, 67317, 68748, 73752]
plt.plot(dev_x, dev_y, label='All Devs')
#dev_x and dev_y are respectively x-axis and y-axis
# Median Python Developer Salaries by Age
py_dev_y = [45372, 48876, 53850, 57287, 63016,
65998, 70003, 70000, 71496, 75370, 83640]
plt.plot(dev_x, py_dev_y, label='Python')
plt.xlabel('Ages')
plt.ylabel('Median Salary')
plt.title('Median Salary (USD) by Age')
#Shows the title above the figure
plt.legend()
#This shows indexing of the chart or figure
plt.show()
|
normal
|
{
"blob_id": "796a13de72c2879956c5f9c9c9bdef7253760c9d",
"index": 9895,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.plot(dev_x, dev_y, label='All Devs')\n<mask token>\nplt.plot(dev_x, py_dev_y, label='Python')\nplt.xlabel('Ages')\nplt.ylabel('Median Salary')\nplt.title('Median Salary (USD) by Age')\nplt.legend()\nplt.show()\n",
"step-3": "<mask token>\ndev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\ndev_y = [4000, 45000, 50000, 55000, 60000, 56000, 62316, 64928, 67317, \n 68748, 73752]\nplt.plot(dev_x, dev_y, label='All Devs')\npy_dev_y = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496, \n 75370, 83640]\nplt.plot(dev_x, py_dev_y, label='Python')\nplt.xlabel('Ages')\nplt.ylabel('Median Salary')\nplt.title('Median Salary (USD) by Age')\nplt.legend()\nplt.show()\n",
"step-4": "from matplotlib import pyplot as plt\ndev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\ndev_y = [4000, 45000, 50000, 55000, 60000, 56000, 62316, 64928, 67317, \n 68748, 73752]\nplt.plot(dev_x, dev_y, label='All Devs')\npy_dev_y = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496, \n 75370, 83640]\nplt.plot(dev_x, py_dev_y, label='Python')\nplt.xlabel('Ages')\nplt.ylabel('Median Salary')\nplt.title('Median Salary (USD) by Age')\nplt.legend()\nplt.show()\n",
"step-5": "from matplotlib import pyplot as plt\n\n\n\n\ndev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\n\ndev_y = [4000, 45000, 50000, 55000, 60000,\n 56000, 62316, 64928, 67317, 68748, 73752]\n\nplt.plot(dev_x, dev_y, label='All Devs')\n#dev_x and dev_y are respectively x-axis and y-axis\n\n\n\n\n\n# Median Python Developer Salaries by Age\n\npy_dev_y = [45372, 48876, 53850, 57287, 63016,\n 65998, 70003, 70000, 71496, 75370, 83640]\n\nplt.plot(dev_x, py_dev_y, label='Python')\n\n\n\n\n\nplt.xlabel('Ages')\n\nplt.ylabel('Median Salary')\n\nplt.title('Median Salary (USD) by Age')\n#Shows the title above the figure\n\nplt.legend()\n#This shows indexing of the chart or figure\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@ClassFactory.register(ClassType.METRIC, alias='accuracy')
class Accuracy(MetricBase):
<|reserved_special_token_0|>
__metric_name__ = 'accuracy'
def __init__(self, topk=(1, 5)):
"""Init Accuracy metric."""
self.topk = topk
self.sum = [0.0] * len(topk)
self.data_num = 0
self.pfm = [0.0] * len(topk)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
if isinstance(output, tuple):
output = output[0]
if isinstance(target, tuple) or isinstance(target, list):
target = target[0]
res = accuracy(output, target, self.topk)
n = output.size(0)
self.data_num += n
self.sum = [(self.sum[index] + item.item() * n) for index, item in
enumerate(res)]
self.pfm = [(item / self.data_num) for item in self.sum]
return res
def reset(self):
"""Reset states for new evaluation after each epoch."""
self.sum = [0.0] * len(self.topk)
self.data_num = 0
self.pfm = [0.0] * len(self.topk)
def summary(self):
"""Summary all cached records, here is the last pfm record."""
if len(self.pfm) == 1:
return self.pfm[0]
perf_dict = {}
perf_dict[self.name] = self.pfm[0]
perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]):
value for idx, value in enumerate(self.pfm)})
return perf_dict
@ClassFactory.register(ClassType.METRIC)
class SklearnMetrics(MetricBase):
"""Wrapper class for Sklearn Metrics."""
def __init__(self, name, **kwargs):
super().__init__()
self.__metric_name__ = name
self.metric_func = getattr(me, name)
if kwargs:
self.metric_func = partial(self.metric_func, kwargs)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
_, y_pred = output.topk(1, 1, True, True)
y_pred = y_pred.t().detach().cpu().numpy()[0]
y_true = target.detach().cpu().numpy()
self.pfm = self.metric_func(y_true, y_pred)
return self.pfm
def reset(self):
"""Reset states for new evaluation after each epoch."""
pass
def summary(self):
"""Summary all cached records, here is the last pfm record."""
return self.pfm
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@ClassFactory.register(ClassType.METRIC, alias='accuracy')
class Accuracy(MetricBase):
"""Calculate classification accuracy between output and target."""
__metric_name__ = 'accuracy'
def __init__(self, topk=(1, 5)):
"""Init Accuracy metric."""
self.topk = topk
self.sum = [0.0] * len(topk)
self.data_num = 0
self.pfm = [0.0] * len(topk)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
if isinstance(output, tuple):
output = output[0]
if isinstance(target, tuple) or isinstance(target, list):
target = target[0]
res = accuracy(output, target, self.topk)
n = output.size(0)
self.data_num += n
self.sum = [(self.sum[index] + item.item() * n) for index, item in
enumerate(res)]
self.pfm = [(item / self.data_num) for item in self.sum]
return res
def reset(self):
"""Reset states for new evaluation after each epoch."""
self.sum = [0.0] * len(self.topk)
self.data_num = 0
self.pfm = [0.0] * len(self.topk)
def summary(self):
"""Summary all cached records, here is the last pfm record."""
if len(self.pfm) == 1:
return self.pfm[0]
perf_dict = {}
perf_dict[self.name] = self.pfm[0]
perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]):
value for idx, value in enumerate(self.pfm)})
return perf_dict
@ClassFactory.register(ClassType.METRIC)
class SklearnMetrics(MetricBase):
"""Wrapper class for Sklearn Metrics."""
def __init__(self, name, **kwargs):
super().__init__()
self.__metric_name__ = name
self.metric_func = getattr(me, name)
if kwargs:
self.metric_func = partial(self.metric_func, kwargs)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
_, y_pred = output.topk(1, 1, True, True)
y_pred = y_pred.t().detach().cpu().numpy()[0]
y_true = target.detach().cpu().numpy()
self.pfm = self.metric_func(y_true, y_pred)
return self.pfm
def reset(self):
"""Reset states for new evaluation after each epoch."""
pass
def summary(self):
"""Summary all cached records, here is the last pfm record."""
return self.pfm
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def accuracy(output, target, top_k=(1,)):
"""Calculate classification accuracy between output and target.
:param output: output of classification network
:type output: pytorch tensor
:param target: ground truth from dataset
:type target: pytorch tensor
:param top_k: top k of metric, k is an interger
:type top_k: tuple of interger
:return: results of top k
:rtype: list
"""
labels_count = output.shape[1]
max_k = labels_count if max(top_k) > labels_count else max(top_k)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top_k:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k / batch_size)
return res
@ClassFactory.register(ClassType.METRIC, alias='accuracy')
class Accuracy(MetricBase):
"""Calculate classification accuracy between output and target."""
__metric_name__ = 'accuracy'
def __init__(self, topk=(1, 5)):
"""Init Accuracy metric."""
self.topk = topk
self.sum = [0.0] * len(topk)
self.data_num = 0
self.pfm = [0.0] * len(topk)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
if isinstance(output, tuple):
output = output[0]
if isinstance(target, tuple) or isinstance(target, list):
target = target[0]
res = accuracy(output, target, self.topk)
n = output.size(0)
self.data_num += n
self.sum = [(self.sum[index] + item.item() * n) for index, item in
enumerate(res)]
self.pfm = [(item / self.data_num) for item in self.sum]
return res
def reset(self):
"""Reset states for new evaluation after each epoch."""
self.sum = [0.0] * len(self.topk)
self.data_num = 0
self.pfm = [0.0] * len(self.topk)
def summary(self):
"""Summary all cached records, here is the last pfm record."""
if len(self.pfm) == 1:
return self.pfm[0]
perf_dict = {}
perf_dict[self.name] = self.pfm[0]
perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]):
value for idx, value in enumerate(self.pfm)})
return perf_dict
@ClassFactory.register(ClassType.METRIC)
class SklearnMetrics(MetricBase):
"""Wrapper class for Sklearn Metrics."""
def __init__(self, name, **kwargs):
super().__init__()
self.__metric_name__ = name
self.metric_func = getattr(me, name)
if kwargs:
self.metric_func = partial(self.metric_func, kwargs)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
_, y_pred = output.topk(1, 1, True, True)
y_pred = y_pred.t().detach().cpu().numpy()[0]
y_true = target.detach().cpu().numpy()
self.pfm = self.metric_func(y_true, y_pred)
return self.pfm
def reset(self):
"""Reset states for new evaluation after each epoch."""
pass
def summary(self):
"""Summary all cached records, here is the last pfm record."""
return self.pfm
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from functools import partial
from vega.metrics.pytorch.metrics import MetricBase
from vega.common import ClassFactory, ClassType
import sklearn.metrics as me
def accuracy(output, target, top_k=(1,)):
"""Calculate classification accuracy between output and target.
:param output: output of classification network
:type output: pytorch tensor
:param target: ground truth from dataset
:type target: pytorch tensor
:param top_k: top k of metric, k is an interger
:type top_k: tuple of interger
:return: results of top k
:rtype: list
"""
labels_count = output.shape[1]
max_k = labels_count if max(top_k) > labels_count else max(top_k)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top_k:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k / batch_size)
return res
@ClassFactory.register(ClassType.METRIC, alias='accuracy')
class Accuracy(MetricBase):
"""Calculate classification accuracy between output and target."""
__metric_name__ = 'accuracy'
def __init__(self, topk=(1, 5)):
"""Init Accuracy metric."""
self.topk = topk
self.sum = [0.0] * len(topk)
self.data_num = 0
self.pfm = [0.0] * len(topk)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
if isinstance(output, tuple):
output = output[0]
if isinstance(target, tuple) or isinstance(target, list):
target = target[0]
res = accuracy(output, target, self.topk)
n = output.size(0)
self.data_num += n
self.sum = [(self.sum[index] + item.item() * n) for index, item in
enumerate(res)]
self.pfm = [(item / self.data_num) for item in self.sum]
return res
def reset(self):
"""Reset states for new evaluation after each epoch."""
self.sum = [0.0] * len(self.topk)
self.data_num = 0
self.pfm = [0.0] * len(self.topk)
def summary(self):
"""Summary all cached records, here is the last pfm record."""
if len(self.pfm) == 1:
return self.pfm[0]
perf_dict = {}
perf_dict[self.name] = self.pfm[0]
perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]):
value for idx, value in enumerate(self.pfm)})
return perf_dict
@ClassFactory.register(ClassType.METRIC)
class SklearnMetrics(MetricBase):
"""Wrapper class for Sklearn Metrics."""
def __init__(self, name, **kwargs):
super().__init__()
self.__metric_name__ = name
self.metric_func = getattr(me, name)
if kwargs:
self.metric_func = partial(self.metric_func, kwargs)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
_, y_pred = output.topk(1, 1, True, True)
y_pred = y_pred.t().detach().cpu().numpy()[0]
y_true = target.detach().cpu().numpy()
self.pfm = self.metric_func(y_true, y_pred)
return self.pfm
def reset(self):
"""Reset states for new evaluation after each epoch."""
pass
def summary(self):
"""Summary all cached records, here is the last pfm record."""
return self.pfm
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric of classifier task."""
from functools import partial
from vega.metrics.pytorch.metrics import MetricBase
from vega.common import ClassFactory, ClassType
import sklearn.metrics as me
def accuracy(output, target, top_k=(1,)):
"""Calculate classification accuracy between output and target.
:param output: output of classification network
:type output: pytorch tensor
:param target: ground truth from dataset
:type target: pytorch tensor
:param top_k: top k of metric, k is an interger
:type top_k: tuple of interger
:return: results of top k
:rtype: list
"""
labels_count = output.shape[1]
max_k = labels_count if max(top_k) > labels_count else max(top_k)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top_k:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k / batch_size)
return res
@ClassFactory.register(ClassType.METRIC, alias='accuracy')
class Accuracy(MetricBase):
"""Calculate classification accuracy between output and target."""
__metric_name__ = 'accuracy'
def __init__(self, topk=(1, 5)):
"""Init Accuracy metric."""
self.topk = topk
self.sum = [0.] * len(topk)
self.data_num = 0
self.pfm = [0.] * len(topk)
def __call__(self, output, target, *args, **kwargs):
"""Perform top k accuracy.
:param output: output of classification network
:param target: ground truth from dataset
:return: pfm
"""
if isinstance(output, tuple):
output = output[0]
if isinstance(target, tuple) or isinstance(target, list):
target = target[0]
res = accuracy(output, target, self.topk)
n = output.size(0)
self.data_num += n
self.sum = [self.sum[index] + item.item() * n for index, item in enumerate(res)]
self.pfm = [item / self.data_num for item in self.sum]
return res
def reset(self):
"""Reset states for new evaluation after each epoch."""
self.sum = [0.] * len(self.topk)
self.data_num = 0
self.pfm = [0.] * len(self.topk)
def summary(self):
"""Summary all cached records, here is the last pfm record."""
if len(self.pfm) == 1:
return self.pfm[0]
perf_dict = {}
perf_dict[self.name] = self.pfm[0]
perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]): value for idx, value in enumerate(self.pfm)})
return perf_dict
@ClassFactory.register(ClassType.METRIC)
class SklearnMetrics(MetricBase):
    """Wrapper class exposing an arbitrary ``sklearn.metrics`` function as a metric.

    :param name: name of the ``sklearn.metrics`` function to wrap (e.g. ``'f1_score'``)
    :param kwargs: extra keyword arguments forwarded to the sklearn function
    """

    def __init__(self, name, **kwargs):
        super().__init__()
        self.__metric_name__ = name
        self.metric_func = getattr(me, name)
        if kwargs:
            # Bind extra options as *keyword* arguments. Passing the dict
            # positionally (the previous behavior) would make sklearn treat
            # it as y_true and shift the real arguments.
            self.metric_func = partial(self.metric_func, **kwargs)

    def __call__(self, output, target, *args, **kwargs):
        """Compute the wrapped sklearn metric on one batch.

        :param output: output of classification network (per-class scores)
        :param target: ground truth from dataset
        :return: pfm, the sklearn metric value for this batch
        """
        # Take the arg-max (top-1) class prediction per sample.
        _, y_pred = output.topk(1, 1, True, True)
        y_pred = y_pred.t().detach().cpu().numpy()[0]
        y_true = target.detach().cpu().numpy()
        self.pfm = self.metric_func(y_true, y_pred)
        return self.pfm

    def reset(self):
        """Reset states for new evaluation after each epoch (stateless here)."""
        pass

    def summary(self):
        """Summary all cached records, here is the last pfm record."""
        return self.pfm
|
flexible
|
{
"blob_id": "a491772258a52bdfc93083343d2a2e48a240340d",
"index": 490,
"step-1": "<mask token>\n\n\n@ClassFactory.register(ClassType.METRIC, alias='accuracy')\nclass Accuracy(MetricBase):\n <mask token>\n __metric_name__ = 'accuracy'\n\n def __init__(self, topk=(1, 5)):\n \"\"\"Init Accuracy metric.\"\"\"\n self.topk = topk\n self.sum = [0.0] * len(topk)\n self.data_num = 0\n self.pfm = [0.0] * len(topk)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n if isinstance(output, tuple):\n output = output[0]\n if isinstance(target, tuple) or isinstance(target, list):\n target = target[0]\n res = accuracy(output, target, self.topk)\n n = output.size(0)\n self.data_num += n\n self.sum = [(self.sum[index] + item.item() * n) for index, item in\n enumerate(res)]\n self.pfm = [(item / self.data_num) for item in self.sum]\n return res\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n self.sum = [0.0] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.0] * len(self.topk)\n\n def summary(self):\n \"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n if len(self.pfm) == 1:\n return self.pfm[0]\n perf_dict = {}\n perf_dict[self.name] = self.pfm[0]\n perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]):\n value for idx, value in enumerate(self.pfm)})\n return perf_dict\n\n\n@ClassFactory.register(ClassType.METRIC)\nclass SklearnMetrics(MetricBase):\n \"\"\"Wrapper class for Sklearn Metrics.\"\"\"\n\n def __init__(self, name, **kwargs):\n super().__init__()\n self.__metric_name__ = name\n self.metric_func = getattr(me, name)\n if kwargs:\n self.metric_func = partial(self.metric_func, kwargs)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n _, y_pred = output.topk(1, 1, 
True, True)\n y_pred = y_pred.t().detach().cpu().numpy()[0]\n y_true = target.detach().cpu().numpy()\n self.pfm = self.metric_func(y_true, y_pred)\n return self.pfm\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n pass\n\n def summary(self):\n \"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n return self.pfm\n",
"step-2": "<mask token>\n\n\n@ClassFactory.register(ClassType.METRIC, alias='accuracy')\nclass Accuracy(MetricBase):\n \"\"\"Calculate classification accuracy between output and target.\"\"\"\n __metric_name__ = 'accuracy'\n\n def __init__(self, topk=(1, 5)):\n \"\"\"Init Accuracy metric.\"\"\"\n self.topk = topk\n self.sum = [0.0] * len(topk)\n self.data_num = 0\n self.pfm = [0.0] * len(topk)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n if isinstance(output, tuple):\n output = output[0]\n if isinstance(target, tuple) or isinstance(target, list):\n target = target[0]\n res = accuracy(output, target, self.topk)\n n = output.size(0)\n self.data_num += n\n self.sum = [(self.sum[index] + item.item() * n) for index, item in\n enumerate(res)]\n self.pfm = [(item / self.data_num) for item in self.sum]\n return res\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n self.sum = [0.0] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.0] * len(self.topk)\n\n def summary(self):\n \"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n if len(self.pfm) == 1:\n return self.pfm[0]\n perf_dict = {}\n perf_dict[self.name] = self.pfm[0]\n perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]):\n value for idx, value in enumerate(self.pfm)})\n return perf_dict\n\n\n@ClassFactory.register(ClassType.METRIC)\nclass SklearnMetrics(MetricBase):\n \"\"\"Wrapper class for Sklearn Metrics.\"\"\"\n\n def __init__(self, name, **kwargs):\n super().__init__()\n self.__metric_name__ = name\n self.metric_func = getattr(me, name)\n if kwargs:\n self.metric_func = partial(self.metric_func, kwargs)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from 
dataset\n :return: pfm\n \"\"\"\n _, y_pred = output.topk(1, 1, True, True)\n y_pred = y_pred.t().detach().cpu().numpy()[0]\n y_true = target.detach().cpu().numpy()\n self.pfm = self.metric_func(y_true, y_pred)\n return self.pfm\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n pass\n\n def summary(self):\n \"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n return self.pfm\n",
"step-3": "<mask token>\n\n\ndef accuracy(output, target, top_k=(1,)):\n \"\"\"Calculate classification accuracy between output and target.\n\n :param output: output of classification network\n :type output: pytorch tensor\n :param target: ground truth from dataset\n :type target: pytorch tensor\n :param top_k: top k of metric, k is an interger\n :type top_k: tuple of interger\n :return: results of top k\n :rtype: list\n\n \"\"\"\n labels_count = output.shape[1]\n max_k = labels_count if max(top_k) > labels_count else max(top_k)\n batch_size = target.size(0)\n _, pred = output.topk(max_k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in top_k:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res.append(correct_k / batch_size)\n return res\n\n\n@ClassFactory.register(ClassType.METRIC, alias='accuracy')\nclass Accuracy(MetricBase):\n \"\"\"Calculate classification accuracy between output and target.\"\"\"\n __metric_name__ = 'accuracy'\n\n def __init__(self, topk=(1, 5)):\n \"\"\"Init Accuracy metric.\"\"\"\n self.topk = topk\n self.sum = [0.0] * len(topk)\n self.data_num = 0\n self.pfm = [0.0] * len(topk)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n if isinstance(output, tuple):\n output = output[0]\n if isinstance(target, tuple) or isinstance(target, list):\n target = target[0]\n res = accuracy(output, target, self.topk)\n n = output.size(0)\n self.data_num += n\n self.sum = [(self.sum[index] + item.item() * n) for index, item in\n enumerate(res)]\n self.pfm = [(item / self.data_num) for item in self.sum]\n return res\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n self.sum = [0.0] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.0] * len(self.topk)\n\n def summary(self):\n \"\"\"Summary all 
cached records, here is the last pfm record.\"\"\"\n if len(self.pfm) == 1:\n return self.pfm[0]\n perf_dict = {}\n perf_dict[self.name] = self.pfm[0]\n perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]):\n value for idx, value in enumerate(self.pfm)})\n return perf_dict\n\n\n@ClassFactory.register(ClassType.METRIC)\nclass SklearnMetrics(MetricBase):\n \"\"\"Wrapper class for Sklearn Metrics.\"\"\"\n\n def __init__(self, name, **kwargs):\n super().__init__()\n self.__metric_name__ = name\n self.metric_func = getattr(me, name)\n if kwargs:\n self.metric_func = partial(self.metric_func, kwargs)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n _, y_pred = output.topk(1, 1, True, True)\n y_pred = y_pred.t().detach().cpu().numpy()[0]\n y_true = target.detach().cpu().numpy()\n self.pfm = self.metric_func(y_true, y_pred)\n return self.pfm\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n pass\n\n def summary(self):\n \"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n return self.pfm\n",
"step-4": "<mask token>\nfrom functools import partial\nfrom vega.metrics.pytorch.metrics import MetricBase\nfrom vega.common import ClassFactory, ClassType\nimport sklearn.metrics as me\n\n\ndef accuracy(output, target, top_k=(1,)):\n \"\"\"Calculate classification accuracy between output and target.\n\n :param output: output of classification network\n :type output: pytorch tensor\n :param target: ground truth from dataset\n :type target: pytorch tensor\n :param top_k: top k of metric, k is an interger\n :type top_k: tuple of interger\n :return: results of top k\n :rtype: list\n\n \"\"\"\n labels_count = output.shape[1]\n max_k = labels_count if max(top_k) > labels_count else max(top_k)\n batch_size = target.size(0)\n _, pred = output.topk(max_k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in top_k:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res.append(correct_k / batch_size)\n return res\n\n\n@ClassFactory.register(ClassType.METRIC, alias='accuracy')\nclass Accuracy(MetricBase):\n \"\"\"Calculate classification accuracy between output and target.\"\"\"\n __metric_name__ = 'accuracy'\n\n def __init__(self, topk=(1, 5)):\n \"\"\"Init Accuracy metric.\"\"\"\n self.topk = topk\n self.sum = [0.0] * len(topk)\n self.data_num = 0\n self.pfm = [0.0] * len(topk)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n if isinstance(output, tuple):\n output = output[0]\n if isinstance(target, tuple) or isinstance(target, list):\n target = target[0]\n res = accuracy(output, target, self.topk)\n n = output.size(0)\n self.data_num += n\n self.sum = [(self.sum[index] + item.item() * n) for index, item in\n enumerate(res)]\n self.pfm = [(item / self.data_num) for item in self.sum]\n return res\n\n def reset(self):\n \"\"\"Reset states for new 
evaluation after each epoch.\"\"\"\n self.sum = [0.0] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.0] * len(self.topk)\n\n def summary(self):\n \"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n if len(self.pfm) == 1:\n return self.pfm[0]\n perf_dict = {}\n perf_dict[self.name] = self.pfm[0]\n perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]):\n value for idx, value in enumerate(self.pfm)})\n return perf_dict\n\n\n@ClassFactory.register(ClassType.METRIC)\nclass SklearnMetrics(MetricBase):\n \"\"\"Wrapper class for Sklearn Metrics.\"\"\"\n\n def __init__(self, name, **kwargs):\n super().__init__()\n self.__metric_name__ = name\n self.metric_func = getattr(me, name)\n if kwargs:\n self.metric_func = partial(self.metric_func, kwargs)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n _, y_pred = output.topk(1, 1, True, True)\n y_pred = y_pred.t().detach().cpu().numpy()[0]\n y_true = target.detach().cpu().numpy()\n self.pfm = self.metric_func(y_true, y_pred)\n return self.pfm\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n pass\n\n def summary(self):\n \"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n return self.pfm\n",
"step-5": "# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Metric of classifier task.\"\"\"\nfrom functools import partial\nfrom vega.metrics.pytorch.metrics import MetricBase\nfrom vega.common import ClassFactory, ClassType\nimport sklearn.metrics as me\n\n\ndef accuracy(output, target, top_k=(1,)):\n \"\"\"Calculate classification accuracy between output and target.\n\n :param output: output of classification network\n :type output: pytorch tensor\n :param target: ground truth from dataset\n :type target: pytorch tensor\n :param top_k: top k of metric, k is an interger\n :type top_k: tuple of interger\n :return: results of top k\n :rtype: list\n\n \"\"\"\n labels_count = output.shape[1]\n max_k = labels_count if max(top_k) > labels_count else max(top_k)\n batch_size = target.size(0)\n _, pred = output.topk(max_k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in top_k:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res.append(correct_k / batch_size)\n return res\n\n\n@ClassFactory.register(ClassType.METRIC, alias='accuracy')\nclass Accuracy(MetricBase):\n \"\"\"Calculate classification accuracy between output and target.\"\"\"\n\n __metric_name__ = 'accuracy'\n\n def __init__(self, topk=(1, 5)):\n \"\"\"Init Accuracy metric.\"\"\"\n self.topk = topk\n self.sum = [0.] 
* len(topk)\n self.data_num = 0\n self.pfm = [0.] * len(topk)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n if isinstance(output, tuple):\n output = output[0]\n if isinstance(target, tuple) or isinstance(target, list):\n target = target[0]\n res = accuracy(output, target, self.topk)\n n = output.size(0)\n self.data_num += n\n self.sum = [self.sum[index] + item.item() * n for index, item in enumerate(res)]\n self.pfm = [item / self.data_num for item in self.sum]\n return res\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] * len(self.topk)\n\n def summary(self):\n \"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n if len(self.pfm) == 1:\n return self.pfm[0]\n perf_dict = {}\n perf_dict[self.name] = self.pfm[0]\n perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]): value for idx, value in enumerate(self.pfm)})\n return perf_dict\n\n\n@ClassFactory.register(ClassType.METRIC)\nclass SklearnMetrics(MetricBase):\n \"\"\"Wrapper class for Sklearn Metrics.\"\"\"\n\n def __init__(self, name, **kwargs):\n super().__init__()\n self.__metric_name__ = name\n self.metric_func = getattr(me, name)\n if kwargs:\n self.metric_func = partial(self.metric_func, kwargs)\n\n def __call__(self, output, target, *args, **kwargs):\n \"\"\"Perform top k accuracy.\n\n :param output: output of classification network\n :param target: ground truth from dataset\n :return: pfm\n \"\"\"\n _, y_pred = output.topk(1, 1, True, True)\n y_pred = y_pred.t().detach().cpu().numpy()[0]\n y_true = target.detach().cpu().numpy()\n self.pfm = self.metric_func(y_true, y_pred)\n return self.pfm\n\n def reset(self):\n \"\"\"Reset states for new evaluation after each epoch.\"\"\"\n pass\n\n def summary(self):\n 
\"\"\"Summary all cached records, here is the last pfm record.\"\"\"\n return self.pfm\n",
"step-ids": [
12,
13,
14,
15,
16
]
}
|
[
12,
13,
14,
15,
16
] |
from appium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from appium.webdriver.common.touch_action import TouchAction
import time
import re
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import pymongo
def getSize():
    """Return the device screen size as a ``(width, height)`` tuple."""
    width = driver.get_window_size()['width']
    height = driver.get_window_size()['height']
    return (width, height)
'''
解释:int start x-开始滑动的x坐标,
int start y -开始滑动的y坐标。
int end x -结束点x坐标,
int end y -结束点y坐标。
duration 滑动时间(默认5毫秒);
'''
def swipeUp(t):
    """Swipe vertically along the screen center line, from 75% down to 25% of the height.

    :param t: swipe duration in milliseconds
    """
    width, height = getSize()
    center_x = int(width * 0.5)   # swipe along the horizontal center
    start_y = int(height * 0.75)  # start point near the bottom
    end_y = int(height * 0.25)    # end point near the top
    driver.swipe(center_x, start_y, center_x, end_y, t)
def crawl():
    """Endlessly scroll the order list and upsert each visible order card into MongoDB.

    Relies on the module-level ``driver``, ``wait``, and ``collection`` objects.
    Runs forever; stop the process (e.g. Ctrl+C) to end the crawl.
    """
    while True:
        # Wait for the RecyclerView holding the order cards to be present.
        items = wait.until(EC.presence_of_all_elements_located(
            (By.XPATH,'/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.support.v4.view.ViewPager/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.view.ViewGroup/android.support.v7.widget.RecyclerView' )))
        swipeUp(1500)
        for item in items:
            try:
                nickname = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderCompany').get_attribute('text')
                content = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderStartTime').get_attribute('text')
                # The time field looks like "<start>至<end>"; split on the first
                # '至' (Chinese for "to") to get the two timestamps.
                list_time = content.split("至", 1)
                start_time = list_time[0]
                deadline = list_time[1]
                send = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailStartAdd').get_attribute('text')
                receive = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailEndAdd').get_attribute('text')
                goods_type = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailGoodsType1').get_attribute('text')
                raw_price = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailFreight1').get_attribute('text')
                # First number in the freight text is the price.
                price = re.findall(r"\d+\.?\d*", raw_price)[0]
                raw_distance = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_search_goods_distance').get_attribute('text')
                # Second number in the distance text is the route distance
                # — presumably the first is a pickup distance; TODO confirm.
                distance = re.findall(r"\d+\.?\d*", raw_distance)[1]
                data = {'nickname': nickname, 'start_time': start_time, 'deadline': deadline,
                        'send': send, 'receive': receive, 'type': goods_type,
                        'price': price, 'distance': distance}
                print(data)
                # Upsert keyed on the full record so repeated sightings of the
                # same card do not create duplicate documents.
                collection.update_one(data, {'$set': data}, upsert=True)
            except Exception as e:
                # A card can scroll out of view mid-read (StaleElement etc.);
                # skip it and keep crawling. Deliberately narrower than the
                # previous BaseException so Ctrl+C/SystemExit still stop the loop.
                print(e)
# MongoDB connection that receives the scraped order records.
client=pymongo.MongoClient("127.0.0.1",27017)
db=client.kc_data
collection=db.data_detail
# Appium desired capabilities for the target Android device and app.
desired_caps = {}
desired_caps['platformName'] ='Android'
desired_caps['deviceName']='f866d421'
desired_caps['appPackage']='com.kuaichengwuliu.driver'
desired_caps['appActivity']='.guide.GuideActivity'#'.guide.GuideActivity'
driver_server='http://localhost:4723/wd/hub'
desired_caps['autoAcceptAlerts']="true"
desired_caps['platformVersion'] = '6.0.1'
# Connect to the local Appium server and set up a long explicit wait (300 s).
driver = webdriver.Remote(driver_server,desired_caps)
wait = WebDriverWait(driver, 300)
#WebDriverWait(driver, 20).until(lambda the_driver: the_driver.find_element_by_id("com.kuyu:id/tv_login").is_displayed())
#time.sleep(30)
# Wait until the app's root content view is displayed before interacting.
WebDriverWait(driver, 7).until(lambda the_driver: driver.find_element_by_id("android:id/content").is_displayed())
# Hard-coded tap sequence for this specific device resolution — presumably
# dismisses startup dialogs and navigates to the order list before crawling
# starts; TODO confirm against the app's actual UI flow.
TouchAction(driver).tap(x=545, y=181).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=161, y=706).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=534, y=1029).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=183, y=1029).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=528, y=701).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=183, y=684).release().perform()
time.sleep(4)
TouchAction(driver).tap(x=161, y=306).release().perform()
time.sleep(4)
TouchAction(driver).tap(x=128, y=303).release().perform()
time.sleep(5)
# Start the endless scrape loop; under normal operation this never returns,
# so the statements below are effectively unreachable.
crawl()
# Enter the username
#driver.find_element_by_id("com.kuyu:id/et_email").send_keys("******")
# Enter the password
#driver.find_element_by_id("com.kuyu:id/et_pwd").send_keys("******")
# Click the login button
#driver.find_element_by_id("com.kuyu:id/tv_login").click()
# Wait here: login succeeded once the specified element appears (how to set up
# explicit waits is explained in a later lesson).
#WebDriverWait(driver, 20).until(
#    lambda the_driver: the_driver.find_element_by_id("com.kuyu:id/include_study_iv_add").is_displayed())
print(u"登录成功")
#driver.quit()
#TouchAction(driver).press(x=297, y=1073).move_to(x=309, y=459).release().perform()
|
normal
|
{
"blob_id": "6e614d1235a98ef496956001eef46b4447f0bf9b",
"index": 4677,
"step-1": "<mask token>\n\n\ndef getSize():\n x = driver.get_window_size()['width']\n y = driver.get_window_size()['height']\n return x, y\n\n\n<mask token>\n\n\ndef swipeUp(t):\n l = getSize()\n x1 = int(l[0] * 0.5)\n y1 = int(l[1] * 0.75)\n y2 = int(l[1] * 0.25)\n driver.swipe(x1, y1, x1, y2, t)\n\n\ndef crawl():\n while True:\n items = wait.until(EC.presence_of_all_elements_located((By.XPATH,\n '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.support.v4.view.ViewPager/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.view.ViewGroup/android.support.v7.widget.RecyclerView'\n )))\n swipeUp(1500)\n for item in items:\n try:\n nickname = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderCompany'\n ).get_attribute('text')\n content = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderStartTime'\n ).get_attribute('text')\n list_time = content.split('至', 1)\n start_time = list_time[0]\n deadline = list_time[1]\n send = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailStartAdd'\n ).get_attribute('text')\n receive = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailEndAdd'\n ).get_attribute('text')\n type = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailGoodsType1'\n ).get_attribute('text')\n raw_price = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailFreight1'\n ).get_attribute('text')\n price = re.findall('\\\\d+\\\\.?\\\\d*', raw_price)[0]\n raw_distance = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_search_goods_distance'\n ).get_attribute('text')\n list_raw = re.findall('\\\\d+\\\\.?\\\\d*', raw_distance)\n distance = list_raw[1]\n data = {'nickname': nickname, 'start_time': start_time,\n 'deadline': 
deadline, 'send': send, 'receive': receive,\n 'type': type, 'price': price, 'distance': distance}\n print(data)\n collection.update_one({'nickname': nickname, 'start_time':\n start_time, 'deadline': deadline, 'send': send,\n 'receive': receive, 'type': type, 'price': price,\n 'distance': distance}, {'$set': data}, upsert=True)\n except BaseException as e:\n print(e)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getSize():\n x = driver.get_window_size()['width']\n y = driver.get_window_size()['height']\n return x, y\n\n\n<mask token>\n\n\ndef swipeUp(t):\n l = getSize()\n x1 = int(l[0] * 0.5)\n y1 = int(l[1] * 0.75)\n y2 = int(l[1] * 0.25)\n driver.swipe(x1, y1, x1, y2, t)\n\n\ndef crawl():\n while True:\n items = wait.until(EC.presence_of_all_elements_located((By.XPATH,\n '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.support.v4.view.ViewPager/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.view.ViewGroup/android.support.v7.widget.RecyclerView'\n )))\n swipeUp(1500)\n for item in items:\n try:\n nickname = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderCompany'\n ).get_attribute('text')\n content = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderStartTime'\n ).get_attribute('text')\n list_time = content.split('至', 1)\n start_time = list_time[0]\n deadline = list_time[1]\n send = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailStartAdd'\n ).get_attribute('text')\n receive = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailEndAdd'\n ).get_attribute('text')\n type = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailGoodsType1'\n ).get_attribute('text')\n raw_price = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailFreight1'\n ).get_attribute('text')\n price = re.findall('\\\\d+\\\\.?\\\\d*', raw_price)[0]\n raw_distance = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_search_goods_distance'\n ).get_attribute('text')\n list_raw = re.findall('\\\\d+\\\\.?\\\\d*', raw_distance)\n distance = list_raw[1]\n data = {'nickname': nickname, 'start_time': start_time,\n 'deadline': 
deadline, 'send': send, 'receive': receive,\n 'type': type, 'price': price, 'distance': distance}\n print(data)\n collection.update_one({'nickname': nickname, 'start_time':\n start_time, 'deadline': deadline, 'send': send,\n 'receive': receive, 'type': type, 'price': price,\n 'distance': distance}, {'$set': data}, upsert=True)\n except BaseException as e:\n print(e)\n\n\n<mask token>\nWebDriverWait(driver, 7).until(lambda the_driver: driver.find_element_by_id\n ('android:id/content').is_displayed())\nTouchAction(driver).tap(x=545, y=181).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=161, y=706).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=534, y=1029).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=183, y=1029).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=528, y=701).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=183, y=684).release().perform()\ntime.sleep(4)\nTouchAction(driver).tap(x=161, y=306).release().perform()\ntime.sleep(4)\nTouchAction(driver).tap(x=128, y=303).release().perform()\ntime.sleep(5)\ncrawl()\nprint(u'登录成功')\n",
"step-3": "<mask token>\n\n\ndef getSize():\n x = driver.get_window_size()['width']\n y = driver.get_window_size()['height']\n return x, y\n\n\n<mask token>\n\n\ndef swipeUp(t):\n l = getSize()\n x1 = int(l[0] * 0.5)\n y1 = int(l[1] * 0.75)\n y2 = int(l[1] * 0.25)\n driver.swipe(x1, y1, x1, y2, t)\n\n\ndef crawl():\n while True:\n items = wait.until(EC.presence_of_all_elements_located((By.XPATH,\n '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.support.v4.view.ViewPager/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.view.ViewGroup/android.support.v7.widget.RecyclerView'\n )))\n swipeUp(1500)\n for item in items:\n try:\n nickname = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderCompany'\n ).get_attribute('text')\n content = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderStartTime'\n ).get_attribute('text')\n list_time = content.split('至', 1)\n start_time = list_time[0]\n deadline = list_time[1]\n send = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailStartAdd'\n ).get_attribute('text')\n receive = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailEndAdd'\n ).get_attribute('text')\n type = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailGoodsType1'\n ).get_attribute('text')\n raw_price = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailFreight1'\n ).get_attribute('text')\n price = re.findall('\\\\d+\\\\.?\\\\d*', raw_price)[0]\n raw_distance = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_search_goods_distance'\n ).get_attribute('text')\n list_raw = re.findall('\\\\d+\\\\.?\\\\d*', raw_distance)\n distance = list_raw[1]\n data = {'nickname': nickname, 'start_time': start_time,\n 'deadline': 
deadline, 'send': send, 'receive': receive,\n 'type': type, 'price': price, 'distance': distance}\n print(data)\n collection.update_one({'nickname': nickname, 'start_time':\n start_time, 'deadline': deadline, 'send': send,\n 'receive': receive, 'type': type, 'price': price,\n 'distance': distance}, {'$set': data}, upsert=True)\n except BaseException as e:\n print(e)\n\n\nclient = pymongo.MongoClient('127.0.0.1', 27017)\ndb = client.kc_data\ncollection = db.data_detail\ndesired_caps = {}\ndesired_caps['platformName'] = 'Android'\ndesired_caps['deviceName'] = 'f866d421'\ndesired_caps['appPackage'] = 'com.kuaichengwuliu.driver'\ndesired_caps['appActivity'] = '.guide.GuideActivity'\ndriver_server = 'http://localhost:4723/wd/hub'\ndesired_caps['autoAcceptAlerts'] = 'true'\ndesired_caps['platformVersion'] = '6.0.1'\ndriver = webdriver.Remote(driver_server, desired_caps)\nwait = WebDriverWait(driver, 300)\nWebDriverWait(driver, 7).until(lambda the_driver: driver.find_element_by_id\n ('android:id/content').is_displayed())\nTouchAction(driver).tap(x=545, y=181).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=161, y=706).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=534, y=1029).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=183, y=1029).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=528, y=701).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=183, y=684).release().perform()\ntime.sleep(4)\nTouchAction(driver).tap(x=161, y=306).release().perform()\ntime.sleep(4)\nTouchAction(driver).tap(x=128, y=303).release().perform()\ntime.sleep(5)\ncrawl()\nprint(u'登录成功')\n",
"step-4": "from appium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom appium.webdriver.common.touch_action import TouchAction\nimport time\nimport re\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport pymongo\n\n\ndef getSize():\n x = driver.get_window_size()['width']\n y = driver.get_window_size()['height']\n return x, y\n\n\n<mask token>\n\n\ndef swipeUp(t):\n l = getSize()\n x1 = int(l[0] * 0.5)\n y1 = int(l[1] * 0.75)\n y2 = int(l[1] * 0.25)\n driver.swipe(x1, y1, x1, y2, t)\n\n\ndef crawl():\n while True:\n items = wait.until(EC.presence_of_all_elements_located((By.XPATH,\n '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.support.v4.view.ViewPager/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.view.ViewGroup/android.support.v7.widget.RecyclerView'\n )))\n swipeUp(1500)\n for item in items:\n try:\n nickname = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderCompany'\n ).get_attribute('text')\n content = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderStartTime'\n ).get_attribute('text')\n list_time = content.split('至', 1)\n start_time = list_time[0]\n deadline = list_time[1]\n send = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailStartAdd'\n ).get_attribute('text')\n receive = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailEndAdd'\n ).get_attribute('text')\n type = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailGoodsType1'\n ).get_attribute('text')\n raw_price = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_orderDetailFreight1'\n ).get_attribute('text')\n price = re.findall('\\\\d+\\\\.?\\\\d*', 
raw_price)[0]\n raw_distance = item.find_element_by_id(\n 'com.kuaichengwuliu.driver:id/tv_search_goods_distance'\n ).get_attribute('text')\n list_raw = re.findall('\\\\d+\\\\.?\\\\d*', raw_distance)\n distance = list_raw[1]\n data = {'nickname': nickname, 'start_time': start_time,\n 'deadline': deadline, 'send': send, 'receive': receive,\n 'type': type, 'price': price, 'distance': distance}\n print(data)\n collection.update_one({'nickname': nickname, 'start_time':\n start_time, 'deadline': deadline, 'send': send,\n 'receive': receive, 'type': type, 'price': price,\n 'distance': distance}, {'$set': data}, upsert=True)\n except BaseException as e:\n print(e)\n\n\nclient = pymongo.MongoClient('127.0.0.1', 27017)\ndb = client.kc_data\ncollection = db.data_detail\ndesired_caps = {}\ndesired_caps['platformName'] = 'Android'\ndesired_caps['deviceName'] = 'f866d421'\ndesired_caps['appPackage'] = 'com.kuaichengwuliu.driver'\ndesired_caps['appActivity'] = '.guide.GuideActivity'\ndriver_server = 'http://localhost:4723/wd/hub'\ndesired_caps['autoAcceptAlerts'] = 'true'\ndesired_caps['platformVersion'] = '6.0.1'\ndriver = webdriver.Remote(driver_server, desired_caps)\nwait = WebDriverWait(driver, 300)\nWebDriverWait(driver, 7).until(lambda the_driver: driver.find_element_by_id\n ('android:id/content').is_displayed())\nTouchAction(driver).tap(x=545, y=181).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=161, y=706).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=534, y=1029).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=183, y=1029).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=528, y=701).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=183, y=684).release().perform()\ntime.sleep(4)\nTouchAction(driver).tap(x=161, y=306).release().perform()\ntime.sleep(4)\nTouchAction(driver).tap(x=128, y=303).release().perform()\ntime.sleep(5)\ncrawl()\nprint(u'登录成功')\n",
"step-5": "from appium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom appium.webdriver.common.touch_action import TouchAction\nimport time\nimport re\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport pymongo\n\ndef getSize():\n x = driver.get_window_size()['width']\n y = driver.get_window_size()['height']\n return (x, y)\n\n'''\n解释:int start x-开始滑动的x坐标,\n\n int start y -开始滑动的y坐标。\n\n int end x -结束点x坐标,\n\n int end y -结束点y坐标。\n\n duration 滑动时间(默认5毫秒);\n'''\ndef swipeUp(t):\n l = getSize()\n x1 = int(l[0] * 0.5) #x坐标\n y1 = int(l[1] * 0.75) #起始y坐标\n y2 = int(l[1] * 0.25) #终点y坐标\n driver.swipe(x1, y1, x1, y2,t)\n\ndef crawl():\n while True:\n items = wait.until(EC.presence_of_all_elements_located(\n (By.XPATH,'/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.support.v4.view.ViewPager/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.view.ViewGroup/android.support.v7.widget.RecyclerView' )))\n swipeUp(1500)\n for item in items:\n try:\n nickname = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderCompany').get_attribute('text')\n content = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderStartTime').get_attribute('text')\n list_time = content.split(\"至\", 1)\n start_time = list_time[0]\n deadline = list_time[1]\n send = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailStartAdd').get_attribute('text')\n receive = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailEndAdd').get_attribute('text')\n type = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailGoodsType1').get_attribute('text')\n raw_price= 
item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailFreight1').get_attribute('text')\n price = re.findall(r\"\\d+\\.?\\d*\", raw_price)[0]\n raw_distance = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_search_goods_distance').get_attribute('text')\n list_raw = re.findall(r\"\\d+\\.?\\d*\",raw_distance)\n distance = list_raw[1]\n data = {'nickname': nickname, 'start_time':start_time, 'deadline':deadline,'send':send,'receive':receive,'type':type,'price':price,'distance':distance}\n #self.collection.update({'nickname': nickname, 'content': content}, {'$set': data}, True)\n print(data)\n\n collection.update_one({'nickname': nickname,'start_time':start_time,'deadline':deadline,'send':send,'receive':receive,'type':type,'price':price,'distance':distance}, {'$set': data},upsert=True)\n\n except BaseException as e:\n print(e)\n\n\n\nclient=pymongo.MongoClient(\"127.0.0.1\",27017)\ndb=client.kc_data\ncollection=db.data_detail\ndesired_caps = {}\ndesired_caps['platformName'] ='Android'\ndesired_caps['deviceName']='f866d421'\ndesired_caps['appPackage']='com.kuaichengwuliu.driver'\ndesired_caps['appActivity']='.guide.GuideActivity'#'.guide.GuideActivity'\ndriver_server='http://localhost:4723/wd/hub'\ndesired_caps['autoAcceptAlerts']=\"true\"\ndesired_caps['platformVersion'] = '6.0.1'\ndriver = webdriver.Remote(driver_server,desired_caps)\nwait = WebDriverWait(driver, 300)\n\n#WebDriverWait(driver, 20).until(lambda the_driver: the_driver.find_element_by_id(\"com.kuyu:id/tv_login\").is_displayed())\n#time.sleep(30)\nWebDriverWait(driver, 7).until(lambda the_driver: driver.find_element_by_id(\"android:id/content\").is_displayed())\nTouchAction(driver).tap(x=545, y=181).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=161, y=706).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=534, y=1029).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=183, y=1029).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=528, 
y=701).release().perform()\ntime.sleep(1)\nTouchAction(driver).tap(x=183, y=684).release().perform()\ntime.sleep(4)\nTouchAction(driver).tap(x=161, y=306).release().perform()\ntime.sleep(4)\nTouchAction(driver).tap(x=128, y=303).release().perform()\ntime.sleep(5)\ncrawl()\n\n\n# 输入用户名\n#driver.find_element_by_id(\"com.kuyu:id/et_email\").send_keys(\"******\")\n# 输入密码\n#driver.find_element_by_id(\"com.kuyu:id/et_pwd\").send_keys(\"******\")\n# 点击登录\n#driver.find_element_by_id(\"com.kuyu:id/tv_login\").click()\n# 这里加了一个等待,判断指定的元素出现则为登录成功(等待方法不懂没有关系,以后会再讲解如何设置等待)\n#WebDriverWait(driver, 20).until(\n# lambda the_driver: the_driver.find_element_by_id(\"com.kuyu:id/include_study_iv_add\").is_displayed())\nprint(u\"登录成功\")\n#driver.quit()\n#TouchAction(driver).press(x=297, y=1073).move_to(x=309, y=459).release().perform()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AppValidationsConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AppValidationsConfig(AppConfig):
name = 'app_validations'
<|reserved_special_token_1|>
from django.apps import AppConfig
class AppValidationsConfig(AppConfig):
name = 'app_validations'
|
flexible
|
{
"blob_id": "7a6a8b5e344a7b60e369f100885d1e26afa28f46",
"index": 7600,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AppValidationsConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AppValidationsConfig(AppConfig):\n name = 'app_validations'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass AppValidationsConfig(AppConfig):\n name = 'app_validations'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!flask/bin/python
from config import SQLALCHEMY_DATABASE_URI
from app.models import Patient, Appointment, PhoneCalls
from app import db
import os.path
# Create every table defined on the SQLAlchemy models, then seed the
# database with one record per table.
# Fixes: dropped the redundant C-style trailing semicolons and removed the
# dead commented-out generate_fake() calls.
db.create_all()

Patient.add_patient()
Appointment.add_appointment()
PhoneCalls.add_call()
|
normal
|
{
"blob_id": "173e6017884a1a4df64018b306ea71bcaa1c5f1d",
"index": 4528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.create_all()\nPatient.add_patient()\nAppointment.add_appointment()\nPhoneCalls.add_call()\n",
"step-3": "from config import SQLALCHEMY_DATABASE_URI\nfrom app.models import Patient, Appointment, PhoneCalls\nfrom app import db\nimport os.path\ndb.create_all()\nPatient.add_patient()\nAppointment.add_appointment()\nPhoneCalls.add_call()\n",
"step-4": "#!flask/bin/python\nfrom config import SQLALCHEMY_DATABASE_URI\nfrom app.models import Patient, Appointment, PhoneCalls\nfrom app import db\nimport os.path\ndb.create_all()\n\n# Patient.generate_fake();\n# Appointment.generate_fake();\n# PhoneCalls.generate_fake();\n\nPatient.add_patient();\nAppointment.add_appointment();\nPhoneCalls.add_call();",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@celery_app.task
def demo_celery_run():
return 'result is ok'
<|reserved_special_token_1|>
from celery_app import celery_app
@celery_app.task
def demo_celery_run():
return 'result is ok'
|
flexible
|
{
"blob_id": "4bb973b598a9c35394a0cd78ed9ba807f3a595d7",
"index": 2323,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@celery_app.task\ndef demo_celery_run():\n return 'result is ok'\n",
"step-3": "from celery_app import celery_app\n\n\n@celery_app.task\ndef demo_celery_run():\n return 'result is ok'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class NavTest(unittest.TestCase):
<|reserved_special_token_0|>
@classmethod
def tearDownClass(cls) ->None:
pass
def test01_getMarket(self):
resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'
)
resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')
response = HttpUtil().do_get(self.url)
self.assertEqual(resp_c, response['code'])
self.assertEqual(resp_m, response['msg'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NavTest(unittest.TestCase):
@classmethod
def setUpClass(cls) ->None:
cls.url = getParams.get_url('cms_getMarket', 'getMarket')
HttpUtil.get_token()
@classmethod
def tearDownClass(cls) ->None:
pass
def test01_getMarket(self):
resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'
)
resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')
response = HttpUtil().do_get(self.url)
self.assertEqual(resp_c, response['code'])
self.assertEqual(resp_m, response['msg'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = Log(logger='cms_getMarket').get_log()
class NavTest(unittest.TestCase):
@classmethod
def setUpClass(cls) ->None:
cls.url = getParams.get_url('cms_getMarket', 'getMarket')
HttpUtil.get_token()
@classmethod
def tearDownClass(cls) ->None:
pass
def test01_getMarket(self):
resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'
)
resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')
response = HttpUtil().do_get(self.url)
self.assertEqual(resp_c, response['code'])
self.assertEqual(resp_m, response['msg'])
<|reserved_special_token_1|>
import unittest
from utils import getParams
from utils.httpUtil import HttpUtil
from utils.logger import Log
logger = Log(logger='cms_getMarket').get_log()
class NavTest(unittest.TestCase):
@classmethod
def setUpClass(cls) ->None:
cls.url = getParams.get_url('cms_getMarket', 'getMarket')
HttpUtil.get_token()
@classmethod
def tearDownClass(cls) ->None:
pass
def test01_getMarket(self):
resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'
)
resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')
response = HttpUtil().do_get(self.url)
self.assertEqual(resp_c, response['code'])
self.assertEqual(resp_m, response['msg'])
|
flexible
|
{
"blob_id": "b328ee0b6c5afaf496297cefe477f933af458a03",
"index": 5654,
"step-1": "<mask token>\n\n\nclass NavTest(unittest.TestCase):\n <mask token>\n\n @classmethod\n def tearDownClass(cls) ->None:\n pass\n\n def test01_getMarket(self):\n resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'\n )\n resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')\n response = HttpUtil().do_get(self.url)\n self.assertEqual(resp_c, response['code'])\n self.assertEqual(resp_m, response['msg'])\n",
"step-2": "<mask token>\n\n\nclass NavTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.url = getParams.get_url('cms_getMarket', 'getMarket')\n HttpUtil.get_token()\n\n @classmethod\n def tearDownClass(cls) ->None:\n pass\n\n def test01_getMarket(self):\n resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'\n )\n resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')\n response = HttpUtil().do_get(self.url)\n self.assertEqual(resp_c, response['code'])\n self.assertEqual(resp_m, response['msg'])\n",
"step-3": "<mask token>\nlogger = Log(logger='cms_getMarket').get_log()\n\n\nclass NavTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.url = getParams.get_url('cms_getMarket', 'getMarket')\n HttpUtil.get_token()\n\n @classmethod\n def tearDownClass(cls) ->None:\n pass\n\n def test01_getMarket(self):\n resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'\n )\n resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')\n response = HttpUtil().do_get(self.url)\n self.assertEqual(resp_c, response['code'])\n self.assertEqual(resp_m, response['msg'])\n",
"step-4": "import unittest\nfrom utils import getParams\nfrom utils.httpUtil import HttpUtil\nfrom utils.logger import Log\nlogger = Log(logger='cms_getMarket').get_log()\n\n\nclass NavTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.url = getParams.get_url('cms_getMarket', 'getMarket')\n HttpUtil.get_token()\n\n @classmethod\n def tearDownClass(cls) ->None:\n pass\n\n def test01_getMarket(self):\n resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'\n )\n resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')\n response = HttpUtil().do_get(self.url)\n self.assertEqual(resp_c, response['code'])\n self.assertEqual(resp_m, response['msg'])\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
from cache_replacement.double_linked_list import DoubleLinkedList
from cache_replacement.node import Node
class LRUCache:
    """Least-recently-used cache backed by a hash map plus a linked list.

    ``cache_map`` maps keys to list nodes for O(1) lookup, while
    ``cache_list`` keeps nodes ordered by recency: ``append_front`` marks a
    node as most recently used, and ``remove()`` with no argument evicts
    from the opposite (least recently used) end.
    """

    def __init__(self, capacity):
        # Maximum number of entries held before evictions start.
        self.capacity = capacity
        # Current number of stored entries.
        self.size = 0
        # key -> Node, giving O(1) access to any list node.
        self.cache_map = {}
        # Recency-ordered node list (front = most recently used).
        self.cache_list = DoubleLinkedList(capacity=capacity)

    def get(self, key):
        """Return the value stored under *key*, or -1 if absent.

        A hit also promotes the node to the most-recently-used position.
        """
        if key not in self.cache_map:
            return -1
        node = self.cache_map.get(key)
        # Promote to most-recently-used position.
        self.cache_list.remove(node)
        self.cache_list.append_front(node)
        return node.value

    def put(self, key, value):
        """Insert or update *key* with *value*, evicting the LRU entry if full."""
        if key in self.cache_map:
            # Replace the existing node in place; size is unchanged.
            old_node = self.cache_map.get(key)
            self.cache_list.remove(old_node)
            new_node = Node(key, value)
            # BUG FIX: an updated entry must become the most recently used,
            # matching get() and the insert path below.  The original called
            # cache_list.append() here (presumably tail insertion), which
            # would make a freshly updated key the next eviction candidate.
            self.cache_list.append_front(new_node)
            self.cache_map[key] = new_node
        else:
            if self.size == self.capacity:
                # Cache full: evict the least recently used node.
                old_node = self.cache_list.remove()
                self.cache_map.pop(old_node.key)
            else:
                self.size += 1
            new_node = Node(key, value)
            self.cache_list.append_front(new_node)
            self.cache_map[key] = new_node
|
normal
|
{
"blob_id": "898ff6e38e80419d61ec4bbde827e8ca729eb19a",
"index": 5202,
"step-1": "<mask token>\n\n\nclass LRUCache:\n <mask token>\n <mask token>\n\n def put(self, key, value):\n if key in self.cache_map:\n old_node = self.cache_map.get(key)\n self.cache_list.remove(old_node)\n new_node = Node(key, value)\n self.cache_list.append(new_node)\n self.cache_map[key] = new_node\n else:\n if self.size == self.capacity:\n old_node = self.cache_list.remove()\n self.cache_map.pop(old_node.key)\n else:\n self.size += 1\n new_node = Node(key, value)\n self.cache_list.append_front(new_node)\n self.cache_map[key] = new_node\n",
"step-2": "<mask token>\n\n\nclass LRUCache:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.size = 0\n self.cache_map = {}\n self.cache_list = DoubleLinkedList(capacity=capacity)\n <mask token>\n\n def put(self, key, value):\n if key in self.cache_map:\n old_node = self.cache_map.get(key)\n self.cache_list.remove(old_node)\n new_node = Node(key, value)\n self.cache_list.append(new_node)\n self.cache_map[key] = new_node\n else:\n if self.size == self.capacity:\n old_node = self.cache_list.remove()\n self.cache_map.pop(old_node.key)\n else:\n self.size += 1\n new_node = Node(key, value)\n self.cache_list.append_front(new_node)\n self.cache_map[key] = new_node\n",
"step-3": "<mask token>\n\n\nclass LRUCache:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.size = 0\n self.cache_map = {}\n self.cache_list = DoubleLinkedList(capacity=capacity)\n\n def get(self, key):\n if key not in self.cache_map:\n return -1\n else:\n node = self.cache_map.get(key)\n self.cache_list.remove(node)\n self.cache_list.append_front(node)\n return node.value\n\n def put(self, key, value):\n if key in self.cache_map:\n old_node = self.cache_map.get(key)\n self.cache_list.remove(old_node)\n new_node = Node(key, value)\n self.cache_list.append(new_node)\n self.cache_map[key] = new_node\n else:\n if self.size == self.capacity:\n old_node = self.cache_list.remove()\n self.cache_map.pop(old_node.key)\n else:\n self.size += 1\n new_node = Node(key, value)\n self.cache_list.append_front(new_node)\n self.cache_map[key] = new_node\n",
"step-4": "from cache_replacement.double_linked_list import DoubleLinkedList\nfrom cache_replacement.node import Node\n\n\nclass LRUCache:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.size = 0\n self.cache_map = {}\n self.cache_list = DoubleLinkedList(capacity=capacity)\n\n def get(self, key):\n if key not in self.cache_map:\n return -1\n else:\n node = self.cache_map.get(key)\n self.cache_list.remove(node)\n self.cache_list.append_front(node)\n return node.value\n\n def put(self, key, value):\n if key in self.cache_map:\n old_node = self.cache_map.get(key)\n self.cache_list.remove(old_node)\n new_node = Node(key, value)\n self.cache_list.append(new_node)\n self.cache_map[key] = new_node\n else:\n if self.size == self.capacity:\n old_node = self.cache_list.remove()\n self.cache_map.pop(old_node.key)\n else:\n self.size += 1\n new_node = Node(key, value)\n self.cache_list.append_front(new_node)\n self.cache_map[key] = new_node\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
import types
from robot.libraries.BuiltIn import BuiltIn
def GetAllVariableBySuffix (endswith):
    """Return a dict of Robot Framework variables whose name ends with *endswith*.

    The suffix is matched just before the closing ``}`` of the variable
    name (e.g. ``endswith='Points'`` matches ``${FooPoints}``).  Matching
    names are still printed, preserving the original console/log output.

    BUG FIX: the original created ``result`` but never populated or
    returned it, so the keyword always returned None despite its name.
    """
    all_vars = BuiltIn().get_variables()
    result = {}
    for var_name, var in all_vars.items():
        if var_name.endswith(endswith + "}"):
            print(var_name)  # keep the original logging side effect
            result[var_name] = var
    return result
def CountFinalPoints ():
    """Sum the integer values of all Robot variables named ``...Points}``.

    Iterates every variable visible to Robot Framework and adds up the
    ``int()`` of each one whose name ends in ``Points}``; all other
    variables are ignored.  Returns the total as an int.

    Fix: dropped the dead no-op ``result = int(result)`` that immediately
    followed ``result = 0`` in the original.
    """
    all_vars = BuiltIn().get_variables()
    result = 0
    for var_name, var in all_vars.items():
        if var_name.endswith("Points}"):
            result += int(var)
    return result
|
normal
|
{
"blob_id": "e9de42bb8ed24b95e5196f305fe658d67279c078",
"index": 3915,
"step-1": "import types\nfrom robot.libraries.BuiltIn import BuiltIn\n\ndef GetAllVariableBySuffix (endswith):\n all_vars = BuiltIn().get_variables()\n result = {}\n for var_name, var in all_vars.items():\n #print var_name\n if var_name.endswith(endswith+\"}\"):\n print var_name\n #print var\n\ndef CountFinalPoints ():\n all_vars = BuiltIn().get_variables()\n result = 0\n result = int(result)\n for var_name, var in all_vars.items():\n #print var_name\n if var_name.endswith(\"Points}\"):\n result += int(var)\n #print var\n return result\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Maps service instances to locations. See class.__doc__ """
from collections import defaultdict
from datetime import datetime
from sys import maxsize
from sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,
UniqueConstraint, CheckConstraint)
from sqlalchemy.orm import (relation, deferred, backref, defer, undefer,
lazyload, contains_eager, object_session)
from sqlalchemy.sql import and_, or_, null, case
from sqlalchemy.sql.functions import coalesce
from aquilon.exceptions_ import InternalError, AquilonError
from aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,
Building, City, Campus, Country, Continent, Hub,
Organization, ServiceInstance, Network, Personality,
PersonalityServiceListItem, HostEnvironment)
_TN = 'service_map'

# TODO: We could calculate this map by building a graph of Location subclasses
# using Location.valid_parents as edges, and then doing a topological sort
# NOTE: The actual values here are unimportant, what matters is their order:
# a smaller value means a narrower (more specific) location class, and the
# lookup code below prefers the map entry with the smallest priority tuple.
_LOCATION_PRIORITY = {
    # Rack and Desk are at the same level
    Rack: 1000,
    Desk: 1000,
    Room: 1100,
    Bunker: 1200,
    Building: 1300,
    City: 1400,
    Campus: 1500,
    Country: 1600,
    Continent: 1700,
    Hub: 1800,
    Organization: 1900,
}

# NOTE: The actual value here is unimportant, what matters is the order wrt.
# location-based priorities: a network-scoped map ranks before (is more
# specific than) any location-scoped map.
_NETWORK_PRIORITY = 100

# NOTE: The actual values here are unimportant, only their order matters:
# personality-targeted maps beat host-environment-targeted maps, which in
# turn beat global (untargeted) maps.
_TARGET_PERSONALITY = 10
_TARGET_ENVIRONMENT = 100
_TARGET_GLOBAL = 1000
class ServiceMap(Base):
    """ Service Map: mapping a service_instance to a location.

        The rows in this table assert that an instance is a valid useable
        default that clients can choose as their provider during service
        autoconfiguration.

        The contained information is actually a triplet:
            - The service instance to use,
            - Rules for the scope where the map is valid,
            - Rules for which objects does the map apply.
    """

    __tablename__ = _TN

    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)

    service_instance_id = Column(ForeignKey(ServiceInstance.id,
                                            ondelete='CASCADE'),
                                 nullable=False)

    # Target-object rules: at most one of personality_id and
    # host_environment_id may be set (see the CheckConstraint below);
    # both NULL means the map applies to all objects.
    personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
                            nullable=True, index=True)

    host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)

    # Scope rules: exactly one of location_id and network_id is set;
    # this is enforced in __init__(), not at the database level.
    location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
                         nullable=True, index=True)

    network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
                        nullable=True, index=True)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))

    service_instance = relation(ServiceInstance, innerjoin=True,
                                backref=backref('service_map',
                                                cascade="all, delete-orphan",
                                                passive_deletes=True))
    personality = relation(Personality)
    host_environment = relation(HostEnvironment)
    location = relation(Location)
    network = relation(Network)

    __table_args__ = (UniqueConstraint(service_instance_id,
                                       personality_id, host_environment_id,
                                       location_id, network_id,
                                       name='%s_uk' % _TN),
                      # At most one of personality_id and host_environment_id
                      # can be not NULL
                      CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
                                      case([(host_environment_id != null(), 1)], else_=0) <= 1,
                                      name='%s_target_ck' % _TN))

    @property
    def service(self):
        """The Service that this map entry's instance belongs to."""
        return self.service_instance.service

    @property
    def scope_priority(self):
        """Rank of this map's scope; smaller values are more specific.

        A network-scoped map always ranks before any location-scoped one;
        location-scoped maps rank by how narrow the location class is.
        """
        if self.network:
            return _NETWORK_PRIORITY
        else:
            try:
                return _LOCATION_PRIORITY[type(self.location)]
            except KeyError:  # pragma: no cover
                raise InternalError("The service map is not prepared to handle "
                                    "location class %r" % type(self.location))

    @property
    def object_priority(self):
        """Rank of this map's target; smaller values are more specific.

        Personality-targeted maps beat host-environment-targeted maps,
        which in turn beat global (untargeted) maps.
        """
        if self.personality:
            return _TARGET_PERSONALITY
        elif self.host_environment:
            return _TARGET_ENVIRONMENT
        else:
            return _TARGET_GLOBAL

    @property
    def priority(self):
        """Combined sort key; lexicographically smaller tuples win.

        The target-object dimension dominates the scope dimension.
        """
        return (self.object_priority, self.scope_priority)

    @property
    def scope(self):
        """The Location or Network this map entry is bound to."""
        if self.location:
            return self.location
        else:
            return self.network

    def __init__(self, service_instance, network=None, location=None, personality=None,
                 host_environment=None):
        """Create a map entry; exactly one of network/location must be given,
        and at most one of personality/host_environment.  The guarded cases
        are expected to be caught earlier, hence the "no cover" pragmas.
        """
        if network and location:  # pragma: no cover
            raise AquilonError("A service can't be mapped to a Network and a "
                               "Location at the same time")

        if network is None and location is None:  # pragma: no cover
            raise AquilonError("A service should by mapped to a Network or a "
                               "Location")

        if personality and host_environment:  # pragma: no cover
            raise AquilonError("A service can't be mapped to a Personality and "
                               "a HostEnvironment at the same time")

        super(ServiceMap, self).__init__(service_instance=service_instance,
                                         network=network, location=location,
                                         personality=personality,
                                         host_environment=host_environment)

    @staticmethod
    def get_location_mapped_instances(dbservice, dblocation):
        """Return the best-mapped instances of a single service at a location.

        Simplified service map lookup - single service, location-based maps
        only, no client bindings.  Only globally-targeted maps (no
        personality or host-environment rules) are considered, and all
        instances sharing the lowest (most specific) priority are returned.
        """
        session = object_session(dbservice)

        # Consider the location itself and all of its parents, so a map
        # at e.g. building level covers racks inside that building.
        location_ids = [loc.id for loc in dblocation.parents]
        location_ids.append(dblocation.id)

        q = session.query(ServiceMap)
        q = q.filter(and_(ServiceMap.personality_id == null(),
                          ServiceMap.host_environment_id == null()))
        q = q.filter(ServiceMap.location_id.in_(location_ids))
        q = q.join(ServiceInstance)
        q = q.filter_by(service=dbservice)
        q = q.options(contains_eager('service_instance'),
                      defer('service_instance.comments'),
                      lazyload('service_instance.service'))

        instances = []
        # Sentinel: any real priority tuple compares smaller than this
        min_seen_priority = (maxsize,)

        # We want the instance(s) with the lowest priority
        for map in q:
            si = map.service_instance
            if min_seen_priority > map.priority:
                instances = [si]
                min_seen_priority = map.priority
            elif min_seen_priority == map.priority:
                instances.append(si)

        return instances

    @staticmethod
    def get_mapped_instance_cache(dbservices, dbstage, dblocation,
                                  dbnetwork=None):
        """Returns dict of requested services to closest mapped instances."""
        session = object_session(dblocation)

        # Consider the location itself and all of its parents
        location_ids = [loc.id for loc in dblocation.parents]
        location_ids.append(dblocation.id)

        PSLI = PersonalityServiceListItem

        q = session.query(ServiceMap)
        q = q.join(ServiceInstance)
        q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))
        # Outer join, so services not on the personality's service list are
        # still looked up (with PSLI columns as NULL).
        q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
                                   PSLI.service_id == ServiceInstance.service_id))

        # Rules for filtering by target object; the coalesce() lets a
        # host environment set on the personality's service list entry
        # override the personality's own host environment.
        q = q.filter(or_(
            and_(ServiceMap.personality_id == null(),
                 ServiceMap.host_environment_id == null()),
            ServiceMap.personality == dbstage.personality,
            ServiceMap.host_environment_id == coalesce(
                PSLI.host_environment_id,
                dbstage.personality.host_environment.id)))

        # Rules for filtering by location/scope
        if dbnetwork:
            q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
                             ServiceMap.network_id == dbnetwork.id))
        else:
            q = q.filter(ServiceMap.location_id.in_(location_ids))

        q = q.options(contains_eager('service_instance'),
                      defer('service_instance.comments'),
                      undefer('service_instance._client_count'),
                      lazyload('service_instance.service'))

        instance_cache = {}
        # Sentinel default: any real priority tuple compares smaller
        instance_priority = defaultdict(lambda: (maxsize,))

        # For every service, we want the instance(s) with the lowest priority
        for map in q:
            si = map.service_instance
            service = si.service
            if instance_priority[service] > map.priority:
                instance_cache[service] = [si]
                instance_priority[service] = map.priority
            elif instance_priority[service] == map.priority:
                instance_cache[service].append(si)

        return instance_cache
|
normal
|
{
"blob_id": "a9e0659c6a18ffc954079845b7d0de04c46a78c9",
"index": 7204,
"step-1": "<mask token>\n\n\nclass ServiceMap(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == 
null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > 
map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n",
"step-2": "<mask token>\n\n\nclass ServiceMap(Base):\n <mask token>\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if 
self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = 
session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n",
"step-3": "<mask token>\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service 
map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return 
instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n",
"step-4": "<mask token>\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom sys import maxsize\nfrom sqlalchemy import Column, Integer, Sequence, DateTime, ForeignKey, UniqueConstraint, CheckConstraint\nfrom sqlalchemy.orm import relation, deferred, backref, defer, undefer, lazyload, contains_eager, object_session\nfrom sqlalchemy.sql import and_, or_, null, case\nfrom sqlalchemy.sql.functions import coalesce\nfrom aquilon.exceptions_ import InternalError, AquilonError\nfrom aquilon.aqdb.model import Base, Location, Desk, Rack, Room, Bunker, Building, City, Campus, Country, Continent, Hub, Organization, ServiceInstance, Network, Personality, PersonalityServiceListItem, HostEnvironment\n_TN = 'service_map'\n_LOCATION_PRIORITY = {Rack: 1000, Desk: 1000, Room: 1100, Bunker: 1200,\n Building: 1300, City: 1400, Campus: 1500, Country: 1600, Continent: \n 1700, Hub: 1800, Organization: 1900}\n_NETWORK_PRIORITY = 100\n_TARGET_PERSONALITY = 10\n_TARGET_ENVIRONMENT = 100\n_TARGET_GLOBAL = 1000\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, 
ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n 
super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n 
ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n",
"step-5": "# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-\n# ex: set expandtab softtabstop=4 shiftwidth=4:\n#\n# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Maps service instances to locations. See class.__doc__ \"\"\"\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom sys import maxsize\n\nfrom sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,\n UniqueConstraint, CheckConstraint)\nfrom sqlalchemy.orm import (relation, deferred, backref, defer, undefer,\n lazyload, contains_eager, object_session)\nfrom sqlalchemy.sql import and_, or_, null, case\nfrom sqlalchemy.sql.functions import coalesce\n\nfrom aquilon.exceptions_ import InternalError, AquilonError\nfrom aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,\n Building, City, Campus, Country, Continent, Hub,\n Organization, ServiceInstance, Network, Personality,\n PersonalityServiceListItem, HostEnvironment)\n\n_TN = 'service_map'\n\n# TODO: We could calculate this map by building a graph of Location subclasses\n# using Location.valid_parents as edges, and then doing a topological sort\n# NOTE: The actual values here are unimportant, what matters is their order\n_LOCATION_PRIORITY = {\n # Rack and Desk are at the same level\n Rack: 1000,\n Desk: 1000,\n Room: 1100,\n Bunker: 1200,\n Building: 1300,\n City: 1400,\n Campus: 1500,\n Country: 1600,\n Continent: 1700,\n 
Hub: 1800,\n Organization: 1900,\n}\n\n# NOTE: The actual value here is unimportant, what matters is the order wrt.\n# location-based priorities\n_NETWORK_PRIORITY = 100\n\n# NOTE: The actual values here are unimportant, only their order matters\n_TARGET_PERSONALITY = 10\n_TARGET_ENVIRONMENT = 100\n_TARGET_GLOBAL = 1000\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n\n __tablename__ = _TN\n\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n\n service_instance_id = Column(ForeignKey(ServiceInstance.id,\n ondelete='CASCADE'),\n nullable=False)\n\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n\n service_instance = relation(ServiceInstance, innerjoin=True,\n backref=backref('service_map',\n cascade=\"all, delete-orphan\",\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n\n __table_args__ = (UniqueConstraint(service_instance_id,\n personality_id, host_environment_id,\n location_id, network_id,\n name='%s_uk' % _TN),\n # At most one of personality_id and host_environment_id\n # can be not NULL\n 
CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1,\n name='%s_target_ck' % _TN))\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError: # pragma: no cover\n raise InternalError(\"The service map is not prepared to handle \"\n \"location class %r\" % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return (self.object_priority, self.scope_priority)\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None, personality=None,\n host_environment=None):\n if network and location: # pragma: no cover\n raise AquilonError(\"A service can't be mapped to a Network and a \"\n \"Location at the same time\")\n\n if network is None and location is None: # pragma: no cover\n raise AquilonError(\"A service should by mapped to a Network or a \"\n \"Location\")\n\n if personality and host_environment: # pragma: no cover\n raise AquilonError(\"A service can't be mapped to a Personality and \"\n \"a HostEnvironment at the same time\")\n\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location,\n personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n # Simplified service map lookup - single service, location-based maps\n # only, no client bindings\n session = object_session(dbservice)\n\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n\n q = 
session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(),\n ServiceMap.host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'),\n defer('service_instance.comments'),\n lazyload('service_instance.service'))\n\n instances = []\n min_seen_priority = (maxsize,)\n\n # We want the instance(s) with the lowest priority\n for map in q:\n si = map.service_instance\n\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n\n session = object_session(dblocation)\n\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n\n PSLI = PersonalityServiceListItem\n\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))\n\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n\n # Rules for filtering by target object\n q = q.filter(or_(\n and_(ServiceMap.personality_id == null(),\n ServiceMap.host_environment_id == null()),\n ServiceMap.personality == dbstage.personality,\n ServiceMap.host_environment_id == coalesce(\n PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n\n # Rules for filtering by location/scope\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids),\n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n\n q = q.options(contains_eager('service_instance'),\n defer('service_instance.comments'),\n 
undefer('service_instance._client_count'),\n lazyload('service_instance.service'))\n\n instance_cache = {}\n instance_priority = defaultdict(lambda: (maxsize,))\n\n # For every service, we want the instance(s) with the lowest priority\n for map in q:\n si = map.service_instance\n service = si.service\n\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n\n return instance_cache\n",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
#### As an example below shell script can be used to execute this every 300s.
####!/bin/bash
####while true
####do
#### /usr/bin/sudo python3 /path/of/the/python/script.sh
####done
#!/usr/bin/python
import sys
import time
import paho.mqtt.client as mqtt
broker_url = "<IP_Address_of_MQTT_broker>"
broker_port = <MQTT_Broker_port>
def on_connect(client, userdata, flags, rc):
print("Connected With Result Code: {}".format(rc))
def on_message(client, userdata, message):
print("Message Recieved: "+message.payload.decode())
file_name=message.payload.decode()
file_path="/home/demouser/nagios/node-check/logs/"+file_name+".ok"
file1 = open(file_path, 'w')
file1.write(message.payload.decode()+" is up and running\n")
file1.close()
def on_disconnect(client, userdata, rc):
print("Client Got Disconnected")
client = mqtt.Client("Nagios_NodeChecker")
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
client.username_pw_set(username="<mqtt_username>",password="<mqtt_password>")
client.connect(broker_url, broker_port)
client.subscribe(topic="nagios/node_check", qos=2)
client.message_callback_add("nagios/node_check", on_message)
client.loop_start()
time.sleep(300)
client.loop_stop()
|
normal
|
{
"blob_id": "f311b803d8c0ee68bc43526f56e6b14f3a2836b8",
"index": 7309,
"step-1": "#### As an example below shell script can be used to execute this every 300s.\r\n####!/bin/bash\r\n####while true\r\n####do\r\n#### /usr/bin/sudo python3 /path/of/the/python/script.sh\r\n####done\r\n\r\n#!/usr/bin/python\r\nimport sys\r\nimport time\r\nimport paho.mqtt.client as mqtt\r\n\r\nbroker_url = \"<IP_Address_of_MQTT_broker>\"\r\nbroker_port = <MQTT_Broker_port>\r\n\r\ndef on_connect(client, userdata, flags, rc):\r\n print(\"Connected With Result Code: {}\".format(rc))\r\n\r\ndef on_message(client, userdata, message):\r\n print(\"Message Recieved: \"+message.payload.decode())\r\n file_name=message.payload.decode()\r\n file_path=\"/home/demouser/nagios/node-check/logs/\"+file_name+\".ok\"\r\n file1 = open(file_path, 'w')\r\n file1.write(message.payload.decode()+\" is up and running\\n\")\r\n file1.close()\r\n\r\ndef on_disconnect(client, userdata, rc):\r\n print(\"Client Got Disconnected\")\r\n\r\nclient = mqtt.Client(\"Nagios_NodeChecker\")\r\nclient.on_connect = on_connect\r\nclient.on_disconnect = on_disconnect\r\nclient.on_message = on_message\r\nclient.username_pw_set(username=\"<mqtt_username>\",password=\"<mqtt_password>\")\r\n\r\nclient.connect(broker_url, broker_port)\r\nclient.subscribe(topic=\"nagios/node_check\", qos=2)\r\nclient.message_callback_add(\"nagios/node_check\", on_message)\r\n\r\nclient.loop_start()\r\ntime.sleep(300)\r\nclient.loop_stop()\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def send_confirmation_email(user):
try:
confirmation_key = user.confirmation_key
except:
confirmation_key = user.add_unconfirmed_email(user.email)
msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':
settings.SITE_URL, 'user': user.email, 'key': confirmation_key})
msg_html = render_to_string('email/confirmation.html', {'SITE_URL':
settings.SITE_URL, 'user': user.email, 'key': confirmation_key})
return send_mail('Confirmation email', msg_txt,
'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_temp_password():
length = 7
chars = string.ascii_letters + string.digits
rnd = random.SystemRandom()
return ''.join(rnd.choice(chars) for i in range(length))
def send_confirmation_email(user):
try:
confirmation_key = user.confirmation_key
except:
confirmation_key = user.add_unconfirmed_email(user.email)
msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':
settings.SITE_URL, 'user': user.email, 'key': confirmation_key})
msg_html = render_to_string('email/confirmation.html', {'SITE_URL':
settings.SITE_URL, 'user': user.email, 'key': confirmation_key})
return send_mail('Confirmation email', msg_txt,
'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)
<|reserved_special_token_1|>
import os, random, string
from django.conf import settings
from django.template.loader import render_to_string
from django.core.mail import send_mail
def generate_temp_password():
length = 7
chars = string.ascii_letters + string.digits
rnd = random.SystemRandom()
return ''.join(rnd.choice(chars) for i in range(length))
def send_confirmation_email(user):
try:
confirmation_key = user.confirmation_key
except:
confirmation_key = user.add_unconfirmed_email(user.email)
msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':
settings.SITE_URL, 'user': user.email, 'key': confirmation_key})
msg_html = render_to_string('email/confirmation.html', {'SITE_URL':
settings.SITE_URL, 'user': user.email, 'key': confirmation_key})
return send_mail('Confirmation email', msg_txt,
'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)
<|reserved_special_token_1|>
import os, random, string
from django.conf import settings
from django.template.loader import render_to_string
from django.core.mail import send_mail
def generate_temp_password():
length = 7
chars = string.ascii_letters + string.digits
rnd = random.SystemRandom()
return ''.join(rnd.choice(chars) for i in range(length))
def send_confirmation_email(user):
#Bug in simple_email_confirmation: refer to https://github.com/mfogel/django-simple-email-confirmation/issues/22
try:
confirmation_key = user.confirmation_key
except:
confirmation_key = user.add_unconfirmed_email(user.email)
msg_txt=render_to_string('email/confirmation.txt', {'SITE_URL': settings.SITE_URL, 'user': user.email, 'key' : confirmation_key})
msg_html = render_to_string('email/confirmation.html', {'SITE_URL': settings.SITE_URL, 'user': user.email, 'key' : confirmation_key})
return send_mail('Confirmation email',msg_txt,'daniyar.yeralin@gmail.com',[user.email],html_message=msg_html,)
|
flexible
|
{
"blob_id": "822fc2941099cb9d7791580678cfb2a89a987175",
"index": 4685,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef send_confirmation_email(user):\n try:\n confirmation_key = user.confirmation_key\n except:\n confirmation_key = user.add_unconfirmed_email(user.email)\n msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n msg_html = render_to_string('email/confirmation.html', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n return send_mail('Confirmation email', msg_txt,\n 'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)\n",
"step-3": "<mask token>\n\n\ndef generate_temp_password():\n length = 7\n chars = string.ascii_letters + string.digits\n rnd = random.SystemRandom()\n return ''.join(rnd.choice(chars) for i in range(length))\n\n\ndef send_confirmation_email(user):\n try:\n confirmation_key = user.confirmation_key\n except:\n confirmation_key = user.add_unconfirmed_email(user.email)\n msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n msg_html = render_to_string('email/confirmation.html', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n return send_mail('Confirmation email', msg_txt,\n 'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)\n",
"step-4": "import os, random, string\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail\n\n\ndef generate_temp_password():\n length = 7\n chars = string.ascii_letters + string.digits\n rnd = random.SystemRandom()\n return ''.join(rnd.choice(chars) for i in range(length))\n\n\ndef send_confirmation_email(user):\n try:\n confirmation_key = user.confirmation_key\n except:\n confirmation_key = user.add_unconfirmed_email(user.email)\n msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n msg_html = render_to_string('email/confirmation.html', {'SITE_URL':\n settings.SITE_URL, 'user': user.email, 'key': confirmation_key})\n return send_mail('Confirmation email', msg_txt,\n 'daniyar.yeralin@gmail.com', [user.email], html_message=msg_html)\n",
"step-5": "import os, random, string\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail\n\ndef generate_temp_password(): \n length = 7\n chars = string.ascii_letters + string.digits\n rnd = random.SystemRandom()\n return ''.join(rnd.choice(chars) for i in range(length))\n\ndef send_confirmation_email(user):\n #Bug in simple_email_confirmation: refer to https://github.com/mfogel/django-simple-email-confirmation/issues/22\n try: \n confirmation_key = user.confirmation_key\n except:\n confirmation_key = user.add_unconfirmed_email(user.email)\n msg_txt=render_to_string('email/confirmation.txt', {'SITE_URL': settings.SITE_URL, 'user': user.email, 'key' : confirmation_key})\n msg_html = render_to_string('email/confirmation.html', {'SITE_URL': settings.SITE_URL, 'user': user.email, 'key' : confirmation_key})\n return send_mail('Confirmation email',msg_txt,'daniyar.yeralin@gmail.com',[user.email],html_message=msg_html,)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#source: https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
#capture the video file
b="blood.mp4"
c="Center.avi"
d="Deformed.avi"
i="Inlet.avi"
videofile=c
vs = cv2.VideoCapture(videofile)
#width = vs.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
#height = vs.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
width = vs.get(3)
height=vs.get(4)
print("Width x: ",width, " Height y: ",height)
print("Frame Number,x coordinate of ROI,Weidth,Height,Width/Height")
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
j=0
totalframesampled=0
totalcelldetected=0
while True:
j+=1
if j%1000 !=0 :
continue
totalframesampled+=1
# grab the current frame and initialize the occupied/unoccupied
# text
frame = vs.read()
frame = frame[1]
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if frame is None:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
#print(cnts)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
#print("Frame: ",j)
#print(cnts)
# loop over the contours
for c in cnts:
#print("c:",c)
area=cv2.contourArea(c)
#print("Area:",area)
minarea=250
if area<=minarea:
continue
(x, y, w, h) = cv2.boundingRect(c)# top left x,y, wid,hei
condition_center_inlet=x>440 and x<450
condition_deformation=y>240 and y<300
if condition_center_inlet:
totalcelldetected+=1
print("totalcelldetected:",totalcelldetected)
print(j,x,y,w,h,w/h)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
k=0
frameskip=10 # for center and inlet skip=10
while k<frameskip:
k+=1
temp=vs.read()
break
# if the contour is too small, ignore it
# compute the bounding box for the contour, draw it on the frame,
# and update the text
# draw the text and timestamp on the frame
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the lop
if key == ord("q"):
break
# cleanup the camera and close any open windows
vs.release()
cv2.destroyAllWindows()
print("Total frame: ",j-1)
print("Frame sampled: ",totalframesampled)
print("Total object detected: ",totalcelldetected)
|
normal
|
{
"blob_id": "4bd928c16cd0f06931aad5a478f8a911c5a7108b",
"index": 5850,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Width x: ', width, ' Height y: ', height)\nprint('Frame Number,x coordinate of ROI,Weidth,Height,Width/Height')\n<mask token>\nwhile True:\n j += 1\n if j % 1000 != 0:\n continue\n totalframesampled += 1\n frame = vs.read()\n frame = frame[1]\n text = 'Unoccupied'\n if frame is None:\n break\n frame = imutils.resize(frame, width=500)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n if firstFrame is None:\n firstFrame = gray\n continue\n frameDelta = cv2.absdiff(firstFrame, gray)\n thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.dilate(thresh, None, iterations=2)\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n for c in cnts:\n area = cv2.contourArea(c)\n minarea = 250\n if area <= minarea:\n continue\n x, y, w, h = cv2.boundingRect(c)\n condition_center_inlet = x > 440 and x < 450\n condition_deformation = y > 240 and y < 300\n if condition_center_inlet:\n totalcelldetected += 1\n print('totalcelldetected:', totalcelldetected)\n print(j, x, y, w, h, w / h)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n text = 'Occupied'\n k = 0\n frameskip = 10\n while k < frameskip:\n k += 1\n temp = vs.read()\n break\n cv2.putText(frame, 'Room Status: {}'.format(text), (10, 20), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n cv2.putText(frame, datetime.datetime.now().strftime(\n '%A %d %B %Y %I:%M:%S%p'), (10, frame.shape[0] - 10), cv2.\n FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\n cv2.imshow('Security Feed', frame)\n cv2.imshow('Thresh', thresh)\n cv2.imshow('Frame Delta', frameDelta)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\nvs.release()\ncv2.destroyAllWindows()\nprint('Total frame: ', j - 1)\nprint('Frame sampled: ', totalframesampled)\nprint('Total object detected: ', totalcelldetected)\n",
"step-3": "<mask token>\nb = 'blood.mp4'\nc = 'Center.avi'\nd = 'Deformed.avi'\ni = 'Inlet.avi'\nvideofile = c\nvs = cv2.VideoCapture(videofile)\nwidth = vs.get(3)\nheight = vs.get(4)\nprint('Width x: ', width, ' Height y: ', height)\nprint('Frame Number,x coordinate of ROI,Weidth,Height,Width/Height')\nfirstFrame = None\nj = 0\ntotalframesampled = 0\ntotalcelldetected = 0\nwhile True:\n j += 1\n if j % 1000 != 0:\n continue\n totalframesampled += 1\n frame = vs.read()\n frame = frame[1]\n text = 'Unoccupied'\n if frame is None:\n break\n frame = imutils.resize(frame, width=500)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n if firstFrame is None:\n firstFrame = gray\n continue\n frameDelta = cv2.absdiff(firstFrame, gray)\n thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.dilate(thresh, None, iterations=2)\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n for c in cnts:\n area = cv2.contourArea(c)\n minarea = 250\n if area <= minarea:\n continue\n x, y, w, h = cv2.boundingRect(c)\n condition_center_inlet = x > 440 and x < 450\n condition_deformation = y > 240 and y < 300\n if condition_center_inlet:\n totalcelldetected += 1\n print('totalcelldetected:', totalcelldetected)\n print(j, x, y, w, h, w / h)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n text = 'Occupied'\n k = 0\n frameskip = 10\n while k < frameskip:\n k += 1\n temp = vs.read()\n break\n cv2.putText(frame, 'Room Status: {}'.format(text), (10, 20), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n cv2.putText(frame, datetime.datetime.now().strftime(\n '%A %d %B %Y %I:%M:%S%p'), (10, frame.shape[0] - 10), cv2.\n FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\n cv2.imshow('Security Feed', frame)\n cv2.imshow('Thresh', thresh)\n cv2.imshow('Frame Delta', frameDelta)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n 
break\nvs.release()\ncv2.destroyAllWindows()\nprint('Total frame: ', j - 1)\nprint('Frame sampled: ', totalframesampled)\nprint('Total object detected: ', totalcelldetected)\n",
"step-4": "from imutils.video import VideoStream\nimport argparse\nimport datetime\nimport imutils\nimport time\nimport cv2\nb = 'blood.mp4'\nc = 'Center.avi'\nd = 'Deformed.avi'\ni = 'Inlet.avi'\nvideofile = c\nvs = cv2.VideoCapture(videofile)\nwidth = vs.get(3)\nheight = vs.get(4)\nprint('Width x: ', width, ' Height y: ', height)\nprint('Frame Number,x coordinate of ROI,Weidth,Height,Width/Height')\nfirstFrame = None\nj = 0\ntotalframesampled = 0\ntotalcelldetected = 0\nwhile True:\n j += 1\n if j % 1000 != 0:\n continue\n totalframesampled += 1\n frame = vs.read()\n frame = frame[1]\n text = 'Unoccupied'\n if frame is None:\n break\n frame = imutils.resize(frame, width=500)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n if firstFrame is None:\n firstFrame = gray\n continue\n frameDelta = cv2.absdiff(firstFrame, gray)\n thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.dilate(thresh, None, iterations=2)\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n for c in cnts:\n area = cv2.contourArea(c)\n minarea = 250\n if area <= minarea:\n continue\n x, y, w, h = cv2.boundingRect(c)\n condition_center_inlet = x > 440 and x < 450\n condition_deformation = y > 240 and y < 300\n if condition_center_inlet:\n totalcelldetected += 1\n print('totalcelldetected:', totalcelldetected)\n print(j, x, y, w, h, w / h)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n text = 'Occupied'\n k = 0\n frameskip = 10\n while k < frameskip:\n k += 1\n temp = vs.read()\n break\n cv2.putText(frame, 'Room Status: {}'.format(text), (10, 20), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n cv2.putText(frame, datetime.datetime.now().strftime(\n '%A %d %B %Y %I:%M:%S%p'), (10, frame.shape[0] - 10), cv2.\n FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\n cv2.imshow('Security Feed', frame)\n cv2.imshow('Thresh', 
thresh)\n cv2.imshow('Frame Delta', frameDelta)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\nvs.release()\ncv2.destroyAllWindows()\nprint('Total frame: ', j - 1)\nprint('Frame sampled: ', totalframesampled)\nprint('Total object detected: ', totalcelldetected)\n",
"step-5": "#source: https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/\r\n\r\nfrom imutils.video import VideoStream\r\nimport argparse\r\nimport datetime\r\nimport imutils\r\nimport time\r\nimport cv2\r\n\r\n\r\n#capture the video file\r\nb=\"blood.mp4\"\r\nc=\"Center.avi\"\r\nd=\"Deformed.avi\"\r\ni=\"Inlet.avi\"\r\nvideofile=c\r\nvs = cv2.VideoCapture(videofile)\r\n\r\n#width = vs.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)\r\n#height = vs.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)\r\nwidth = vs.get(3)\r\nheight=vs.get(4)\r\nprint(\"Width x: \",width, \" Height y: \",height)\r\nprint(\"Frame Number,x coordinate of ROI,Weidth,Height,Width/Height\")\r\n\r\n# initialize the first frame in the video stream\r\nfirstFrame = None\r\n\r\n# loop over the frames of the video\r\nj=0\r\ntotalframesampled=0\r\ntotalcelldetected=0\r\nwhile True:\r\n \r\n j+=1\r\n if j%1000 !=0 :\r\n continue\r\n totalframesampled+=1\r\n\t# grab the current frame and initialize the occupied/unoccupied\r\n\t# text\r\n frame = vs.read()\r\n frame = frame[1]\r\n text = \"Unoccupied\"\r\n \r\n\t# if the frame could not be grabbed, then we have reached the end\r\n\t# of the video\r\n if frame is None:\r\n break\r\n \r\n\t\r\n \r\n\t# resize the frame, convert it to grayscale, and blur it\r\n frame = imutils.resize(frame, width=500)\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\r\n \r\n\t# if the first frame is None, initialize it\r\n if firstFrame is None:\r\n firstFrame = gray\r\n continue\r\n \r\n\t\r\n\t\r\n\t\r\n\r\n\t\t# compute the absolute difference between the current frame and\r\n\t# first frame\r\n frameDelta = cv2.absdiff(firstFrame, gray)\r\n thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\r\n \r\n\t# dilate the thresholded image to fill in holes, then find contours\r\n\t# on thresholded image\r\n thresh = cv2.dilate(thresh, None, iterations=2)\r\n cnts = 
cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\r\n\t cv2.CHAIN_APPROX_SIMPLE)\r\n\t#print(cnts)\r\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\r\n #print(\"Frame: \",j)\r\n #print(cnts)\r\n \r\n\t# loop over the contours\r\n for c in cnts:\r\n #print(\"c:\",c)\r\n area=cv2.contourArea(c)\r\n #print(\"Area:\",area)\r\n minarea=250\r\n if area<=minarea:\r\n continue\r\n \r\n \r\n \r\n (x, y, w, h) = cv2.boundingRect(c)# top left x,y, wid,hei\r\n condition_center_inlet=x>440 and x<450\r\n condition_deformation=y>240 and y<300\r\n if condition_center_inlet:\r\n totalcelldetected+=1\r\n print(\"totalcelldetected:\",totalcelldetected)\r\n print(j,x,y,w,h,w/h)\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n text = \"Occupied\"\r\n k=0\r\n frameskip=10 # for center and inlet skip=10\r\n while k<frameskip:\r\n k+=1\r\n temp=vs.read()\r\n break\r\n\t\r\n\t\r\n\t\t# if the contour is too small, ignore it\r\n\t\r\n\t \r\n \r\n\t\t# compute the bounding box for the contour, draw it on the frame,\r\n\t\t# and update the text\r\n\t\r\n\t\r\n\t\t\t# draw the text and timestamp on the frame\r\n cv2.putText(frame, \"Room Status: {}\".format(text), (10, 20),\r\n\t cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\r\n cv2.putText(frame, datetime.datetime.now().strftime(\"%A %d %B %Y %I:%M:%S%p\"),\r\n\t (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\r\n \r\n\t# show the frame and record if the user presses a key\r\n cv2.imshow(\"Security Feed\", frame)\r\n cv2.imshow(\"Thresh\", thresh)\r\n cv2.imshow(\"Frame Delta\", frameDelta)\r\n key = cv2.waitKey(1) & 0xFF\r\n # if the `q` key is pressed, break from the lop\r\n if key == ord(\"q\"):\r\n break\r\n \r\n\t\r\n \r\n \r\n\t\r\n \r\n# cleanup the camera and close any open windows\r\nvs.release()\r\ncv2.destroyAllWindows()\r\nprint(\"Total frame: \",j-1)\r\nprint(\"Frame sampled: \",totalframesampled)\r\nprint(\"Total object detected: \",totalcelldetected)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
import tensorflow as tf
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
from PIL import Image
from six.moves import range
train_folder = './data/train'
test_folder = './data/valid'
dimensions = (229, 229)
max_angle = 15
# rotating image
def rotate_img(image, angle, color, filter = Image.NEAREST):
if image.mode == "P" or filter == Image.NEAREST:
matte = Image.new("1", image.size, 1) # mask
else:
matte = Image.new("L", image.size, 255) # true matte
bg = Image.new(image.mode, image.size, color)
bg.paste(
image.rotate(angle, filter),
matte.rotate(angle, filter)
)
return bg
# make gray_scale image or 1channel image
def make_greyscale_white_bg(im, r, b, g):
im = im.convert('RGBA') # Convert to RGBA
data = np.array(im) # "data" is a height x width x 4 numpy array
red, green, blue, alpha = data.T # Temporarily unpack the bands for readability
# Replace grey with white... (leaves alpha values alone...)
grey_areas = (red == r) & (blue == b) & (green == g)
data[..., :-1][grey_areas.T] = (255, 255, 255) # Transpose back needed
im2 = Image.fromarray(data)
im2 = im2.convert('L') # convert to greyscale image
#im2.show()
return im2
def process_images(folder):
classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] # get list of all sub-folders in folder
img_cnt = 0
for class_x in classes:
if os.path.isdir(class_x):
# get paths to all the images in this folder
images = [os.path.join(class_x, i) for i in sorted(os.listdir(class_x)) if i != '.DS_Store']
for image in images:
img_cnt = img_cnt + 1
if(img_cnt % 1000 == 0):
print("Processed %s images" % str(img_cnt))
im = Image.open(image)
im = im.resize(dimensions) # resize image according to dimensions set
im.save(image) # overwrite previous image file with new image
print("Finished processing images, images found = ")
print(img_cnt)
process_images(test_folder)
process_images(train_folder)
print('ok')
image_size = 229 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3), dtype=np.float32)
print(dataset.shape)
num_images = 0
for image_index, image in enumerate(image_files):
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth
print(image_data.shape)
if image_data.shape != (image_size, image_size, 3):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
folders_list = os.listdir(data_folders)
for folder in folders_list:
#print(os.path.join(data_folders, folder))
curr_folder_path = os.path.join(data_folders, folder)
if os.path.isdir(curr_folder_path):
set_filename = curr_folder_path + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# # You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(curr_folder_path, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folder, 89, True)
test_datasets = maybe_pickle(test_folder, 10, True)
def make_arrays(nb_rows, img_size):
    """Allocate uninitialized dataset/label arrays for nb_rows RGB images.

    Returns a (nb_rows, img_size, img_size, 3) float32 array and an int32
    label vector of length nb_rows, or (None, None) when nb_rows is
    zero/falsy.  Contents are uninitialized; callers fill them in.
    """
    if not nb_rows:
        return None, None
    dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32)
    labels = np.ndarray(nb_rows, dtype=np.int32)
    return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
    """Combine per-class pickles into training/validation splits.

    pickle_files: one .pickle path per class, each holding a 4-D image
    tensor.  Each class contributes an equal integer share of train_size
    and valid_size (remainders are silently dropped).  The label for every
    example is its class's index within pickle_files.

    Returns (valid_dataset, valid_labels, train_dataset, train_labels);
    the validation pair is (None, None) when valid_size is 0.
    """
    num_classes = len(pickle_files)
    valid_dataset, valid_labels = make_arrays(valid_size, image_size)
    train_dataset, train_labels = make_arrays(train_size, image_size)
    # Per-class quota for each split.
    vsize_per_class = valid_size // num_classes
    tsize_per_class = train_size // num_classes
    # Rolling write cursors into the output arrays.
    start_v, start_t = 0, 0
    end_v, end_t = vsize_per_class, tsize_per_class
    end_l = vsize_per_class+tsize_per_class
    for label, pickle_file in enumerate(pickle_files):
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)
                f.close()  # redundant inside `with`, kept as-is
            # let's shuffle the letters to have random validation and training set
            np.random.shuffle(letter_set)
            if valid_dataset is not None:
                # First vsize_per_class rows of each class go to validation.
                valid_letter = letter_set[:vsize_per_class, :, :]
                valid_dataset[start_v:end_v, :, :] = valid_letter
                valid_labels[start_v:end_v] = label
                start_v += vsize_per_class
                end_v += vsize_per_class
            # The next tsize_per_class rows go to training.
            train_letter = letter_set[vsize_per_class:end_l, :, :]
            train_dataset[start_t:end_t, :, :] = train_letter
            train_labels[start_t:end_t] = label
            start_t += tsize_per_class
            end_t += tsize_per_class
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise
    return valid_dataset, valid_labels, train_dataset, train_labels
# Total example counts, split evenly across classes by merge_datasets.
train_size = 89
valid_size = 10
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
    train_datasets, train_size, valid_size)
# _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
# print('Testing:', test_dataset.shape, test_labels.shape)
def randomize(dataset, labels):
    """Shuffle *dataset* rows and *labels* with one shared permutation.

    Uses numpy's global RNG; the i-th shuffled example keeps its original
    label.  Returns (shuffled_dataset, shuffled_labels).
    """
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
# Shuffle each split so class examples are interleaved rather than grouped.
train_dataset, train_labels = randomize(train_dataset, train_labels)
# test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
pickle_file = './bacteria.pickle'
try:
    f = open(pickle_file, 'wb')
    # Bundle all four arrays into one dict so a single pickle holds the
    # entire prepared dataset.
    save = {
        'train_dataset': train_dataset,
        'train_labels': train_labels,
        'valid_dataset': valid_dataset,
        'valid_labels': valid_labels,
        }
    pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
    f.close()
except Exception as e:
    print('Unable to save data to', pickle_file, ':', e)
    raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
|
normal
|
{
"blob_id": "28c4c09b81d63785750cee36a8efd77760cac451",
"index": 7231,
"step-1": "<mask token>\n\n\ndef rotate_img(image, angle, color, filter=Image.NEAREST):\n if image.mode == 'P' or filter == Image.NEAREST:\n matte = Image.new('1', image.size, 1)\n else:\n matte = Image.new('L', image.size, 255)\n bg = Image.new(image.mode, image.size, color)\n bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter))\n return bg\n\n\ndef make_greyscale_white_bg(im, r, b, g):\n im = im.convert('RGBA')\n data = np.array(im)\n red, green, blue, alpha = data.T\n grey_areas = (red == r) & (blue == b) & (green == g)\n data[..., :-1][grey_areas.T] = 255, 255, 255\n im2 = Image.fromarray(data)\n im2 = im2.convert('L')\n return im2\n\n\n<mask token>\n\n\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n folders_list = os.listdir(data_folders)\n for folder in folders_list:\n curr_folder_path = os.path.join(data_folders, folder)\n if os.path.isdir(curr_folder_path):\n set_filename = curr_folder_path + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' 
% set_filename)\n dataset = load_letter(curr_folder_path,\n min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n return dataset_names\n\n\n<mask token>\n\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class + tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n f.close()\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n return valid_dataset, valid_labels, train_dataset, train_labels\n\n\n<mask token>\n\n\ndef randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rotate_img(image, angle, color, filter=Image.NEAREST):\n if image.mode == 'P' or filter == Image.NEAREST:\n matte = Image.new('1', image.size, 1)\n else:\n matte = Image.new('L', image.size, 255)\n bg = Image.new(image.mode, image.size, color)\n bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter))\n return bg\n\n\ndef make_greyscale_white_bg(im, r, b, g):\n im = im.convert('RGBA')\n data = np.array(im)\n red, green, blue, alpha = data.T\n grey_areas = (red == r) & (blue == b) & (green == g)\n data[..., :-1][grey_areas.T] = 255, 255, 255\n im2 = Image.fromarray(data)\n im2 = im2.convert('L')\n return im2\n\n\ndef process_images(folder):\n classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))]\n img_cnt = 0\n for class_x in classes:\n if os.path.isdir(class_x):\n images = [os.path.join(class_x, i) for i in sorted(os.listdir(\n class_x)) if i != '.DS_Store']\n for image in images:\n img_cnt = img_cnt + 1\n if img_cnt % 1000 == 0:\n print('Processed %s images' % str(img_cnt))\n im = Image.open(image)\n im = im.resize(dimensions)\n im.save(image)\n print('Finished processing images, images found = ')\n print(img_cnt)\n\n\n<mask token>\n\n\ndef load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3\n ), dtype=np.float32)\n print(dataset.shape)\n num_images = 0\n for image_index, image in enumerate(image_files):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n print(image_data.shape)\n if image_data.shape != (image_size, image_size, 3):\n raise Exception('Unexpected image shape: %s' % str(\n image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, \"- it's ok, skipping.\"\n )\n dataset = dataset[0:num_images, :, :]\n if 
num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' % (\n num_images, min_num_images))\n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset\n\n\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n folders_list = os.listdir(data_folders)\n for folder in folders_list:\n curr_folder_path = os.path.join(data_folders, folder)\n if os.path.isdir(curr_folder_path):\n set_filename = curr_folder_path + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' % set_filename)\n dataset = load_letter(curr_folder_path,\n min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n return dataset_names\n\n\n<mask token>\n\n\ndef make_arrays(nb_rows, img_size):\n if nb_rows:\n dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32\n )\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n dataset, labels = None, None\n return dataset, labels\n\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class + tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n f.close()\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = 
letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n return valid_dataset, valid_labels, train_dataset, train_labels\n\n\n<mask token>\n\n\ndef randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef rotate_img(image, angle, color, filter=Image.NEAREST):\n if image.mode == 'P' or filter == Image.NEAREST:\n matte = Image.new('1', image.size, 1)\n else:\n matte = Image.new('L', image.size, 255)\n bg = Image.new(image.mode, image.size, color)\n bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter))\n return bg\n\n\ndef make_greyscale_white_bg(im, r, b, g):\n im = im.convert('RGBA')\n data = np.array(im)\n red, green, blue, alpha = data.T\n grey_areas = (red == r) & (blue == b) & (green == g)\n data[..., :-1][grey_areas.T] = 255, 255, 255\n im2 = Image.fromarray(data)\n im2 = im2.convert('L')\n return im2\n\n\ndef process_images(folder):\n classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))]\n img_cnt = 0\n for class_x in classes:\n if os.path.isdir(class_x):\n images = [os.path.join(class_x, i) for i in sorted(os.listdir(\n class_x)) if i != '.DS_Store']\n for image in images:\n img_cnt = img_cnt + 1\n if img_cnt % 1000 == 0:\n print('Processed %s images' % str(img_cnt))\n im = Image.open(image)\n im = im.resize(dimensions)\n im.save(image)\n print('Finished processing images, images found = ')\n print(img_cnt)\n\n\nprocess_images(test_folder)\nprocess_images(train_folder)\nprint('ok')\n<mask token>\n\n\ndef load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3\n ), dtype=np.float32)\n print(dataset.shape)\n num_images = 0\n for image_index, image in enumerate(image_files):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n print(image_data.shape)\n if image_data.shape != (image_size, image_size, 3):\n raise Exception('Unexpected image shape: %s' % str(\n image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, \"- 
it's ok, skipping.\"\n )\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' % (\n num_images, min_num_images))\n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset\n\n\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n folders_list = os.listdir(data_folders)\n for folder in folders_list:\n curr_folder_path = os.path.join(data_folders, folder)\n if os.path.isdir(curr_folder_path):\n set_filename = curr_folder_path + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' % set_filename)\n dataset = load_letter(curr_folder_path,\n min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n return dataset_names\n\n\n<mask token>\n\n\ndef make_arrays(nb_rows, img_size):\n if nb_rows:\n dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32\n )\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n dataset, labels = None, None\n return dataset, labels\n\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class + tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n f.close()\n 
np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n return valid_dataset, valid_labels, train_dataset, train_labels\n\n\n<mask token>\nprint('Training:', train_dataset.shape, train_labels.shape)\nprint('Validation:', valid_dataset.shape, valid_labels.shape)\n\n\ndef randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\n<mask token>\ntry:\n f = open(pickle_file, 'wb')\n save = {'train_dataset': train_dataset, 'train_labels': train_labels,\n 'valid_dataset': valid_dataset, 'valid_labels': valid_labels}\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n f.close()\nexcept Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n<mask token>\nprint('Compressed pickle size:', statinfo.st_size)\n",
"step-4": "from __future__ import print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nimport tarfile\nimport tensorflow as tf\nfrom IPython.display import display, Image\nfrom scipy import ndimage\nfrom sklearn.linear_model import LogisticRegression\nfrom six.moves.urllib.request import urlretrieve\nfrom six.moves import cPickle as pickle\nfrom PIL import Image\nfrom six.moves import range\ntrain_folder = './data/train'\ntest_folder = './data/valid'\ndimensions = 229, 229\nmax_angle = 15\n\n\ndef rotate_img(image, angle, color, filter=Image.NEAREST):\n if image.mode == 'P' or filter == Image.NEAREST:\n matte = Image.new('1', image.size, 1)\n else:\n matte = Image.new('L', image.size, 255)\n bg = Image.new(image.mode, image.size, color)\n bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter))\n return bg\n\n\ndef make_greyscale_white_bg(im, r, b, g):\n im = im.convert('RGBA')\n data = np.array(im)\n red, green, blue, alpha = data.T\n grey_areas = (red == r) & (blue == b) & (green == g)\n data[..., :-1][grey_areas.T] = 255, 255, 255\n im2 = Image.fromarray(data)\n im2 = im2.convert('L')\n return im2\n\n\ndef process_images(folder):\n classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))]\n img_cnt = 0\n for class_x in classes:\n if os.path.isdir(class_x):\n images = [os.path.join(class_x, i) for i in sorted(os.listdir(\n class_x)) if i != '.DS_Store']\n for image in images:\n img_cnt = img_cnt + 1\n if img_cnt % 1000 == 0:\n print('Processed %s images' % str(img_cnt))\n im = Image.open(image)\n im = im.resize(dimensions)\n im.save(image)\n print('Finished processing images, images found = ')\n print(img_cnt)\n\n\nprocess_images(test_folder)\nprocess_images(train_folder)\nprint('ok')\nimage_size = 229\npixel_depth = 255.0\n\n\ndef load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3\n ), dtype=np.float32)\n 
print(dataset.shape)\n num_images = 0\n for image_index, image in enumerate(image_files):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n print(image_data.shape)\n if image_data.shape != (image_size, image_size, 3):\n raise Exception('Unexpected image shape: %s' % str(\n image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, \"- it's ok, skipping.\"\n )\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' % (\n num_images, min_num_images))\n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset\n\n\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n folders_list = os.listdir(data_folders)\n for folder in folders_list:\n curr_folder_path = os.path.join(data_folders, folder)\n if os.path.isdir(curr_folder_path):\n set_filename = curr_folder_path + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' 
% set_filename)\n dataset = load_letter(curr_folder_path,\n min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n return dataset_names\n\n\ntrain_datasets = maybe_pickle(train_folder, 89, True)\ntest_datasets = maybe_pickle(test_folder, 10, True)\n\n\ndef make_arrays(nb_rows, img_size):\n if nb_rows:\n dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32\n )\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n dataset, labels = None, None\n return dataset, labels\n\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class + tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n f.close()\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n return valid_dataset, valid_labels, train_dataset, train_labels\n\n\ntrain_size = 89\nvalid_size = 10\nvalid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(\n train_datasets, 
train_size, valid_size)\nprint('Training:', train_dataset.shape, train_labels.shape)\nprint('Validation:', valid_dataset.shape, valid_labels.shape)\n\n\ndef randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\ntrain_dataset, train_labels = randomize(train_dataset, train_labels)\nvalid_dataset, valid_labels = randomize(valid_dataset, valid_labels)\npickle_file = './bacteria.pickle'\ntry:\n f = open(pickle_file, 'wb')\n save = {'train_dataset': train_dataset, 'train_labels': train_labels,\n 'valid_dataset': valid_dataset, 'valid_labels': valid_labels}\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n f.close()\nexcept Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\nstatinfo = os.stat(pickle_file)\nprint('Compressed pickle size:', statinfo.st_size)\n",
"step-5": "\r\nfrom __future__ import print_function\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport tarfile\r\nimport tensorflow as tf\r\nfrom IPython.display import display, Image\r\nfrom scipy import ndimage\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom six.moves.urllib.request import urlretrieve\r\nfrom six.moves import cPickle as pickle\r\nfrom PIL import Image\r\nfrom six.moves import range\r\n\r\ntrain_folder = './data/train'\r\ntest_folder = './data/valid'\r\ndimensions = (229, 229)\r\nmax_angle = 15\r\n\r\n\r\n# rotating image\r\ndef rotate_img(image, angle, color, filter = Image.NEAREST):\r\n\r\n if image.mode == \"P\" or filter == Image.NEAREST:\r\n matte = Image.new(\"1\", image.size, 1) # mask\r\n else:\r\n matte = Image.new(\"L\", image.size, 255) # true matte\r\n bg = Image.new(image.mode, image.size, color)\r\n bg.paste(\r\n image.rotate(angle, filter),\r\n matte.rotate(angle, filter)\r\n )\r\n return bg\r\n\r\n# make gray_scale image or 1channel image\r\ndef make_greyscale_white_bg(im, r, b, g):\r\n\r\n im = im.convert('RGBA') # Convert to RGBA\r\n\r\n\r\n data = np.array(im) # \"data\" is a height x width x 4 numpy array\r\n red, green, blue, alpha = data.T # Temporarily unpack the bands for readability\r\n\r\n # Replace grey with white... 
(leaves alpha values alone...)\r\n grey_areas = (red == r) & (blue == b) & (green == g)\r\n data[..., :-1][grey_areas.T] = (255, 255, 255) # Transpose back needed\r\n\r\n im2 = Image.fromarray(data)\r\n im2 = im2.convert('L') # convert to greyscale image\r\n\r\n\r\n\r\n #im2.show()\r\n\r\n return im2\r\n\r\ndef process_images(folder):\r\n\r\n classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] # get list of all sub-folders in folder\r\n img_cnt = 0\r\n\r\n for class_x in classes:\r\n\r\n if os.path.isdir(class_x):\r\n\r\n # get paths to all the images in this folder\r\n images = [os.path.join(class_x, i) for i in sorted(os.listdir(class_x)) if i != '.DS_Store']\r\n\r\n\r\n for image in images:\r\n\r\n img_cnt = img_cnt + 1\r\n\r\n if(img_cnt % 1000 == 0):\r\n print(\"Processed %s images\" % str(img_cnt))\r\n\r\n im = Image.open(image)\r\n im = im.resize(dimensions) # resize image according to dimensions set\r\n im.save(image) # overwrite previous image file with new image\r\n\r\n print(\"Finished processing images, images found = \")\r\n print(img_cnt)\r\n\r\n\r\nprocess_images(test_folder)\r\nprocess_images(train_folder)\r\n\r\nprint('ok')\r\n\r\nimage_size = 229 # Pixel width and height.\r\npixel_depth = 255.0 # Number of levels per pixel.\r\n\r\n\r\ndef load_letter(folder, min_num_images):\r\n\r\n\r\n image_files = os.listdir(folder)\r\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3), dtype=np.float32)\r\n print(dataset.shape)\r\n\r\n num_images = 0\r\n for image_index, image in enumerate(image_files):\r\n image_file = os.path.join(folder, image)\r\n try:\r\n image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth\r\n print(image_data.shape)\r\n\r\n if image_data.shape != (image_size, image_size, 3):\r\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\r\n dataset[num_images, :, :] = image_data\r\n num_images = num_images + 1\r\n except IOError as e:\r\n 
print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\r\n\r\n dataset = dataset[0:num_images, :, :]\r\n if num_images < min_num_images:\r\n raise Exception('Many fewer images than expected: %d < %d' %\r\n (num_images, min_num_images))\r\n\r\n print('Full dataset tensor:', dataset.shape)\r\n print('Mean:', np.mean(dataset))\r\n print('Standard deviation:', np.std(dataset))\r\n return dataset\r\n\r\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\r\n dataset_names = []\r\n folders_list = os.listdir(data_folders)\r\n for folder in folders_list:\r\n\r\n\r\n #print(os.path.join(data_folders, folder))\r\n curr_folder_path = os.path.join(data_folders, folder)\r\n if os.path.isdir(curr_folder_path):\r\n set_filename = curr_folder_path + '.pickle'\r\n dataset_names.append(set_filename)\r\n if os.path.exists(set_filename) and not force:\r\n # # You may override by setting force=True.\r\n print('%s already present - Skipping pickling.' % set_filename)\r\n else:\r\n print('Pickling %s.' 
% set_filename)\r\n dataset = load_letter(curr_folder_path, min_num_images_per_class)\r\n try:\r\n with open(set_filename, 'wb') as f:\r\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\r\n f.close()\r\n except Exception as e:\r\n print('Unable to save data to', set_filename, ':', e)\r\n\r\n return dataset_names\r\n\r\ntrain_datasets = maybe_pickle(train_folder, 89, True)\r\ntest_datasets = maybe_pickle(test_folder, 10, True)\r\n\r\n\r\ndef make_arrays(nb_rows, img_size):\r\n if nb_rows:\r\n dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32)\r\n labels = np.ndarray(nb_rows, dtype=np.int32)\r\n else:\r\n dataset, labels = None, None\r\n return dataset, labels\r\n\r\ndef merge_datasets(pickle_files, train_size, valid_size=0):\r\n num_classes = len(pickle_files)\r\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\r\n train_dataset, train_labels = make_arrays(train_size, image_size)\r\n vsize_per_class = valid_size // num_classes\r\n tsize_per_class = train_size // num_classes\r\n\r\n start_v, start_t = 0, 0\r\n end_v, end_t = vsize_per_class, tsize_per_class\r\n end_l = vsize_per_class+tsize_per_class\r\n for label, pickle_file in enumerate(pickle_files):\r\n try:\r\n with open(pickle_file, 'rb') as f:\r\n letter_set = pickle.load(f)\r\n f.close()\r\n # let's shuffle the letters to have random validation and training set\r\n np.random.shuffle(letter_set)\r\n if valid_dataset is not None:\r\n valid_letter = letter_set[:vsize_per_class, :, :]\r\n valid_dataset[start_v:end_v, :, :] = valid_letter\r\n valid_labels[start_v:end_v] = label\r\n start_v += vsize_per_class\r\n end_v += vsize_per_class\r\n\r\n train_letter = letter_set[vsize_per_class:end_l, :, :]\r\n train_dataset[start_t:end_t, :, :] = train_letter\r\n train_labels[start_t:end_t] = label\r\n start_t += tsize_per_class\r\n end_t += tsize_per_class\r\n except Exception as e:\r\n print('Unable to process data from', pickle_file, ':', e)\r\n raise\r\n\r\n return 
valid_dataset, valid_labels, train_dataset, train_labels\r\n\r\n\r\ntrain_size = 89\r\nvalid_size = 10\r\n\r\n\r\nvalid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(\r\n train_datasets, train_size, valid_size)\r\n# _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)\r\n\r\nprint('Training:', train_dataset.shape, train_labels.shape)\r\nprint('Validation:', valid_dataset.shape, valid_labels.shape)\r\n# print('Testing:', test_dataset.shape, test_labels.shape)\r\n\r\ndef randomize(dataset, labels):\r\n permutation = np.random.permutation(labels.shape[0])\r\n shuffled_dataset = dataset[permutation,:,:]\r\n shuffled_labels = labels[permutation]\r\n return shuffled_dataset, shuffled_labels\r\ntrain_dataset, train_labels = randomize(train_dataset, train_labels)\r\n# test_dataset, test_labels = randomize(test_dataset, test_labels)\r\nvalid_dataset, valid_labels = randomize(valid_dataset, valid_labels)\r\n\r\n\r\npickle_file = './bacteria.pickle'\r\n\r\ntry:\r\n f = open(pickle_file, 'wb')\r\n save = {\r\n 'train_dataset': train_dataset,\r\n 'train_labels': train_labels,\r\n 'valid_dataset': valid_dataset,\r\n 'valid_labels': valid_labels,\r\n }\r\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\r\n f.close()\r\nexcept Exception as e:\r\n print('Unable to save data to', pickle_file, ':', e)\r\n raise\r\n\r\n\r\nstatinfo = os.stat(pickle_file)\r\nprint('Compressed pickle size:', statinfo.st_size)\r\n",
"step-ids": [
5,
8,
9,
11,
12
]
}
|
[
5,
8,
9,
11,
12
] |
# Read a tuple such as "(4,6,15,35)" from stdin and print the size of the
# largest group built greedily from the numbers, where a number joins the
# current group when it shares a common factor (> 1) with some member.
# A single leading/embedded 1 is discarded up front (1 shares no factor).
string = input()
string = string.replace("(", "")
string = string.replace(")", "")
string = list(map(int, string.split(",")))
if 1 in string:
    string.remove(1)
mid = [string[0]]   # current group under construction
string.remove(string[0])
result = 0          # size of the largest group seen so far
tar = 0
while string != []:
    tar = 0
    length = len(string)
    i = 0
    while i < len(string):
        cout = 0
        count = 0
        for j in mid:
            # Look for any common factor between string[i] and member j.
            for k in range(2, min(string[i], j) + 1):
                if (string[i] % k == 0) & (j % k == 0):
                    mid.append(string[i])
                    string.remove(string[i])
                    count = 1
                    break
            if count == 0:
                cout += 1
            else:
                break
        if count == 0:
            i += 1
        if cout == len(mid):
            tar += 1
    if (tar == length) | (string == []):
        # No remaining number joins the current group: record its size and
        # restart a new group from the next leftover number.
        if len(mid) > result:
            result = len(mid)
        if string != []:
            mid = [string[0]]
            string.remove(string[0])
# BUG FIX: this line previously read `reuslt = len(mid)` (typo), so the last
# group was never counted when the while loop body never ran — e.g. input
# "(1,5)" printed 0 instead of 1.
if len(mid) > result:
    result = len(mid)
print(result)
|
normal
|
{
"blob_id": "6a8cab1fceffa0d70441cc600137417a8b81d7b1",
"index": 6897,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif 1 in string:\n string.remove(1)\n<mask token>\nstring.remove(string[0])\n<mask token>\nwhile string != []:\n tar = 0\n length = len(string)\n i = 0\n while i < len(string):\n cout = 0\n count = 0\n for j in mid:\n for k in range(2, min(string[i], j) + 1):\n if (string[i] % k == 0) & (j % k == 0):\n mid.append(string[i])\n string.remove(string[i])\n count = 1\n break\n if count == 0:\n cout += 1\n else:\n break\n if count == 0:\n i += 1\n if cout == len(mid):\n tar += 1\n if (tar == length) | (string == []):\n if len(mid) > result:\n result = len(mid)\n if string != []:\n mid = [string[0]]\n string.remove(string[0])\nif len(mid) > result:\n reuslt = len(mid)\nprint(result)\n",
"step-3": "string = input()\nstring = string.replace('(', '')\nstring = string.replace(')', '')\nstring = list(map(int, string.split(',')))\nif 1 in string:\n string.remove(1)\nmid = [string[0]]\nstring.remove(string[0])\nresult = 0\ntar = 0\nwhile string != []:\n tar = 0\n length = len(string)\n i = 0\n while i < len(string):\n cout = 0\n count = 0\n for j in mid:\n for k in range(2, min(string[i], j) + 1):\n if (string[i] % k == 0) & (j % k == 0):\n mid.append(string[i])\n string.remove(string[i])\n count = 1\n break\n if count == 0:\n cout += 1\n else:\n break\n if count == 0:\n i += 1\n if cout == len(mid):\n tar += 1\n if (tar == length) | (string == []):\n if len(mid) > result:\n result = len(mid)\n if string != []:\n mid = [string[0]]\n string.remove(string[0])\nif len(mid) > result:\n reuslt = len(mid)\nprint(result)\n",
"step-4": "string=input();\nstring=string.replace(\"(\",\"\");\nstring=string.replace(\")\",\"\");\nstring=list(map(int,string.split(\",\")));\nif(1 in string):\n string.remove(1);\nmid=[string[0]];\nstring.remove(string[0]);\nresult=0;\ntar=0;\nwhile(string!=[]):\n tar=0;\n length=len(string);\n i=0\n while(i<len(string)):\n cout=0;\n count=0\n for j in mid:\n for k in range(2,min(string[i],j)+1):\n if(string[i]%k==0)&(j%k==0):\n mid.append(string[i]);\n string.remove(string[i]);\n count=1;\n break;\n if(count==0):\n cout+=1;\n else:\n break;\n if(count==0):\n i+=1;\n if(cout==len(mid)):\n tar+=1;\n if (tar == length)|(string==[]):\n if (len(mid) > result):\n result = len(mid);\n if(string!=[]):\n mid = [string[0]];\n string.remove((string[0]));\nif(len(mid)>result):\n reuslt=len(mid);\nprint(result)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sys import getsizeof
# using parenthesis indicates that we are creating a generator
a = (b for b in range(10))
print(getsizeof(a))
c = [b for b in range(10)]
# c uses more memory than a
print(getsizeof(c))
for b in a:
print(b)
print(sum(a)) # the sequence has disappeared
|
normal
|
{
"blob_id": "2ee4b31f880441e87c437d7cc4601f260f34ae24",
"index": 6574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(getsizeof(a))\n<mask token>\nprint(getsizeof(c))\nfor b in a:\n print(b)\nprint(sum(a))\n",
"step-3": "<mask token>\na = (b for b in range(10))\nprint(getsizeof(a))\nc = [b for b in range(10)]\nprint(getsizeof(c))\nfor b in a:\n print(b)\nprint(sum(a))\n",
"step-4": "from sys import getsizeof\na = (b for b in range(10))\nprint(getsizeof(a))\nc = [b for b in range(10)]\nprint(getsizeof(c))\nfor b in a:\n print(b)\nprint(sum(a))\n",
"step-5": "from sys import getsizeof\n\n# using parenthesis indicates that we are creating a generator\na = (b for b in range(10))\n\nprint(getsizeof(a))\n\nc = [b for b in range(10)]\n\n# c uses more memory than a\nprint(getsizeof(c))\n\nfor b in a:\n print(b)\n\nprint(sum(a)) # the sequence has disappeared\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
help find Holly find dups in the PC's
Given a particular dir - report the dupset of each of the files so we can see
where the dups are
"""
import os, sys, re
from comms.dup_manager import DupManager
class DupFinder (DupManager):
base_archives_path = '/Volumes/archives/CommunicationsImageCollection/'
base_dedup_path = '/Volumes/cic-de-duped/'
def __init__ (self, dup_data_path):
DupManager.__init__ (self, dup_data_path)
def make_deduped_path (self, archive_path):
# return archive_path
rel_dedup_path = archive_path.replace (self.base_archives_path, '')
# Kludge for Stage / Field Projects
if rel_dedup_path.startswith('Staging'):
rel_dedup_path = rel_dedup_path.replace('Staging', 'Field Projects')
return os.path.join (self.base_dedup_path, rel_dedup_path)
def make_archives_path (self, dedup_path):
rel_archives_path = dedup_path.replace (self.base_dedup_path, '')
# Kludge for Stage / Field Projects
if rel_archives_path.startswith('Field Projects'):
rel_archives_path = rel_archives_path.replace('Field Projects', 'Staging')
return os.path.join (self.base_archives_path, rel_archives_path)
def find_dups (self, dir_path):
return self.find_dups_for_file(dir_path)
def find_dups_for_directory (self, dirpath):
dupset ={}
for filename in self.list_dir(dirpath):
path = os.path.join(dirpath, filename)
dups = self.find_dups (path)
if dups:
dupset[path] = dups
return dupset
def get_dup_display_path (self, dup_path):
default_base_dup_display = os.path.join(self.base_dedup_path, 'CIC-ExternalDisk1/')
if dup_path.startswith (default_base_dup_display):
return dup_path.replace(default_base_dup_display, '')
else:
return dup_path.replace (self.base_dedup_path, '')
def report_dir (self, dir_path):
"""
print a list of duplicates, the one which exists on disk is marked with an asterisk
:param dir_path: The path to the directory to be reported
:return:
"""
print len(os.listdir(dir_path)), 'in archive directory'
dupset = self.find_dups_for_directory (dir_path)
keys = dupset.keys()
keys.sort()
print '- ', len(keys), 'dups found'
for key in keys:
# print '\n', key.replace(archives_base_path, '')
dedup_key_path = self.make_deduped_path(key)
# print '\n', '{}{}'.format(dedup_key_path, os.path.exists(dedup_key_path) and ' *' or '')
print '\n', '{}{}'.format(self.get_dup_display_path(dedup_key_path), os.path.exists(dedup_key_path) and ' *' or '')
dups = dupset[key]
for dup in dups:
dedup_path = self.make_deduped_path(dup)
# print ' - {}{}'.format(dedup_path, os.path.exists(dedup_path) and ' *' or '')
print ' - {}{}'.format(self.get_dup_display_path(dedup_path), os.path.exists(dedup_path) and ' *' or '')
def list_dir (self, frag):
if frag[0] == '/':
path = frag
else:
# path = os.path.join(base_path, frag)
path = os.path.join(self.base_dedup_path, frag)
print 'PATH: ', path
return os.listdir (path)
if __name__ == '__main__':
# base_path = '/Volumes/archives/CommunicationsImageCollection/Staging'
# filepath = os.path.join (archive_base_path, rel_path)
if 0: # search under CIC-ExternalDisk1
archive_base_path = '/Volumes/archives/CommunicationsImageCollection/CIC-ExternalDisk1'
deduped_base_path = None # default
rel_path = 'disc 182/Emily CoBabe Ammann'
if 0: # search under field projects
archive_base_path = '/Volumes/archives/CommunicationsImageCollection/Staging'
deduped_base_path = '/Volumes/cic-de-duped/Field Projects'
rel_path = 'Field Project-HIAPER-FP2/HIAPER 8-19-05/8-19-05'
rel_path = 'Field Project-HIAPER-FP2/HIAPER 8-19-05/8-19-05/tif&jpgs'
if 1: # search under field projects
archive_base_path = '/Volumes/archives/CommunicationsImageCollection/Staging'
rel_path = 'SOARS-3/SOARS 11-1/HIRO-mentors'
rel_path = 'Field Project-ARISTO-FP21/jpgs'
dup_data_path = '/Users/ostwald/Documents/Comms/Composite_DB/master_check_sum_dups.json'
print dup_data_path
# finder = DupFinder (dup_data_path, archive_base_path, deduped_base_path)
finder = DupFinder (dup_data_path)
dir_path = os.path.join (archive_base_path, rel_path)
print 'DIR_PATH:', dir_path
finder.report_dir(dir_path)
if 0: # test some paths
path = '/Volumes/cic-de-duped/CIC-ExternalDisk1/disc 19/HIAPER take-off/8-19-05/tif&jpgs/IMG_5820.tif'
print finder.make_deduped_path (path)
path ='/Volumes/archives/CommunicationsImageCollection/Staging/Field Project-HIAPER-FP2/HIAPER Backups/HIAPER 2/HIAPER take-off/8-19-05/jpgs/IMG_5820.jpg'
print finder.make_deduped_path(path)
|
normal
|
{
"blob_id": "037a02ff2c0699acdd1fefbe60098c93cd99e777",
"index": 1987,
"step-1": "\"\"\"\nhelp find Holly find dups in the PC's\n\nGiven a particular dir - report the dupset of each of the files so we can see\nwhere the dups are\n\n\"\"\"\nimport os, sys, re\n\nfrom comms.dup_manager import DupManager\n\nclass DupFinder (DupManager):\n\n base_archives_path = '/Volumes/archives/CommunicationsImageCollection/'\n base_dedup_path = '/Volumes/cic-de-duped/'\n\n def __init__ (self, dup_data_path):\n DupManager.__init__ (self, dup_data_path)\n\n def make_deduped_path (self, archive_path):\n # return archive_path\n rel_dedup_path = archive_path.replace (self.base_archives_path, '')\n # Kludge for Stage / Field Projects\n if rel_dedup_path.startswith('Staging'):\n rel_dedup_path = rel_dedup_path.replace('Staging', 'Field Projects')\n\n return os.path.join (self.base_dedup_path, rel_dedup_path)\n\n def make_archives_path (self, dedup_path):\n rel_archives_path = dedup_path.replace (self.base_dedup_path, '')\n # Kludge for Stage / Field Projects\n if rel_archives_path.startswith('Field Projects'):\n rel_archives_path = rel_archives_path.replace('Field Projects', 'Staging')\n\n return os.path.join (self.base_archives_path, rel_archives_path)\n\n def find_dups (self, dir_path):\n return self.find_dups_for_file(dir_path)\n\n def find_dups_for_directory (self, dirpath):\n dupset ={}\n for filename in self.list_dir(dirpath):\n path = os.path.join(dirpath, filename)\n dups = self.find_dups (path)\n if dups:\n dupset[path] = dups\n return dupset\n\n def get_dup_display_path (self, dup_path):\n default_base_dup_display = os.path.join(self.base_dedup_path, 'CIC-ExternalDisk1/')\n if dup_path.startswith (default_base_dup_display):\n return dup_path.replace(default_base_dup_display, '')\n else:\n return dup_path.replace (self.base_dedup_path, '')\n\n def report_dir (self, dir_path):\n \"\"\"\n print a list of duplicates, the one which exists on disk is marked with an asterisk\n :param dir_path: The path to the directory to be reported\n :return:\n \"\"\"\n 
print len(os.listdir(dir_path)), 'in archive directory'\n dupset = self.find_dups_for_directory (dir_path)\n keys = dupset.keys()\n keys.sort()\n print '- ', len(keys), 'dups found'\n for key in keys:\n # print '\\n', key.replace(archives_base_path, '')\n dedup_key_path = self.make_deduped_path(key)\n # print '\\n', '{}{}'.format(dedup_key_path, os.path.exists(dedup_key_path) and ' *' or '')\n print '\\n', '{}{}'.format(self.get_dup_display_path(dedup_key_path), os.path.exists(dedup_key_path) and ' *' or '')\n dups = dupset[key]\n for dup in dups:\n dedup_path = self.make_deduped_path(dup)\n # print ' - {}{}'.format(dedup_path, os.path.exists(dedup_path) and ' *' or '')\n print ' - {}{}'.format(self.get_dup_display_path(dedup_path), os.path.exists(dedup_path) and ' *' or '')\n\n def list_dir (self, frag):\n if frag[0] == '/':\n path = frag\n else:\n # path = os.path.join(base_path, frag)\n path = os.path.join(self.base_dedup_path, frag)\n\n print 'PATH: ', path\n return os.listdir (path)\n\nif __name__ == '__main__':\n\n\n # base_path = '/Volumes/archives/CommunicationsImageCollection/Staging'\n # filepath = os.path.join (archive_base_path, rel_path)\n\n if 0: # search under CIC-ExternalDisk1\n archive_base_path = '/Volumes/archives/CommunicationsImageCollection/CIC-ExternalDisk1'\n deduped_base_path = None # default\n rel_path = 'disc 182/Emily CoBabe Ammann'\n\n if 0: # search under field projects\n archive_base_path = '/Volumes/archives/CommunicationsImageCollection/Staging'\n deduped_base_path = '/Volumes/cic-de-duped/Field Projects'\n rel_path = 'Field Project-HIAPER-FP2/HIAPER 8-19-05/8-19-05'\n rel_path = 'Field Project-HIAPER-FP2/HIAPER 8-19-05/8-19-05/tif&jpgs'\n\n if 1: # search under field projects\n archive_base_path = '/Volumes/archives/CommunicationsImageCollection/Staging'\n rel_path = 'SOARS-3/SOARS 11-1/HIRO-mentors'\n rel_path = 'Field Project-ARISTO-FP21/jpgs'\n\n dup_data_path = 
'/Users/ostwald/Documents/Comms/Composite_DB/master_check_sum_dups.json'\n print dup_data_path\n # finder = DupFinder (dup_data_path, archive_base_path, deduped_base_path)\n finder = DupFinder (dup_data_path)\n dir_path = os.path.join (archive_base_path, rel_path)\n print 'DIR_PATH:', dir_path\n finder.report_dir(dir_path)\n\n if 0: # test some paths\n path = '/Volumes/cic-de-duped/CIC-ExternalDisk1/disc 19/HIAPER take-off/8-19-05/tif&jpgs/IMG_5820.tif'\n print finder.make_deduped_path (path)\n\n path ='/Volumes/archives/CommunicationsImageCollection/Staging/Field Project-HIAPER-FP2/HIAPER Backups/HIAPER 2/HIAPER take-off/8-19-05/jpgs/IMG_5820.jpg'\n print finder.make_deduped_path(path)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from matplotlib import cm
from datascience.visu.util import plt, save_fig, get_figure
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
y = np.array([
[0.8869, 1.],
[1.-0.578, 0.],
[0.7959, 1.],
[0.8618, 1.],
[1.-0.2278, 0.],
[0.6607, 1.],
[0.7006, 1.],
[1.-0.4859, 0.],
[0.6935, 1.],
[0.9048, 1.],
[0.6681, 1.],
[0.7585, 1.],
[1.-0.5063, 0.],
[1.-0.4516, 0.],
[1.-0.5158, 0.],
[1.-0.5873, 0.],
[1.-0.7682, 0.],
[0.8620, 1.],
[1-0.7337, 0.],
[0.9412, 1.],
[1.-0.5819, 0.],
[.2738, 1.],
[1.-.5136, 0.],
[.8819, 1.],
[1.-.4387, 0.],
[1.-.6257, 0.],
[.7857, 1.],
[1.-.3722, 0.],
[1.-0.8049, 0.],
[0.7864, 1.],
[1.-0.2372, 0.],
[0.7934, 1.],
[0.9583, 1.],
[0.9739, 1.],
[1.-0.3556, 0.],
[1.-0.2551, 0.],
[1.-0.4532, 0.],
[0.4605, 1.],
[0.7572, 1.],
[0.9496, 1.],
[0.8268, 1.],
[1.-0.4876, 0.],
[0.8523, 1.],
[1.-0.2629, 0.],
[1.-0.9021, 0.],
[0.6977, 1.],
[0.9142, 1.],
[1.-0.8175, 0.],
[1.-0.4865, 0.],
[0.9110, 1.],
[1.-0.2159, 0.],
[1.-0.6943, 0.],
[1.-0.2753, 0.],
[0.8590, 1.],
[0.8273, 1.],
[1.-0.5169, 0.],
[1.-0.7412, 0.]
])
fpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)
ax = plt('roc_curve').gca()
ax.set_xlim([-0.007, 1.0])
ax.set_ylim([0.0, 1.01])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))
ax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')
ax.plot(fpr, tpr, color='yellow', label='IArt')
ax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')
ax.legend(loc="lower right")
ax = plt('confusion_matrix').gca()
y_threshold = (y > 0.7).astype(int)
matrix = confusion_matrix(y[:, 1], y_threshold[:, 0])
matrix = matrix / matrix.astype(np.float).sum(axis=1)
im = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))
ax.axis('off')
get_figure('confusion_matrix').colorbar(im)
save_fig()
|
normal
|
{
"blob_id": "5b3514af839c132fda9a2e6e178ae62f780f291e",
"index": 3388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\n<mask token>\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-3": "<mask token>\ny = np.array([[0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, \n 1.0], [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859,\n 0.0], [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0], [1.0 -\n 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0], [1.0 - 0.5873, \n 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0], [1 - 0.7337, 0.0], [0.9412, \n 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0], [1.0 - 0.5136, 0.0], [0.8819,\n 1.0], [1.0 - 0.4387, 0.0], [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - \n 0.3722, 0.0], [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0],\n [0.7934, 1.0], [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 -\n 0.2551, 0.0], [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [\n 0.9496, 1.0], [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 -\n 0.2629, 0.0], [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 -\n 0.8175, 0.0], [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0], [\n 1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0], [\n 1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0]])\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\nax = plt('roc_curve').gca()\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\nim = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-4": "from matplotlib import cm\nfrom datascience.visu.util import plt, save_fig, get_figure\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix\nimport numpy as np\ny = np.array([[0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, \n 1.0], [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859,\n 0.0], [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0], [1.0 -\n 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0], [1.0 - 0.5873, \n 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0], [1 - 0.7337, 0.0], [0.9412, \n 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0], [1.0 - 0.5136, 0.0], [0.8819,\n 1.0], [1.0 - 0.4387, 0.0], [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - \n 0.3722, 0.0], [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0],\n [0.7934, 1.0], [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 -\n 0.2551, 0.0], [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [\n 0.9496, 1.0], [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 -\n 0.2629, 0.0], [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 -\n 0.8175, 0.0], [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0], [\n 1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0], [\n 1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0]])\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\nax = plt('roc_curve').gca()\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\nim = 
ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-5": "from matplotlib import cm\n\nfrom datascience.visu.util import plt, save_fig, get_figure\n\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix\n\nimport numpy as np\n\ny = np.array([\n [0.8869, 1.],\n [1.-0.578, 0.],\n [0.7959, 1.],\n [0.8618, 1.],\n [1.-0.2278, 0.],\n [0.6607, 1.],\n [0.7006, 1.],\n [1.-0.4859, 0.],\n [0.6935, 1.],\n [0.9048, 1.],\n [0.6681, 1.],\n [0.7585, 1.],\n [1.-0.5063, 0.],\n [1.-0.4516, 0.],\n [1.-0.5158, 0.],\n [1.-0.5873, 0.],\n [1.-0.7682, 0.],\n [0.8620, 1.],\n [1-0.7337, 0.],\n [0.9412, 1.],\n [1.-0.5819, 0.],\n [.2738, 1.],\n [1.-.5136, 0.],\n [.8819, 1.],\n [1.-.4387, 0.],\n [1.-.6257, 0.],\n [.7857, 1.],\n [1.-.3722, 0.],\n [1.-0.8049, 0.],\n [0.7864, 1.],\n [1.-0.2372, 0.],\n [0.7934, 1.],\n [0.9583, 1.],\n [0.9739, 1.],\n [1.-0.3556, 0.],\n [1.-0.2551, 0.],\n [1.-0.4532, 0.],\n [0.4605, 1.],\n [0.7572, 1.],\n [0.9496, 1.],\n [0.8268, 1.],\n [1.-0.4876, 0.],\n [0.8523, 1.],\n [1.-0.2629, 0.],\n [1.-0.9021, 0.],\n [0.6977, 1.],\n [0.9142, 1.],\n [1.-0.8175, 0.],\n [1.-0.4865, 0.],\n [0.9110, 1.],\n [1.-0.2159, 0.],\n [1.-0.6943, 0.],\n [1.-0.2753, 0.],\n [0.8590, 1.],\n [0.8273, 1.],\n [1.-0.5169, 0.],\n [1.-0.7412, 0.]\n])\n\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\n\nax = plt('roc_curve').gca()\n\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\n\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')\n\nax.legend(loc=\"lower right\")\n\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\n\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\n\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\n\nim = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 
3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\n\nsave_fig()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# coding=utf-8
# date 2020-10-22 10:54:38
# author calllivecn <c-all@qq.com>
import sys
import random
import asyncio
import argparse
def httpResponse(msg):
response = [
"HTTP/1.1 200 ok",
"Server: py",
"Content-Type: text/plain",
"Content-Length: " + str(len(msg)),
"\r\n",
]
return "\r\n".join(response).encode("utf8") + msg
async def echo(reader, writer):
#t = random.randint(100, 3000)/1000
#await asyncio.sleep(t)
data = await reader.read(1024)
if not data:
return
writer.write(httpResponse(b"hello world!\n"))
await writer.drain()
async def handle(reader, writer):
try:
await echo(reader, writer)
except ConnectionResetError:
pass
finally:
writer.close()
try:
await writer.wait_closed()
except ConnectionResetError:
pass
def usage_uvloop():
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
print("需要安装uvloop(pip install --user uvloop)")
sys.exit(1)
def main():
parse = argparse.ArgumentParser()
parse.add_argument("--addr", action="store", default="*", help="listen 地址 (default: ipv4+ipv6)")
parse.add_argument("--port", action="store", type=int, default=6789, help="port (default: 6789)")
parse.add_argument("--uvloop", action="store_true", help="使用uvloop")
parse.add_argument("--parse", action="store_true", help=argparse.SUPPRESS)
args = parse.parse_args()
if args.parse:
parse.print_usage()
sys.exit(0)
if args.uvloop:
usage_uvloop()
else:
print("可以选使用uvloop加速")
async def server():
# server = await asyncio.start_server(handle, args.addr, args.port, reuse_address=True, reuse_port=True)
server = await asyncio.start_server(handle, args.addr, args.port, reuse_address=True, backlog=4096)
async with server:
await server.serve_forever()
print(f"listen: {args.addr}:{args.port}")
try:
asyncio.run(server())
except KeyboardInterrupt:
print("exit")
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "9320926c9eb8a03d36446f3692f11b242c4fc745",
"index": 8364,
"step-1": "<mask token>\n\n\ndef httpResponse(msg):\n response = ['HTTP/1.1 200 ok', 'Server: py', 'Content-Type: text/plain',\n 'Content-Length: ' + str(len(msg)), '\\r\\n']\n return '\\r\\n'.join(response).encode('utf8') + msg\n\n\n<mask token>\n\n\ndef usage_uvloop():\n try:\n import uvloop\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n except ModuleNotFoundError:\n print('需要安装uvloop(pip install --user uvloop)')\n sys.exit(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef httpResponse(msg):\n response = ['HTTP/1.1 200 ok', 'Server: py', 'Content-Type: text/plain',\n 'Content-Length: ' + str(len(msg)), '\\r\\n']\n return '\\r\\n'.join(response).encode('utf8') + msg\n\n\n<mask token>\n\n\ndef usage_uvloop():\n try:\n import uvloop\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n except ModuleNotFoundError:\n print('需要安装uvloop(pip install --user uvloop)')\n sys.exit(1)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument('--addr', action='store', default='*', help=\n 'listen 地址 (default: ipv4+ipv6)')\n parse.add_argument('--port', action='store', type=int, default=6789,\n help='port (default: 6789)')\n parse.add_argument('--uvloop', action='store_true', help='使用uvloop')\n parse.add_argument('--parse', action='store_true', help=argparse.SUPPRESS)\n args = parse.parse_args()\n if args.parse:\n parse.print_usage()\n sys.exit(0)\n if args.uvloop:\n usage_uvloop()\n else:\n print('可以选使用uvloop加速')\n\n async def server():\n server = await asyncio.start_server(handle, args.addr, args.port,\n reuse_address=True, backlog=4096)\n async with server:\n await server.serve_forever()\n print(f'listen: {args.addr}:{args.port}')\n try:\n asyncio.run(server())\n except KeyboardInterrupt:\n print('exit')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef httpResponse(msg):\n response = ['HTTP/1.1 200 ok', 'Server: py', 'Content-Type: text/plain',\n 'Content-Length: ' + str(len(msg)), '\\r\\n']\n return '\\r\\n'.join(response).encode('utf8') + msg\n\n\nasync def echo(reader, writer):\n data = await reader.read(1024)\n if not data:\n return\n writer.write(httpResponse(b'hello world!\\n'))\n await writer.drain()\n\n\nasync def handle(reader, writer):\n try:\n await echo(reader, writer)\n except ConnectionResetError:\n pass\n finally:\n writer.close()\n try:\n await writer.wait_closed()\n except ConnectionResetError:\n pass\n\n\ndef usage_uvloop():\n try:\n import uvloop\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n except ModuleNotFoundError:\n print('需要安装uvloop(pip install --user uvloop)')\n sys.exit(1)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument('--addr', action='store', default='*', help=\n 'listen 地址 (default: ipv4+ipv6)')\n parse.add_argument('--port', action='store', type=int, default=6789,\n help='port (default: 6789)')\n parse.add_argument('--uvloop', action='store_true', help='使用uvloop')\n parse.add_argument('--parse', action='store_true', help=argparse.SUPPRESS)\n args = parse.parse_args()\n if args.parse:\n parse.print_usage()\n sys.exit(0)\n if args.uvloop:\n usage_uvloop()\n else:\n print('可以选使用uvloop加速')\n\n async def server():\n server = await asyncio.start_server(handle, args.addr, args.port,\n reuse_address=True, backlog=4096)\n async with server:\n await server.serve_forever()\n print(f'listen: {args.addr}:{args.port}')\n try:\n asyncio.run(server())\n except KeyboardInterrupt:\n print('exit')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport random\nimport asyncio\nimport argparse\n\n\ndef httpResponse(msg):\n response = ['HTTP/1.1 200 ok', 'Server: py', 'Content-Type: text/plain',\n 'Content-Length: ' + str(len(msg)), '\\r\\n']\n return '\\r\\n'.join(response).encode('utf8') + msg\n\n\nasync def echo(reader, writer):\n data = await reader.read(1024)\n if not data:\n return\n writer.write(httpResponse(b'hello world!\\n'))\n await writer.drain()\n\n\nasync def handle(reader, writer):\n try:\n await echo(reader, writer)\n except ConnectionResetError:\n pass\n finally:\n writer.close()\n try:\n await writer.wait_closed()\n except ConnectionResetError:\n pass\n\n\ndef usage_uvloop():\n try:\n import uvloop\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n except ModuleNotFoundError:\n print('需要安装uvloop(pip install --user uvloop)')\n sys.exit(1)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument('--addr', action='store', default='*', help=\n 'listen 地址 (default: ipv4+ipv6)')\n parse.add_argument('--port', action='store', type=int, default=6789,\n help='port (default: 6789)')\n parse.add_argument('--uvloop', action='store_true', help='使用uvloop')\n parse.add_argument('--parse', action='store_true', help=argparse.SUPPRESS)\n args = parse.parse_args()\n if args.parse:\n parse.print_usage()\n sys.exit(0)\n if args.uvloop:\n usage_uvloop()\n else:\n print('可以选使用uvloop加速')\n\n async def server():\n server = await asyncio.start_server(handle, args.addr, args.port,\n reuse_address=True, backlog=4096)\n async with server:\n await server.serve_forever()\n print(f'listen: {args.addr}:{args.port}')\n try:\n asyncio.run(server())\n except KeyboardInterrupt:\n print('exit')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n# coding=utf-8\n# date 2020-10-22 10:54:38\n# author calllivecn <c-all@qq.com>\n\n\nimport sys\nimport random\nimport asyncio\nimport argparse\n\n\ndef httpResponse(msg):\n response = [\n \"HTTP/1.1 200 ok\",\n \"Server: py\",\n \"Content-Type: text/plain\",\n \"Content-Length: \" + str(len(msg)),\n \"\\r\\n\",\n ]\n return \"\\r\\n\".join(response).encode(\"utf8\") + msg\n\n\nasync def echo(reader, writer):\n\n #t = random.randint(100, 3000)/1000\n #await asyncio.sleep(t)\n\n data = await reader.read(1024)\n\n if not data:\n return\n\n writer.write(httpResponse(b\"hello world!\\n\"))\n await writer.drain()\n\n\nasync def handle(reader, writer):\n\n try:\n await echo(reader, writer)\n except ConnectionResetError:\n pass\n\n finally:\n writer.close()\n try:\n await writer.wait_closed()\n except ConnectionResetError:\n pass\n \n\ndef usage_uvloop():\n try:\n import uvloop\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n except ModuleNotFoundError:\n print(\"需要安装uvloop(pip install --user uvloop)\")\n sys.exit(1)\n\n\n\ndef main():\n\n parse = argparse.ArgumentParser()\n parse.add_argument(\"--addr\", action=\"store\", default=\"*\", help=\"listen 地址 (default: ipv4+ipv6)\")\n parse.add_argument(\"--port\", action=\"store\", type=int, default=6789, help=\"port (default: 6789)\")\n parse.add_argument(\"--uvloop\", action=\"store_true\", help=\"使用uvloop\")\n\n parse.add_argument(\"--parse\", action=\"store_true\", help=argparse.SUPPRESS)\n\n args = parse.parse_args()\n\n if args.parse:\n parse.print_usage()\n sys.exit(0)\n\n if args.uvloop:\n usage_uvloop()\n else:\n print(\"可以选使用uvloop加速\")\n\n\n async def server():\n # server = await asyncio.start_server(handle, args.addr, args.port, reuse_address=True, reuse_port=True)\n server = await asyncio.start_server(handle, args.addr, args.port, reuse_address=True, backlog=4096)\n\n async with server:\n await server.serve_forever()\n\n print(f\"listen: {args.addr}:{args.port}\")\n\n 
try:\n asyncio.run(server())\n except KeyboardInterrupt:\n print(\"exit\")\n\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class AnnotatorConfig(object):
<|reserved_special_token_0|>
def __init__(self, filename=None):
pass
<|reserved_special_token_0|>
def get(self, key, default=None):
return self.__dict__.get(key, default)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
def __len__(self):
return len(self.__dict__)
def __getstate__(self):
return self.as_dict()
def __setstate__(self, state):
self.override(state)
def items(self):
return list(self.__dict__.items())
def as_dict(self):
return dict(list(self.items()))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AnnotatorConfig(object):
DEFAULT_PROJECT_NAME = 'default'
def __init__(self, filename=None):
pass
def __getitem__(self, key):
return self.__dict__[key]
def get(self, key, default=None):
return self.__dict__.get(key, default)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
def __len__(self):
return len(self.__dict__)
def __getstate__(self):
return self.as_dict()
def __setstate__(self, state):
self.override(state)
def items(self):
return list(self.__dict__.items())
def as_dict(self):
return dict(list(self.items()))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InvalidConfigError(ValueError):
<|reserved_special_token_0|>
def __init__(self, message):
super(InvalidConfigError, self).__init__(message)
class AnnotatorConfig(object):
DEFAULT_PROJECT_NAME = 'default'
def __init__(self, filename=None):
pass
def __getitem__(self, key):
return self.__dict__[key]
def get(self, key, default=None):
return self.__dict__.get(key, default)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
def __len__(self):
return len(self.__dict__)
def __getstate__(self):
return self.as_dict()
def __setstate__(self, state):
self.override(state)
def items(self):
return list(self.__dict__.items())
def as_dict(self):
return dict(list(self.items()))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InvalidConfigError(ValueError):
"""Raised if an invalid configuration is encountered."""
def __init__(self, message):
super(InvalidConfigError, self).__init__(message)
class AnnotatorConfig(object):
DEFAULT_PROJECT_NAME = 'default'
def __init__(self, filename=None):
pass
def __getitem__(self, key):
return self.__dict__[key]
def get(self, key, default=None):
return self.__dict__.get(key, default)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
def __len__(self):
return len(self.__dict__)
def __getstate__(self):
return self.as_dict()
def __setstate__(self, state):
self.override(state)
def items(self):
return list(self.__dict__.items())
def as_dict(self):
return dict(list(self.items()))
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
# Describes where to search for the config file if no location is specified
DEFAULT_CONFIG_LOCATION = "config.json"
# Baseline settings applied before any user-supplied configuration is read.
# Keys fall into rough groups: project/model bookkeeping, language and
# logging, server settings, and per-component defaults.
DEFAULT_CONFIG = {
    "project": None,
    "fixed_model_name": None,
    # Path of the config file itself (see DEFAULT_CONFIG_LOCATION above).
    "config": DEFAULT_CONFIG_LOCATION,
    "data": None,
    "emulate": None,
    "language": "en",
    "log_file": None,
    "log_level": 'INFO',
    # Pre-trained MITIE feature extractor expected under the data directory.
    "mitie_file": os.path.join("data", "total_word_feature_extractor.dat"),
    "spacy_model_name": None,
    "num_threads": 1,
    "max_training_processes": 1,
    "path": "projects",
    "port": 5000,
    "token": None,
    "cors_origins": [],
    "max_number_of_ngrams": 7,
    "pipeline": [],
    "response_log": "logs",
    "aws_endpoint_url": None,
    "duckling_dimensions": None,
    "duckling_http_url": None,
    # Defaults for the CRF entity extractor.  NOTE(review): the three inner
    # feature lists presumably describe the [previous, current, next] token
    # windows -- confirm against the CRF component that consumes them.
    "ner_crf": {
        "BILOU_flag": True,
        "features": [
            ["low", "title", "upper", "pos", "pos2"],
            ["bias", "low", "word3", "word2", "upper", "title", "digit", "pos", "pos2", "pattern"],
            ["low", "title", "upper", "pos", "pos2"]],
        "max_iterations": 50,
        "L1_c": 1,
        "L2_c": 1e-3
    },
    # Hyper-parameter search space for the sklearn intent classifier.
    "intent_classifier_sklearn": {
        "C": [1, 2, 5, 10, 20, 100],
        "kernel": "linear"
    }
}
class InvalidConfigError(ValueError):
    """Signals that a configuration value or file could not be used.

    Behaves exactly like ``ValueError``; the subclass exists so callers can
    catch configuration problems separately from other value errors.
    """

    def __init__(self, message):
        # Delegate to ValueError so str(), repr() and .args work as usual.
        super(InvalidConfigError, self).__init__(message)
class AnnotatorConfig(object):
    """Dictionary-like container for annotator settings.

    Settings are stored directly in the instance ``__dict__``, so the object
    supports ``cfg[key]``, ``key in cfg``, ``len(cfg)``, iteration via
    ``items()`` and pickling via ``__getstate__``/``__setstate__``.
    """

    # Project name used when the caller does not supply one.
    DEFAULT_PROJECT_NAME = "default"

    def __init__(self, filename=None):
        # NOTE(review): loading settings from `filename` is not implemented
        # here -- the parameter is accepted but ignored.
        pass

    def __getitem__(self, key):
        """Return the setting for *key*; raises KeyError if absent."""
        return self.__dict__[key]

    def get(self, key, default=None):
        """Return the setting for *key*, or *default* if absent."""
        return self.__dict__.get(key, default)

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __delitem__(self, key):
        del self.__dict__[key]

    def __contains__(self, key):
        return key in self.__dict__

    def __len__(self):
        return len(self.__dict__)

    def __getstate__(self):
        # Pickle only the plain settings dict.
        return self.as_dict()

    def __setstate__(self, state):
        self.override(state)

    def override(self, config):
        """Merge the mapping *config* into the current settings.

        Bug fix: ``__setstate__`` always called this method, but it was never
        defined, so unpickling an AnnotatorConfig raised AttributeError.
        """
        self.__dict__.update(config)

    def items(self):
        """Return the settings as a list of (key, value) pairs."""
        return list(self.__dict__.items())

    def as_dict(self):
        """Return a shallow copy of the settings as a plain dict."""
        return dict(list(self.items()))
|
flexible
|
{
"blob_id": "5c4c893caa19e58491e641420261bb70e7202cf0",
"index": 3566,
"step-1": "<mask token>\n\n\nclass AnnotatorConfig(object):\n <mask token>\n\n def __init__(self, filename=None):\n pass\n <mask token>\n\n def get(self, key, default=None):\n return self.__dict__.get(key, default)\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __len__(self):\n return len(self.__dict__)\n\n def __getstate__(self):\n return self.as_dict()\n\n def __setstate__(self, state):\n self.override(state)\n\n def items(self):\n return list(self.__dict__.items())\n\n def as_dict(self):\n return dict(list(self.items()))\n",
"step-2": "<mask token>\n\n\nclass AnnotatorConfig(object):\n DEFAULT_PROJECT_NAME = 'default'\n\n def __init__(self, filename=None):\n pass\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def get(self, key, default=None):\n return self.__dict__.get(key, default)\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __len__(self):\n return len(self.__dict__)\n\n def __getstate__(self):\n return self.as_dict()\n\n def __setstate__(self, state):\n self.override(state)\n\n def items(self):\n return list(self.__dict__.items())\n\n def as_dict(self):\n return dict(list(self.items()))\n",
"step-3": "<mask token>\n\n\nclass InvalidConfigError(ValueError):\n <mask token>\n\n def __init__(self, message):\n super(InvalidConfigError, self).__init__(message)\n\n\nclass AnnotatorConfig(object):\n DEFAULT_PROJECT_NAME = 'default'\n\n def __init__(self, filename=None):\n pass\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def get(self, key, default=None):\n return self.__dict__.get(key, default)\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __len__(self):\n return len(self.__dict__)\n\n def __getstate__(self):\n return self.as_dict()\n\n def __setstate__(self, state):\n self.override(state)\n\n def items(self):\n return list(self.__dict__.items())\n\n def as_dict(self):\n return dict(list(self.items()))\n",
"step-4": "<mask token>\n\n\nclass InvalidConfigError(ValueError):\n \"\"\"Raised if an invalid configuration is encountered.\"\"\"\n\n def __init__(self, message):\n super(InvalidConfigError, self).__init__(message)\n\n\nclass AnnotatorConfig(object):\n DEFAULT_PROJECT_NAME = 'default'\n\n def __init__(self, filename=None):\n pass\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def get(self, key, default=None):\n return self.__dict__.get(key, default)\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __len__(self):\n return len(self.__dict__)\n\n def __getstate__(self):\n return self.as_dict()\n\n def __setstate__(self, state):\n self.override(state)\n\n def items(self):\n return list(self.__dict__.items())\n\n def as_dict(self):\n return dict(list(self.items()))\n",
"step-5": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n\nimport os\n\n# Describes where to search for the config file if no location is specified\n\nDEFAULT_CONFIG_LOCATION = \"config.json\"\n\nDEFAULT_CONFIG = {\n \"project\": None,\n \"fixed_model_name\": None,\n \"config\": DEFAULT_CONFIG_LOCATION,\n \"data\": None,\n \"emulate\": None,\n \"language\": \"en\",\n \"log_file\": None,\n \"log_level\": 'INFO',\n \"mitie_file\": os.path.join(\"data\", \"total_word_feature_extractor.dat\"),\n \"spacy_model_name\": None,\n \"num_threads\": 1,\n \"max_training_processes\": 1,\n \"path\": \"projects\",\n \"port\": 5000,\n \"token\": None,\n \"cors_origins\": [],\n \"max_number_of_ngrams\": 7,\n \"pipeline\": [],\n \"response_log\": \"logs\",\n \"aws_endpoint_url\": None,\n \"duckling_dimensions\": None,\n \"duckling_http_url\": None,\n \"ner_crf\": {\n \"BILOU_flag\": True,\n \"features\": [\n [\"low\", \"title\", \"upper\", \"pos\", \"pos2\"],\n [\"bias\", \"low\", \"word3\", \"word2\", \"upper\", \"title\", \"digit\", \"pos\", \"pos2\", \"pattern\"],\n [\"low\", \"title\", \"upper\", \"pos\", \"pos2\"]],\n \"max_iterations\": 50,\n \"L1_c\": 1,\n \"L2_c\": 1e-3\n },\n \"intent_classifier_sklearn\": {\n \"C\": [1, 2, 5, 10, 20, 100],\n \"kernel\": \"linear\"\n }\n}\n\n\nclass InvalidConfigError(ValueError):\n \"\"\"Raised if an invalid configuration is encountered.\"\"\"\n\n def __init__(self, message):\n # type: (Text) -> None\n super(InvalidConfigError, self).__init__(message)\n\n\nclass AnnotatorConfig(object):\n DEFAULT_PROJECT_NAME = \"default\"\n\n def __init__(self, filename=None):\n pass\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def get(self, key, default=None):\n return self.__dict__.get(key, default)\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __len__(self):\n return 
len(self.__dict__)\n\n def __getstate__(self):\n return self.as_dict()\n\n def __setstate__(self, state):\n self.override(state)\n\n def items(self):\n return list(self.__dict__.items())\n\n def as_dict(self):\n return dict(list(self.items()))\n",
"step-ids": [
11,
13,
15,
16,
19
]
}
|
[
11,
13,
15,
16,
19
] |
__author__ = 'AChen'
from rec_linked_list import *
def filter_pos_rec(lst):
    """Return a new linked list holding only the strictly positive items of *lst*.

    @type lst: LinkedListRec
    >>> lst = LinkedListRec([3, -10, 4, 0])
    >>> pos = filter_pos_rec(lst)
    >>> str(pos)
    '3 -> 4'
    """
    if lst.is_empty():
        return lst
    # Filter the tail first, then decide whether the head survives.
    filtered_rest = filter_pos_rec(lst._rest)
    if lst._first <= 0:
        # Non-positive head: the filtered tail is the whole answer.
        return filtered_rest
    node = LinkedListRec([])
    node._first = lst._first
    node._rest = filtered_rest
    return node
|
normal
|
{
"blob_id": "efcbe296ea72a94be967124a8ba8c84a524e2eb1",
"index": 66,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n",
"step-3": "__author__ = 'AChen'\n<mask token>\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n",
"step-4": "__author__ = 'AChen'\nfrom rec_linked_list import *\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_content(url):
    """Download the file behind a pomf-style *url* and describe it.

    Returns a dict with keys ``site``/``url``/``ext``/``orig_filename``/
    ``content``, or None when the HTTP request does not return 200.
    """
    paste_info = {'site': 'pomf', 'url': url}
    # Split "<base>/<name>.<ext>" into name (group 1) and extension (group 2).
    # NOTE(review): m is None when the URL does not match this pattern, and
    # m.group() below would then raise AttributeError -- confirm callers only
    # pass direct file URLs.
    m = re.match('^.*/([0-9a-zA-Z]+)\\.([a-zA-Z0-9]+)$', url)
    response = requests.get(url)
    if response.status_code != 200:
        return
    paste_info['ext'] = m.group(2)
    paste_info['orig_filename'] = m.group(1)
    paste_info['content'] = response.content
    return paste_info
<|reserved_special_token_1|>
import requests
import re
def get_content(url):
    """Download the file behind a pomf-style *url* and describe it.

    Returns a dict with keys ``site``/``url``/``ext``/``orig_filename``/
    ``content``, or None when the HTTP request does not return 200.
    """
    paste_info = {'site': 'pomf', 'url': url}
    # Split "<base>/<name>.<ext>" into name (group 1) and extension (group 2).
    # NOTE(review): m is None when the URL does not match this pattern, and
    # m.group() below would then raise AttributeError -- confirm callers only
    # pass direct file URLs.
    m = re.match('^.*/([0-9a-zA-Z]+)\\.([a-zA-Z0-9]+)$', url)
    response = requests.get(url)
    if response.status_code != 200:
        return
    paste_info['ext'] = m.group(2)
    paste_info['orig_filename'] = m.group(1)
    paste_info['content'] = response.content
    return paste_info
<|reserved_special_token_1|>
#!/usr/bin/env python
import requests
import re
def get_content(url):
    """Download the file behind a pomf-style *url* and describe it.

    Returns a dict with keys ``site``/``url``/``ext``/``orig_filename``/
    ``content``, or None when the URL does not look like a direct file link
    or the HTTP request does not return 200.
    """
    # Parse "<base>/<name>.<ext>" before touching the network.  Bug fix: the
    # old code called m.group() unconditionally, crashing with AttributeError
    # on a non-matching URL (after already wasting an HTTP request).
    m = re.match(r'^.*/([0-9a-zA-Z]+)\.([a-zA-Z0-9]+)$', url)
    if m is None:
        return
    response = requests.get(url)
    if response.status_code != 200:
        return
    return {
        'site': 'pomf',
        'url': url,
        'ext': m.group(2),
        'orig_filename': m.group(1),
        'content': response.content,
    }
|
flexible
|
{
"blob_id": "78a6202f501bc116e21e98a3e83c9e3f8d6402b4",
"index": 3981,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_content(url):\n paste_info = {'site': 'pomf', 'url': url}\n m = re.match('^.*/([0-9a-zA-Z]+)\\\\.([a-zA-Z0-9]+)$', url)\n response = requests.get(url)\n if response.status_code != 200:\n return\n paste_info['ext'] = m.group(2)\n paste_info['orig_filename'] = m.group(1)\n paste_info['content'] = response.content\n return paste_info\n",
"step-3": "import requests\nimport re\n\n\ndef get_content(url):\n paste_info = {'site': 'pomf', 'url': url}\n m = re.match('^.*/([0-9a-zA-Z]+)\\\\.([a-zA-Z0-9]+)$', url)\n response = requests.get(url)\n if response.status_code != 200:\n return\n paste_info['ext'] = m.group(2)\n paste_info['orig_filename'] = m.group(1)\n paste_info['content'] = response.content\n return paste_info\n",
"step-4": "#!/usr/bin/env python\nimport requests\nimport re\ndef get_content(url):\n paste_info = {\n 'site': 'pomf',\n 'url': url\n }\n m = re.match('^.*/([0-9a-zA-Z]+)\\.([a-zA-Z0-9]+)$',url)\n response = requests.get(url)\n if response.status_code != 200:\n return\n paste_info['ext'] = m.group(2)\n paste_info['orig_filename'] = m.group(1)\n paste_info['content'] = response.content\n return paste_info\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
# Python 2 script: ask the user for a year and report whether it is a
# leap year, using the standard-library calendar module.
import calendar
# raw_input (Python 2) returns a string; int() raises ValueError on junk.
a=int(raw_input("enter the year to check that year is leap year or not\n"))
# calendar.isleap applies the Gregorian leap-year rules.
cal=calendar.isleap(a)
if cal :
			print "leap year"
else :
			print "not a leap year"
print "\nthanks "
# Unused empty triple-quoted literal kept from the original file.
'''

'''
|
normal
|
{
"blob_id": "a077221d91f75645172ba5d86afad8e49cb7ed2f",
"index": 2796,
"step-1": "#!/usr/bin/python\nimport calendar\n\na=int(raw_input(\"enter the year to check that year is leap year or not\\n\")) \ncal=calendar.isleap(a)\n \nif cal :\n\t\t\tprint \"leap year\"\nelse :\n\t\t\tprint \"not a leap year\"\n\nprint \"\\nthanks \"\n\n'''\n\n'''\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
Copyright (c) 2011 Jacob K. Schoen (jacob.schoen@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import logging
import datetime
import os
import db
# Module-level logger shared by every scan helper below.
myLogger = logging.getLogger('smugScan')
def getAllPictureInfo(configobj, smugmug, lock):
    """Scan the SmugMug account and mirror album, image and category
    metadata into the local database.

    The smugmug tables are emptied first, so every scan rebuilds them from
    scratch.  *lock* serialises database writes with other processes.
    """
    myLogger.info("getAllPictures() parent process:'{0}' process id:'{1}".format(os.getppid(), os.getpid()))
    conn = db.getConn(configobj)
    # Start from a clean slate before re-importing everything.
    myLogger.debug("Emptying smugmug tables.")
    _emptySmugMugTables(conn, lock)
    myLogger.debug("Getting album info from smugmug.")
    albums = _getAlbums(conn, smugmug, lock)
    # Walk each album and pull its picture metadata.
    for entry in albums["Albums"]:
        myLogger.debug("geting picture info for album '%s'", entry["Title"])
        _getPictures(entry, conn, smugmug, lock)
    # Categories first, then their sub-categories (which need the ids).
    category_ids = _getUserCategories(conn, smugmug, lock)
    _getUserSubCategories(conn, smugmug, lock, category_ids)
    conn.close()
    myLogger.info('Finished Scanning SmugMug')
def _name_and_id(album, key):
    """Return the ("Name", "id") pair of album[key], or (None, None) when the
    section or either of its fields is missing -- matching the old behaviour
    where a partial section counted as absent."""
    try:
        section = album[key]
        return section["Name"], section["id"]
    except KeyError:
        return None, None


def _getAlbums(conn, smugmug, lock):
    """Fetch all albums from SmugMug, store each one in the local database,
    and return the raw API response so callers can iterate the albums.

    *lock* serialises the db writes; it is now released even if a write
    raises (the old code would have held it forever, deadlocking other
    workers).
    """
    albums = smugmug.albums_get(Extras="LastUpdated")
    for album in albums["Albums"]:
        myLogger.debug(album)
        title = album["Title"]
        # Category/SubCategory are optional; extract both the same way
        # instead of duplicating the try/except twice.
        cat, catid = _name_and_id(album, "Category")
        subCat, subCatid = _name_and_id(album, "SubCategory")
        updated = datetime.datetime.strptime(album["LastUpdated"], '%Y-%m-%d %H:%M:%S')
        lock.acquire()
        try:
            db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title,
                            updated, album["Key"], album["id"])
        finally:
            # Bug fix: always release, even when the insert raises.
            lock.release()
    return albums
def _getPictures(album, conn, smugmug, lock):
    """Fetch the image metadata for *album* and store each image locally.

    *lock* serialises the db writes; it is released even when a write raises
    (bug fix: the old code leaked the lock on error, deadlocking other
    workers).
    """
    pictures = smugmug.images_get(AlbumID=album["id"], AlbumKey=album["Key"],
                                  Extras="MD5Sum,LastUpdated,FileName")
    albumId = pictures["Album"]["id"]
    for picture in pictures["Album"]["Images"]:
        updated = datetime.datetime.strptime(picture["LastUpdated"], '%Y-%m-%d %H:%M:%S')
        lock.acquire()
        try:
            db.addSmugImage(conn, albumId, updated, picture["MD5Sum"],
                            picture["Key"], picture["id"], picture["FileName"])
        finally:
            lock.release()
def _getUserCategories(conn, smugmug, lock):
    """Fetch the account's categories, store each one locally, and return
    the list of category ids (used later to fetch sub-categories).

    Bug fix: the lock is now released even when a db write raises; the old
    code would have held it forever.
    """
    result = smugmug.categories_get()
    ids = []
    for category in result["Categories"]:
        ids.append(category["id"])
        lock.acquire()
        try:
            db.addUserCategory(conn, category["Type"], category["id"],
                               category["NiceName"], category["Name"])
        finally:
            lock.release()
    return ids
def _getUserSubCategories(conn, smugmug, lock, ids):
    """Fetch the sub-categories of every category id in *ids* and store them.

    Bug fix: the lock is now released even when a db write raises; the old
    code would have held it forever.
    """
    for categoryid in ids:
        result = smugmug.subcategories_get(CategoryID=categoryid)
        for subcategory in result["SubCategories"]:
            lock.acquire()
            try:
                db.addUserSubCategory(conn, subcategory["id"],
                                      subcategory["NiceName"],
                                      subcategory["Name"], categoryid)
            finally:
                lock.release()
def _emptySmugMugTables(conn, lock):
    """Delete every row from the smugmug mirror tables under *lock*.

    Bug fix: release the lock in a finally block so a failing DELETE no
    longer leaves it held forever.
    """
    lock.acquire()
    try:
        # Constant table names only -- no untrusted input in the SQL.
        for table in ("smug_album", "smug_image",
                      "user_category", "user_subcategory"):
            db.execute(conn, "DELETE FROM " + table)
    finally:
        lock.release()
|
normal
|
{
"blob_id": "e2e3b63deba20cd87fdfca81a9f67fa24891a1e0",
"index": 6416,
"step-1": "<mask token>\n\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras='LastUpdated')\n for album in albums['Albums']:\n myLogger.debug(album)\n title = album['Title']\n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album['Category']['Name']\n catid = album['Category']['id']\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album['SubCategory']['Name']\n subCatid = album['SubCategory']['id']\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime\n .datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),\n album['Key'], album['id'])\n lock.release()\n return albums\n\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'\n ], Extras='MD5Sum,LastUpdated,FileName')\n albumId = pictures['Album']['id']\n for picture in pictures['Album']['Images']:\n lock.acquire()\n db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[\n 'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],\n picture['Key'], picture['id'], picture['FileName'])\n lock.release()\n\n\ndef _getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = result['Categories']\n ids = []\n for category in categories:\n ids.append(category['id'])\n lock.acquire()\n db.addUserCategory(conn, category['Type'], category['id'], category\n ['NiceName'], category['Name'])\n lock.release()\n return ids\n\n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result['SubCategories']\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn, subcategory['id'], subcategory[\n 'NiceName'], subcategory['Name'], categoryid)\n lock.release()\n\n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn, 'DELETE 
FROM smug_album')\n db.execute(conn, 'DELETE FROM smug_image')\n db.execute(conn, 'DELETE FROM user_category')\n db.execute(conn, 'DELETE FROM user_subcategory')\n lock.release()\n",
"step-2": "<mask token>\n\n\ndef getAllPictureInfo(configobj, smugmug, lock):\n myLogger.info(\"getAllPictures() parent process:'{0}' process id:'{1}\".\n format(os.getppid(), os.getpid()))\n conn = db.getConn(configobj)\n myLogger.debug('Emptying smugmug tables.')\n _emptySmugMugTables(conn, lock)\n myLogger.debug('Getting album info from smugmug.')\n albums = _getAlbums(conn, smugmug, lock)\n for album in albums['Albums']:\n myLogger.debug(\"geting picture info for album '%s'\", album['Title'])\n _getPictures(album, conn, smugmug, lock)\n ids = _getUserCategories(conn, smugmug, lock)\n _getUserSubCategories(conn, smugmug, lock, ids)\n conn.close()\n myLogger.info('Finished Scanning SmugMug')\n\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras='LastUpdated')\n for album in albums['Albums']:\n myLogger.debug(album)\n title = album['Title']\n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album['Category']['Name']\n catid = album['Category']['id']\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album['SubCategory']['Name']\n subCatid = album['SubCategory']['id']\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime\n .datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),\n album['Key'], album['id'])\n lock.release()\n return albums\n\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'\n ], Extras='MD5Sum,LastUpdated,FileName')\n albumId = pictures['Album']['id']\n for picture in pictures['Album']['Images']:\n lock.acquire()\n db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[\n 'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],\n picture['Key'], picture['id'], picture['FileName'])\n lock.release()\n\n\ndef _getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = 
result['Categories']\n ids = []\n for category in categories:\n ids.append(category['id'])\n lock.acquire()\n db.addUserCategory(conn, category['Type'], category['id'], category\n ['NiceName'], category['Name'])\n lock.release()\n return ids\n\n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result['SubCategories']\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn, subcategory['id'], subcategory[\n 'NiceName'], subcategory['Name'], categoryid)\n lock.release()\n\n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn, 'DELETE FROM smug_album')\n db.execute(conn, 'DELETE FROM smug_image')\n db.execute(conn, 'DELETE FROM user_category')\n db.execute(conn, 'DELETE FROM user_subcategory')\n lock.release()\n",
"step-3": "<mask token>\nmyLogger = logging.getLogger('smugScan')\n\n\ndef getAllPictureInfo(configobj, smugmug, lock):\n myLogger.info(\"getAllPictures() parent process:'{0}' process id:'{1}\".\n format(os.getppid(), os.getpid()))\n conn = db.getConn(configobj)\n myLogger.debug('Emptying smugmug tables.')\n _emptySmugMugTables(conn, lock)\n myLogger.debug('Getting album info from smugmug.')\n albums = _getAlbums(conn, smugmug, lock)\n for album in albums['Albums']:\n myLogger.debug(\"geting picture info for album '%s'\", album['Title'])\n _getPictures(album, conn, smugmug, lock)\n ids = _getUserCategories(conn, smugmug, lock)\n _getUserSubCategories(conn, smugmug, lock, ids)\n conn.close()\n myLogger.info('Finished Scanning SmugMug')\n\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras='LastUpdated')\n for album in albums['Albums']:\n myLogger.debug(album)\n title = album['Title']\n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album['Category']['Name']\n catid = album['Category']['id']\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album['SubCategory']['Name']\n subCatid = album['SubCategory']['id']\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime\n .datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),\n album['Key'], album['id'])\n lock.release()\n return albums\n\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'\n ], Extras='MD5Sum,LastUpdated,FileName')\n albumId = pictures['Album']['id']\n for picture in pictures['Album']['Images']:\n lock.acquire()\n db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[\n 'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],\n picture['Key'], picture['id'], picture['FileName'])\n lock.release()\n\n\ndef _getUserCategories(conn, smugmug, lock):\n result = 
smugmug.categories_get()\n categories = result['Categories']\n ids = []\n for category in categories:\n ids.append(category['id'])\n lock.acquire()\n db.addUserCategory(conn, category['Type'], category['id'], category\n ['NiceName'], category['Name'])\n lock.release()\n return ids\n\n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result['SubCategories']\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn, subcategory['id'], subcategory[\n 'NiceName'], subcategory['Name'], categoryid)\n lock.release()\n\n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn, 'DELETE FROM smug_album')\n db.execute(conn, 'DELETE FROM smug_image')\n db.execute(conn, 'DELETE FROM user_category')\n db.execute(conn, 'DELETE FROM user_subcategory')\n lock.release()\n",
"step-4": "<mask token>\nimport logging\nimport datetime\nimport os\nimport db\nmyLogger = logging.getLogger('smugScan')\n\n\ndef getAllPictureInfo(configobj, smugmug, lock):\n myLogger.info(\"getAllPictures() parent process:'{0}' process id:'{1}\".\n format(os.getppid(), os.getpid()))\n conn = db.getConn(configobj)\n myLogger.debug('Emptying smugmug tables.')\n _emptySmugMugTables(conn, lock)\n myLogger.debug('Getting album info from smugmug.')\n albums = _getAlbums(conn, smugmug, lock)\n for album in albums['Albums']:\n myLogger.debug(\"geting picture info for album '%s'\", album['Title'])\n _getPictures(album, conn, smugmug, lock)\n ids = _getUserCategories(conn, smugmug, lock)\n _getUserSubCategories(conn, smugmug, lock, ids)\n conn.close()\n myLogger.info('Finished Scanning SmugMug')\n\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras='LastUpdated')\n for album in albums['Albums']:\n myLogger.debug(album)\n title = album['Title']\n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album['Category']['Name']\n catid = album['Category']['id']\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album['SubCategory']['Name']\n subCatid = album['SubCategory']['id']\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime\n .datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),\n album['Key'], album['id'])\n lock.release()\n return albums\n\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'\n ], Extras='MD5Sum,LastUpdated,FileName')\n albumId = pictures['Album']['id']\n for picture in pictures['Album']['Images']:\n lock.acquire()\n db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[\n 'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],\n picture['Key'], picture['id'], picture['FileName'])\n lock.release()\n\n\ndef 
_getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = result['Categories']\n ids = []\n for category in categories:\n ids.append(category['id'])\n lock.acquire()\n db.addUserCategory(conn, category['Type'], category['id'], category\n ['NiceName'], category['Name'])\n lock.release()\n return ids\n\n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result['SubCategories']\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn, subcategory['id'], subcategory[\n 'NiceName'], subcategory['Name'], categoryid)\n lock.release()\n\n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn, 'DELETE FROM smug_album')\n db.execute(conn, 'DELETE FROM smug_image')\n db.execute(conn, 'DELETE FROM user_category')\n db.execute(conn, 'DELETE FROM user_subcategory')\n lock.release()\n",
"step-5": "'''\nCopyright (c) 2011 Jacob K. Schoen (jacob.schoen@gmail.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in \nthe Software without restriction, including without limitation the rights to \nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies \nof the Software, and to permit persons to whom the Software is furnished to do \nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all \ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE \nSOFTWARE.\n'''\n\nimport logging\nimport datetime\nimport os\n\nimport db\n\nmyLogger = logging.getLogger('smugScan')\n\ndef getAllPictureInfo(configobj, smugmug, lock):\n myLogger.info(\"getAllPictures() parent process:'{0}' process id:'{1}\".format(os.getppid(),os.getpid()))\n conn = db.getConn(configobj)\n #start fresh on this\n myLogger.debug(\"Emptying smugmug tables.\")\n _emptySmugMugTables(conn, lock)\n \n #now get the albums \n myLogger.debug(\"Getting album info from smugmug.\")\n albums = _getAlbums(conn, smugmug, lock)\n for album in albums[\"Albums\"]:\n #and the pictures in each album\n myLogger.debug(\"geting picture info for album '%s'\", album[\"Title\"])\n _getPictures(album, conn, smugmug, lock)\n \n #get categories\n ids = _getUserCategories(conn, smugmug, lock)\n _getUserSubCategories(conn, smugmug, lock, ids)\n conn.close()\n 
myLogger.info('Finished Scanning SmugMug')\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras=\"LastUpdated\")\n \n for album in albums[\"Albums\"]:\n myLogger.debug(album)\n title = album[\"Title\"]\n \n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album[\"Category\"][\"Name\"]\n catid = album[\"Category\"][\"id\"]\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album[\"SubCategory\"][\"Name\"]\n subCatid = album[\"SubCategory\"][\"id\"]\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn,cat, catid, subCat, subCatid, title, datetime.datetime.strptime(album[\"LastUpdated\"],'%Y-%m-%d %H:%M:%S'), album[\"Key\"], album[\"id\"])\n lock.release() \n return albums\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album[\"id\"], AlbumKey=album[\"Key\"], Extras=\"MD5Sum,LastUpdated,FileName\")\n albumId = pictures[\"Album\"][\"id\"]\n for picture in pictures[\"Album\"][\"Images\"]:\n lock.acquire()\n db.addSmugImage(conn,albumId, datetime.datetime.strptime(picture[\"LastUpdated\"],'%Y-%m-%d %H:%M:%S'), picture[\"MD5Sum\"], picture[\"Key\"], picture[\"id\"], picture[\"FileName\"])\n lock.release() \n\ndef _getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = result[\"Categories\"]\n ids = []\n for category in categories:\n ids.append(category[\"id\"])\n lock.acquire()\n db.addUserCategory(conn,category[\"Type\"],category[\"id\"],category[\"NiceName\"],category[\"Name\"])\n lock.release() \n return ids \n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result[\"SubCategories\"]\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn,subcategory[\"id\"],subcategory[\"NiceName\"],subcategory[\"Name\"], categoryid)\n lock.release() \n\ndef 
_emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn,\"DELETE FROM smug_album\")\n db.execute(conn,\"DELETE FROM smug_image\")\n db.execute(conn,\"DELETE FROM user_category\")\n db.execute(conn,\"DELETE FROM user_subcategory\")\n lock.release()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class DeadlineMiddleware:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DeadlineMiddleware:
def __init__(self, get_response):
self.get_response = get_response
<|reserved_special_token_0|>
def process_view(self, request, view_func, view_args, view_kwargs):
if (view_func.__module__ == 'api.views' and view_func.__name__ ==
'ComingsoonData'):
return None
if (view_func.__module__ == 'django.contrib.admin.sites' or request
.user.is_superuser):
return None
else:
survey = datetime(2019, 9, 16, 23, 50, 0, 0)
teatime = datetime(2019, 9, 17, 18, 30, 0, 0)
if survey - datetime.now() <= timedelta(milliseconds=0):
if teatime - datetime.now() <= timedelta(milliseconds=0):
if (view_func.__module__ == 'enter.views' and view_func
.__name__ == 'attend'):
messages.add_message(request, messages.INFO,
'表單已結束提交', extra_tags='teatimestart')
return HttpResponseRedirect(reverse('index'))
else:
return None
elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':
return None
else:
if (view_func.__module__ == 'enter.views' and view_func
.__name__ == 'attend'):
messages.add_message(request, messages.INFO,
'表單已結束提交', extra_tags='teatimeform')
else:
messages.add_message(request, messages.INFO,
'茶會尚未開始', extra_tags='yetstart')
return HttpResponseRedirect(reverse('comingsoon'))
elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':
return None
elif view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':
return None
elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'index':
return HttpResponseRedirect(reverse('comingsoon'))
else:
messages.add_message(request, messages.INFO, '茶會尚未開始',
extra_tags='yetstart')
return HttpResponseRedirect(reverse('comingsoon'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DeadlineMiddleware:
    """Blocks access to the site outside the club tea-party event window.

    Three phases, keyed off two hard-coded moments:
      * survey open  -> only the coming-soon page and the sign-up form work;
      * survey closed, party not started -> everything redirects to coming-soon;
      * party started -> site is open, but the (now closed) sign-up form
        bounces back to the index page.
    The coming-soon API endpoint, admin pages and superusers always bypass
    the gate.
    """

    def __init__(self, get_response):
        # Standard Django middleware wiring: remember the next handler.
        self.get_response = get_response

    def __call__(self, request):
        # No per-request work here; all gating happens in process_view.
        return self.get_response(request)

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Identify the view by (module, name) so the comparisons below read
        # as simple tuple matches.
        target = (view_func.__module__, view_func.__name__)
        if target == ('api.views', 'ComingsoonData'):
            return None
        if view_func.__module__ == 'django.contrib.admin.sites' or request.user.is_superuser:
            return None

        # Hard-coded schedule: survey close and party start.
        survey_deadline = datetime(2019, 9, 16, 23, 50, 0, 0)
        party_start = datetime(2019, 9, 17, 18, 30, 0, 0)
        survey_closed = survey_deadline - datetime.now() <= timedelta(milliseconds=0)
        party_started = party_start - datetime.now() <= timedelta(milliseconds=0)

        if not survey_closed:
            # Survey still open: the sign-up flow is live, everything else waits.
            if target in (('joinclub.views', 'comingsoon'), ('enter.views', 'attend')):
                return None
            if target != ('joinclub.views', 'index'):
                messages.add_message(request, messages.INFO, '茶會尚未開始',
                    extra_tags='yetstart')
            return HttpResponseRedirect(reverse('comingsoon'))

        if party_started:
            # Party under way: site open, sign-up form closed.
            if target == ('enter.views', 'attend'):
                messages.add_message(request, messages.INFO, '表單已結束提交',
                    extra_tags='teatimestart')
                return HttpResponseRedirect(reverse('index'))
            return None

        # Between survey close and party start: only the coming-soon page works.
        if target == ('joinclub.views', 'comingsoon'):
            return None
        if target == ('enter.views', 'attend'):
            # Redirected from the survey form: tell the user it has closed.
            messages.add_message(request, messages.INFO, '表單已結束提交',
                extra_tags='teatimeform')
        else:
            messages.add_message(request, messages.INFO, '茶會尚未開始',
                extra_tags='yetstart')
        return HttpResponseRedirect(reverse('comingsoon'))
<|reserved_special_token_1|>
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from datetime import datetime, timedelta
class DeadlineMiddleware:
    """Gates the site around the club tea-party schedule.

    Before the survey deadline only the sign-up flow is reachable; between
    the deadline and the tea time everything redirects to the coming-soon
    page; once the party starts the site opens up and the sign-up form
    closes instead.  The coming-soon API endpoint, admin pages and
    superusers always bypass the gate.
    """

    # Hard-coded schedule: when the attendance survey closes and when the
    # tea party itself begins.
    SURVEY_DEADLINE = datetime(2019, 9, 16, 23, 50)
    TEA_TIME = datetime(2019, 9, 17, 18, 30)

    def __init__(self, get_response):
        # Standard Django middleware wiring: remember the next handler.
        self.get_response = get_response

    def __call__(self, request):
        # No per-request work here; all gating happens in process_view.
        return self.get_response(request)

    @staticmethod
    def _is(view_func, module, name):
        """Return True when *view_func* is the view *name* in *module*."""
        return view_func.__module__ == module and view_func.__name__ == name

    def process_view(self, request, view_func, view_args, view_kwargs):
        # The coming-soon data API is always reachable.
        if self._is(view_func, 'api.views', 'ComingsoonData'):
            return None
        # Admin pages and superusers bypass the deadline gate entirely.
        if view_func.__module__ == 'django.contrib.admin.sites' or request.user.is_superuser:
            return None

        if datetime.now() < self.SURVEY_DEADLINE:
            # Survey still open: only coming-soon and the sign-up form work.
            if self._is(view_func, 'joinclub.views', 'comingsoon'):
                return None
            if self._is(view_func, 'enter.views', 'attend'):
                return None
            if not self._is(view_func, 'joinclub.views', 'index'):
                messages.add_message(request, messages.INFO, '茶會尚未開始',
                                     extra_tags='yetstart')
            return HttpResponseRedirect(reverse('comingsoon'))

        if datetime.now() < self.TEA_TIME:
            # Survey closed but the party has not started yet.
            if self._is(view_func, 'joinclub.views', 'comingsoon'):
                return None
            if self._is(view_func, 'enter.views', 'attend'):
                # Redirected from the survey form: tell the user it closed.
                messages.add_message(request, messages.INFO, '表單已結束提交',
                                     extra_tags='teatimeform')
            else:
                messages.add_message(request, messages.INFO, '茶會尚未開始',
                                     extra_tags='yetstart')
            return HttpResponseRedirect(reverse('comingsoon'))

        # Party has started: site is open, but the sign-up form is closed.
        if self._is(view_func, 'enter.views', 'attend'):
            messages.add_message(request, messages.INFO, '表單已結束提交',
                                 extra_tags='teatimestart')
            return HttpResponseRedirect(reverse('index'))
        return None
<|reserved_special_token_1|>
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from datetime import datetime, timedelta
class DeadlineMiddleware:
    """Gate the site around the club tea-party schedule.

    Before the survey deadline only the coming-soon page and the sign-up
    form are reachable; after the deadline but before the party everything
    redirects to the coming-soon page; once the party starts the site opens
    up and only the (now closed) sign-up form is blocked.  Admin pages,
    superusers and the coming-soon API endpoint always bypass the gate.
    """
    def __init__(self, get_response):
        # Standard Django middleware wiring: remember the next handler.
        self.get_response = get_response
    def __call__(self, request):
        # No per-request work here; all gating happens in process_view.
        response = self.get_response(request)
        return response
    def process_view(self, request, view_func, view_args, view_kwargs):
        # The coming-soon data API is always reachable.
        if view_func.__module__ == 'api.views' and view_func.__name__ == 'ComingsoonData':
            return None
        # Admin pages and superusers bypass the deadline gate entirely.
        if view_func.__module__ == 'django.contrib.admin.sites' or request.user.is_superuser:
            return None
        else:
            survey = datetime(2019, 9, 16, 23, 50, 0, 0) # when the attendance survey closes
            teatime = datetime(2019, 9, 17, 18, 30, 0, 0) # when the tea party starts
            if survey - datetime.now() <= timedelta(milliseconds=0): # survey closed
                if teatime - datetime.now() <= timedelta(milliseconds=0): # survey closed + party started
                    if view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':
                        # Sign-up form is closed once the party starts.
                        messages.add_message(request, messages.INFO, '表單已結束提交', extra_tags='teatimestart')
                        return HttpResponseRedirect(reverse('index'))
                    else:
                        return None
                else: # survey closed + party not started yet
                    if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':
                        return None
                    else: # from the survey form -> "form closed" notice; anything else -> "not started yet"
                        if view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':
                            messages.add_message(request, messages.INFO, '表單已結束提交', extra_tags='teatimeform')
                        else:
                            messages.add_message(request, messages.INFO, '茶會尚未開始', extra_tags='yetstart')
                        return HttpResponseRedirect(reverse('comingsoon'))
            else: # survey still open
                if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':
                    return None
                elif view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':
                    return None
                else:
                    # joinclub index redirects quietly; everything else also
                    # gets the "not started yet" flash message first.
                    if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'index':
                        return HttpResponseRedirect(reverse('comingsoon'))
                    else:
                        messages.add_message(request, messages.INFO, '茶會尚未開始', extra_tags='yetstart')
                        return HttpResponseRedirect(reverse('comingsoon'))
|
flexible
|
{
"blob_id": "0d3e1df1720812e8546b1f3509c83d1e6566e103",
"index": 4639,
"step-1": "<mask token>\n\n\nclass DeadlineMiddleware:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DeadlineMiddleware:\n\n def __init__(self, get_response):\n self.get_response = get_response\n <mask token>\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n if (view_func.__module__ == 'api.views' and view_func.__name__ ==\n 'ComingsoonData'):\n return None\n if (view_func.__module__ == 'django.contrib.admin.sites' or request\n .user.is_superuser):\n return None\n else:\n survey = datetime(2019, 9, 16, 23, 50, 0, 0)\n teatime = datetime(2019, 9, 17, 18, 30, 0, 0)\n if survey - datetime.now() <= timedelta(milliseconds=0):\n if teatime - datetime.now() <= timedelta(milliseconds=0):\n if (view_func.__module__ == 'enter.views' and view_func\n .__name__ == 'attend'):\n messages.add_message(request, messages.INFO,\n '表單已結束提交', extra_tags='teatimestart')\n return HttpResponseRedirect(reverse('index'))\n else:\n return None\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':\n return None\n else:\n if (view_func.__module__ == 'enter.views' and view_func\n .__name__ == 'attend'):\n messages.add_message(request, messages.INFO,\n '表單已結束提交', extra_tags='teatimeform')\n else:\n messages.add_message(request, messages.INFO,\n '茶會尚未開始', extra_tags='yetstart')\n return HttpResponseRedirect(reverse('comingsoon'))\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':\n return None\n elif view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':\n return None\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'index':\n return HttpResponseRedirect(reverse('comingsoon'))\n else:\n messages.add_message(request, messages.INFO, '茶會尚未開始',\n extra_tags='yetstart')\n return HttpResponseRedirect(reverse('comingsoon'))\n",
"step-3": "<mask token>\n\n\nclass DeadlineMiddleware:\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n return response\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n if (view_func.__module__ == 'api.views' and view_func.__name__ ==\n 'ComingsoonData'):\n return None\n if (view_func.__module__ == 'django.contrib.admin.sites' or request\n .user.is_superuser):\n return None\n else:\n survey = datetime(2019, 9, 16, 23, 50, 0, 0)\n teatime = datetime(2019, 9, 17, 18, 30, 0, 0)\n if survey - datetime.now() <= timedelta(milliseconds=0):\n if teatime - datetime.now() <= timedelta(milliseconds=0):\n if (view_func.__module__ == 'enter.views' and view_func\n .__name__ == 'attend'):\n messages.add_message(request, messages.INFO,\n '表單已結束提交', extra_tags='teatimestart')\n return HttpResponseRedirect(reverse('index'))\n else:\n return None\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':\n return None\n else:\n if (view_func.__module__ == 'enter.views' and view_func\n .__name__ == 'attend'):\n messages.add_message(request, messages.INFO,\n '表單已結束提交', extra_tags='teatimeform')\n else:\n messages.add_message(request, messages.INFO,\n '茶會尚未開始', extra_tags='yetstart')\n return HttpResponseRedirect(reverse('comingsoon'))\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':\n return None\n elif view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':\n return None\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'index':\n return HttpResponseRedirect(reverse('comingsoon'))\n else:\n messages.add_message(request, messages.INFO, '茶會尚未開始',\n extra_tags='yetstart')\n return HttpResponseRedirect(reverse('comingsoon'))\n",
"step-4": "from django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom datetime import datetime, timedelta\n\n\nclass DeadlineMiddleware:\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n return response\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n if (view_func.__module__ == 'api.views' and view_func.__name__ ==\n 'ComingsoonData'):\n return None\n if (view_func.__module__ == 'django.contrib.admin.sites' or request\n .user.is_superuser):\n return None\n else:\n survey = datetime(2019, 9, 16, 23, 50, 0, 0)\n teatime = datetime(2019, 9, 17, 18, 30, 0, 0)\n if survey - datetime.now() <= timedelta(milliseconds=0):\n if teatime - datetime.now() <= timedelta(milliseconds=0):\n if (view_func.__module__ == 'enter.views' and view_func\n .__name__ == 'attend'):\n messages.add_message(request, messages.INFO,\n '表單已結束提交', extra_tags='teatimestart')\n return HttpResponseRedirect(reverse('index'))\n else:\n return None\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':\n return None\n else:\n if (view_func.__module__ == 'enter.views' and view_func\n .__name__ == 'attend'):\n messages.add_message(request, messages.INFO,\n '表單已結束提交', extra_tags='teatimeform')\n else:\n messages.add_message(request, messages.INFO,\n '茶會尚未開始', extra_tags='yetstart')\n return HttpResponseRedirect(reverse('comingsoon'))\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':\n return None\n elif view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':\n return None\n elif view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'index':\n return HttpResponseRedirect(reverse('comingsoon'))\n else:\n messages.add_message(request, messages.INFO, '茶會尚未開始',\n extra_tags='yetstart')\n return 
HttpResponseRedirect(reverse('comingsoon'))\n",
"step-5": "\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.urls import reverse\r\nfrom django.contrib import messages\r\nfrom datetime import datetime, timedelta\r\n\r\nclass DeadlineMiddleware:\r\n def __init__(self, get_response):\r\n self.get_response = get_response\r\n \r\n def __call__(self, request):\r\n response = self.get_response(request)\r\n return response\r\n \r\n def process_view(self, request, view_func, view_args, view_kwargs):\r\n if view_func.__module__ == 'api.views' and view_func.__name__ == 'ComingsoonData':\r\n return None\r\n if view_func.__module__ == 'django.contrib.admin.sites' or request.user.is_superuser:\r\n return None\r\n else:\r\n survey = datetime(2019, 9, 16, 23, 50, 0, 0) #茶會調查結束時間\r\n teatime = datetime(2019, 9, 17, 18, 30, 0, 0) #茶會開始時間\r\n if survey - datetime.now() <= timedelta(milliseconds=0): #調查結束\r\n if teatime - datetime.now() <= timedelta(milliseconds=0): #調查結束+茶會開始\r\n if view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':\r\n messages.add_message(request, messages.INFO, '表單已結束提交', extra_tags='teatimestart')\r\n return HttpResponseRedirect(reverse('index'))\r\n else:\r\n return None\r\n else: #調查結束+茶會未開始\r\n if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':\r\n return None\r\n else: #從調查表單轉址->表單已結束提交,其他->茶會尚未開始\r\n if view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':\r\n messages.add_message(request, messages.INFO, '表單已結束提交', extra_tags='teatimeform')\r\n else:\r\n messages.add_message(request, messages.INFO, '茶會尚未開始', extra_tags='yetstart')\r\n return HttpResponseRedirect(reverse('comingsoon'))\r\n else: #調查未結束\r\n if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':\r\n return None\r\n elif view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':\r\n return None\r\n else:\r\n if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'index':\r\n return 
HttpResponseRedirect(reverse('comingsoon'))\r\n else:\r\n messages.add_message(request, messages.INFO, '茶會尚未開始', extra_tags='yetstart')\r\n return HttpResponseRedirect(reverse('comingsoon'))\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.